]>
Commit | Line | Data |
---|---|---|
0d08fcee | 1 | use std::collections::HashSet; |
d33d8f4e DC |
2 | use std::ffi::OsStr; |
3 | use std::os::unix::ffi::OsStrExt; | |
6b809ff5 | 4 | use std::sync::{Arc, Mutex}; |
53a561a2 | 5 | use std::path::{Path, PathBuf}; |
804f6143 | 6 | use std::pin::Pin; |
cad540e9 | 7 | |
6ef9bb59 | 8 | use anyhow::{bail, format_err, Error}; |
9e47c0a5 | 9 | use futures::*; |
cad540e9 WB |
10 | use hyper::http::request::Parts; |
11 | use hyper::{header, Body, Response, StatusCode}; | |
15e9b4ed DM |
12 | use serde_json::{json, Value}; |
13 | ||
bb34b589 DM |
14 | use proxmox::api::{ |
15 | api, ApiResponseFuture, ApiHandler, ApiMethod, Router, | |
e7cb4dc5 WB |
16 | RpcEnvironment, RpcEnvironmentType, Permission |
17 | }; | |
cad540e9 WB |
18 | use proxmox::api::router::SubdirMap; |
19 | use proxmox::api::schema::*; | |
60f9a6ea | 20 | use proxmox::tools::fs::{replace_file, CreateOptions}; |
9ea4bce4 | 21 | use proxmox::{http_err, identity, list_subdirs_api_method, sortable}; |
e18a6c9e | 22 | |
804f6143 | 23 | use pxar::accessor::aio::{Accessor, FileContents, FileEntry}; |
d33d8f4e DC |
24 | use pxar::EntryKind; |
25 | ||
cad540e9 | 26 | use crate::api2::types::*; |
431cc7b1 | 27 | use crate::api2::node::rrd::create_value_from_rrd; |
e5064ba6 | 28 | use crate::backup::*; |
cad540e9 | 29 | use crate::config::datastore; |
54552dda DM |
30 | use crate::config::cached_user_info::CachedUserInfo; |
31 | ||
4fdf5ddf | 32 | use crate::server::{jobstate::Job, WorkerTask}; |
804f6143 DC |
33 | use crate::tools::{ |
34 | self, | |
35 | zip::{ZipEncoder, ZipEntry}, | |
36 | AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream, | |
37 | }; | |
38 | ||
d00e1a21 DM |
39 | use crate::config::acl::{ |
40 | PRIV_DATASTORE_AUDIT, | |
54552dda | 41 | PRIV_DATASTORE_MODIFY, |
d00e1a21 DM |
42 | PRIV_DATASTORE_READ, |
43 | PRIV_DATASTORE_PRUNE, | |
54552dda | 44 | PRIV_DATASTORE_BACKUP, |
09f6a240 | 45 | PRIV_DATASTORE_VERIFY, |
d00e1a21 | 46 | }; |
1629d2ad | 47 | |
bff85572 | 48 | fn check_priv_or_backup_owner( |
e7cb4dc5 WB |
49 | store: &DataStore, |
50 | group: &BackupGroup, | |
e6dc35ac | 51 | auth_id: &Authid, |
bff85572 FG |
52 | required_privs: u64, |
53 | ) -> Result<(), Error> { | |
54 | let user_info = CachedUserInfo::new()?; | |
55 | let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]); | |
56 | ||
57 | if privs & required_privs == 0 { | |
58 | let owner = store.get_owner(group)?; | |
59 | check_backup_owner(&owner, auth_id)?; | |
60 | } | |
61 | Ok(()) | |
62 | } | |
63 | ||
64 | fn check_backup_owner( | |
65 | owner: &Authid, | |
66 | auth_id: &Authid, | |
e7cb4dc5 | 67 | ) -> Result<(), Error> { |
bff85572 FG |
68 | let correct_owner = owner == auth_id |
69 | || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id); | |
70 | if !correct_owner { | |
e6dc35ac | 71 | bail!("backup owner check failed ({} != {})", auth_id, owner); |
54552dda DM |
72 | } |
73 | Ok(()) | |
74 | } | |
75 | ||
e7cb4dc5 WB |
76 | fn read_backup_index( |
77 | store: &DataStore, | |
78 | backup_dir: &BackupDir, | |
79 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { | |
8c70e3eb | 80 | |
ff86ef00 | 81 | let (manifest, index_size) = store.load_manifest(backup_dir)?; |
8c70e3eb | 82 | |
09b1f7b2 DM |
83 | let mut result = Vec::new(); |
84 | for item in manifest.files() { | |
85 | result.push(BackupContent { | |
86 | filename: item.filename.clone(), | |
f28d9088 | 87 | crypt_mode: Some(item.crypt_mode), |
09b1f7b2 DM |
88 | size: Some(item.size), |
89 | }); | |
8c70e3eb DM |
90 | } |
91 | ||
09b1f7b2 | 92 | result.push(BackupContent { |
96d65fbc | 93 | filename: MANIFEST_BLOB_NAME.to_string(), |
882c0823 FG |
94 | crypt_mode: match manifest.signature { |
95 | Some(_) => Some(CryptMode::SignOnly), | |
96 | None => Some(CryptMode::None), | |
97 | }, | |
09b1f7b2 DM |
98 | size: Some(index_size), |
99 | }); | |
4f1e40a2 | 100 | |
70030b43 | 101 | Ok((manifest, result)) |
8c70e3eb DM |
102 | } |
103 | ||
1c090810 DC |
104 | fn get_all_snapshot_files( |
105 | store: &DataStore, | |
106 | info: &BackupInfo, | |
70030b43 DM |
107 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { |
108 | ||
109 | let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?; | |
1c090810 DC |
110 | |
111 | let file_set = files.iter().fold(HashSet::new(), |mut acc, item| { | |
112 | acc.insert(item.filename.clone()); | |
113 | acc | |
114 | }); | |
115 | ||
116 | for file in &info.files { | |
117 | if file_set.contains(file) { continue; } | |
f28d9088 WB |
118 | files.push(BackupContent { |
119 | filename: file.to_string(), | |
120 | size: None, | |
121 | crypt_mode: None, | |
122 | }); | |
1c090810 DC |
123 | } |
124 | ||
70030b43 | 125 | Ok((manifest, files)) |
1c090810 DC |
126 | } |
127 | ||
b31c8019 DM |
128 | #[api( |
129 | input: { | |
130 | properties: { | |
131 | store: { | |
132 | schema: DATASTORE_SCHEMA, | |
133 | }, | |
134 | }, | |
135 | }, | |
136 | returns: { | |
137 | type: Array, | |
138 | description: "Returns the list of backup groups.", | |
139 | items: { | |
140 | type: GroupListItem, | |
141 | } | |
142 | }, | |
bb34b589 | 143 | access: { |
54552dda DM |
144 | permission: &Permission::Privilege( |
145 | &["datastore", "{store}"], | |
146 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, | |
147 | true), | |
bb34b589 | 148 | }, |
b31c8019 DM |
149 | )] |
150 | /// List backup groups. | |
ad20d198 | 151 | fn list_groups( |
b31c8019 | 152 | store: String, |
54552dda | 153 | rpcenv: &mut dyn RpcEnvironment, |
b31c8019 | 154 | ) -> Result<Vec<GroupListItem>, Error> { |
812c6f87 | 155 | |
e6dc35ac | 156 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 157 | let user_info = CachedUserInfo::new()?; |
e6dc35ac | 158 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
54552dda | 159 | |
b31c8019 | 160 | let datastore = DataStore::lookup_datastore(&store)?; |
0d08fcee FG |
161 | let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; |
162 | ||
163 | let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?; | |
164 | ||
165 | let group_info = backup_groups | |
166 | .into_iter() | |
167 | .fold(Vec::new(), |mut group_info, group| { | |
168 | let owner = match datastore.get_owner(&group) { | |
169 | Ok(auth_id) => auth_id, | |
170 | Err(err) => { | |
171 | println!("Failed to get owner of group '{}' - {}", group, err); | |
172 | return group_info; | |
173 | }, | |
174 | }; | |
175 | if !list_all && check_backup_owner(&owner, &auth_id).is_err() { | |
176 | return group_info; | |
177 | } | |
178 | ||
179 | let snapshots = match group.list_backups(&datastore.base_path()) { | |
180 | Ok(snapshots) => snapshots, | |
181 | Err(_) => { | |
182 | return group_info; | |
183 | }, | |
184 | }; | |
185 | ||
186 | let backup_count: u64 = snapshots.len() as u64; | |
187 | if backup_count == 0 { | |
188 | return group_info; | |
189 | } | |
190 | ||
191 | let last_backup = snapshots | |
192 | .iter() | |
193 | .fold(&snapshots[0], |last, curr| { | |
194 | if curr.is_finished() | |
195 | && curr.backup_dir.backup_time() > last.backup_dir.backup_time() { | |
196 | curr | |
197 | } else { | |
198 | last | |
199 | } | |
200 | }) | |
201 | .to_owned(); | |
202 | ||
203 | group_info.push(GroupListItem { | |
204 | backup_type: group.backup_type().to_string(), | |
205 | backup_id: group.backup_id().to_string(), | |
206 | last_backup: last_backup.backup_dir.backup_time(), | |
207 | owner: Some(owner), | |
208 | backup_count, | |
209 | files: last_backup.files, | |
210 | }); | |
211 | ||
212 | group_info | |
213 | }); | |
812c6f87 | 214 | |
0d08fcee | 215 | Ok(group_info) |
812c6f87 | 216 | } |
8f579717 | 217 | |
09b1f7b2 DM |
218 | #[api( |
219 | input: { | |
220 | properties: { | |
221 | store: { | |
222 | schema: DATASTORE_SCHEMA, | |
223 | }, | |
224 | "backup-type": { | |
225 | schema: BACKUP_TYPE_SCHEMA, | |
226 | }, | |
227 | "backup-id": { | |
228 | schema: BACKUP_ID_SCHEMA, | |
229 | }, | |
230 | "backup-time": { | |
231 | schema: BACKUP_TIME_SCHEMA, | |
232 | }, | |
233 | }, | |
234 | }, | |
235 | returns: { | |
236 | type: Array, | |
237 | description: "Returns the list of archive files inside a backup snapshots.", | |
238 | items: { | |
239 | type: BackupContent, | |
240 | } | |
241 | }, | |
bb34b589 | 242 | access: { |
54552dda DM |
243 | permission: &Permission::Privilege( |
244 | &["datastore", "{store}"], | |
245 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
246 | true), | |
bb34b589 | 247 | }, |
09b1f7b2 DM |
248 | )] |
249 | /// List snapshot files. | |
ea5f547f | 250 | pub fn list_snapshot_files( |
09b1f7b2 DM |
251 | store: String, |
252 | backup_type: String, | |
253 | backup_id: String, | |
254 | backup_time: i64, | |
01a13423 | 255 | _info: &ApiMethod, |
54552dda | 256 | rpcenv: &mut dyn RpcEnvironment, |
09b1f7b2 | 257 | ) -> Result<Vec<BackupContent>, Error> { |
01a13423 | 258 | |
e6dc35ac | 259 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
09b1f7b2 | 260 | let datastore = DataStore::lookup_datastore(&store)?; |
54552dda | 261 | |
e0e5b442 | 262 | let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?; |
01a13423 | 263 | |
bff85572 | 264 | check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?; |
54552dda | 265 | |
d7c24397 | 266 | let info = BackupInfo::new(&datastore.base_path(), snapshot)?; |
01a13423 | 267 | |
70030b43 DM |
268 | let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?; |
269 | ||
270 | Ok(files) | |
01a13423 DM |
271 | } |
272 | ||
68a6a0ee DM |
273 | #[api( |
274 | input: { | |
275 | properties: { | |
276 | store: { | |
277 | schema: DATASTORE_SCHEMA, | |
278 | }, | |
279 | "backup-type": { | |
280 | schema: BACKUP_TYPE_SCHEMA, | |
281 | }, | |
282 | "backup-id": { | |
283 | schema: BACKUP_ID_SCHEMA, | |
284 | }, | |
285 | "backup-time": { | |
286 | schema: BACKUP_TIME_SCHEMA, | |
287 | }, | |
288 | }, | |
289 | }, | |
bb34b589 | 290 | access: { |
54552dda DM |
291 | permission: &Permission::Privilege( |
292 | &["datastore", "{store}"], | |
293 | PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE, | |
294 | true), | |
bb34b589 | 295 | }, |
68a6a0ee DM |
296 | )] |
297 | /// Delete backup snapshot. | |
298 | fn delete_snapshot( | |
299 | store: String, | |
300 | backup_type: String, | |
301 | backup_id: String, | |
302 | backup_time: i64, | |
6f62c924 | 303 | _info: &ApiMethod, |
54552dda | 304 | rpcenv: &mut dyn RpcEnvironment, |
6f62c924 DM |
305 | ) -> Result<Value, Error> { |
306 | ||
e6dc35ac | 307 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 308 | |
e0e5b442 | 309 | let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?; |
68a6a0ee | 310 | let datastore = DataStore::lookup_datastore(&store)?; |
6f62c924 | 311 | |
bff85572 | 312 | check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?; |
54552dda | 313 | |
c9756b40 | 314 | datastore.remove_backup_dir(&snapshot, false)?; |
6f62c924 DM |
315 | |
316 | Ok(Value::Null) | |
317 | } | |
318 | ||
fc189b19 DM |
319 | #[api( |
320 | input: { | |
321 | properties: { | |
322 | store: { | |
323 | schema: DATASTORE_SCHEMA, | |
324 | }, | |
325 | "backup-type": { | |
326 | optional: true, | |
327 | schema: BACKUP_TYPE_SCHEMA, | |
328 | }, | |
329 | "backup-id": { | |
330 | optional: true, | |
331 | schema: BACKUP_ID_SCHEMA, | |
332 | }, | |
333 | }, | |
334 | }, | |
335 | returns: { | |
336 | type: Array, | |
337 | description: "Returns the list of snapshots.", | |
338 | items: { | |
339 | type: SnapshotListItem, | |
340 | } | |
341 | }, | |
bb34b589 | 342 | access: { |
54552dda DM |
343 | permission: &Permission::Privilege( |
344 | &["datastore", "{store}"], | |
345 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, | |
346 | true), | |
bb34b589 | 347 | }, |
fc189b19 DM |
348 | )] |
349 | /// List backup snapshots. | |
f24fc116 | 350 | pub fn list_snapshots ( |
54552dda DM |
351 | store: String, |
352 | backup_type: Option<String>, | |
353 | backup_id: Option<String>, | |
354 | _param: Value, | |
184f17af | 355 | _info: &ApiMethod, |
54552dda | 356 | rpcenv: &mut dyn RpcEnvironment, |
fc189b19 | 357 | ) -> Result<Vec<SnapshotListItem>, Error> { |
184f17af | 358 | |
e6dc35ac | 359 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 360 | let user_info = CachedUserInfo::new()?; |
e6dc35ac | 361 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
184f17af | 362 | |
0d08fcee FG |
363 | let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; |
364 | ||
54552dda | 365 | let datastore = DataStore::lookup_datastore(&store)?; |
184f17af | 366 | |
c0977501 | 367 | let base_path = datastore.base_path(); |
184f17af | 368 | |
0d08fcee FG |
369 | let groups = match (backup_type, backup_id) { |
370 | (Some(backup_type), Some(backup_id)) => { | |
371 | let mut groups = Vec::with_capacity(1); | |
372 | groups.push(BackupGroup::new(backup_type, backup_id)); | |
373 | groups | |
374 | }, | |
375 | (Some(backup_type), None) => { | |
376 | BackupInfo::list_backup_groups(&base_path)? | |
377 | .into_iter() | |
378 | .filter(|group| group.backup_type() == backup_type) | |
379 | .collect() | |
380 | }, | |
381 | (None, Some(backup_id)) => { | |
382 | BackupInfo::list_backup_groups(&base_path)? | |
383 | .into_iter() | |
384 | .filter(|group| group.backup_id() == backup_id) | |
385 | .collect() | |
386 | }, | |
387 | _ => BackupInfo::list_backup_groups(&base_path)?, | |
388 | }; | |
54552dda | 389 | |
0d08fcee FG |
390 | let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| { |
391 | let backup_time = info.backup_dir.backup_time(); | |
1c090810 | 392 | |
0d08fcee | 393 | let (comment, verification, files, size) = match get_all_snapshot_files(&datastore, &info) { |
70030b43 | 394 | Ok((manifest, files)) => { |
70030b43 DM |
395 | // extract the first line from notes |
396 | let comment: Option<String> = manifest.unprotected["notes"] | |
397 | .as_str() | |
398 | .and_then(|notes| notes.lines().next()) | |
399 | .map(String::from); | |
400 | ||
3b2046d2 TL |
401 | let verify = manifest.unprotected["verify_state"].clone(); |
402 | let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) { | |
403 | Ok(verify) => verify, | |
404 | Err(err) => { | |
405 | eprintln!("error parsing verification state : '{}'", err); | |
406 | None | |
407 | } | |
408 | }; | |
409 | ||
0d08fcee FG |
410 | let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum()); |
411 | ||
412 | (comment, verify, files, size) | |
1c090810 DC |
413 | }, |
414 | Err(err) => { | |
415 | eprintln!("error during snapshot file listing: '{}'", err); | |
70030b43 | 416 | ( |
3b2046d2 | 417 | None, |
70030b43 DM |
418 | None, |
419 | info | |
420 | .files | |
0d08fcee | 421 | .into_iter() |
70030b43 DM |
422 | .map(|x| BackupContent { |
423 | filename: x.to_string(), | |
424 | size: None, | |
425 | crypt_mode: None, | |
426 | }) | |
0d08fcee FG |
427 | .collect(), |
428 | None, | |
70030b43 | 429 | ) |
1c090810 DC |
430 | }, |
431 | }; | |
432 | ||
0d08fcee | 433 | SnapshotListItem { |
fc189b19 DM |
434 | backup_type: group.backup_type().to_string(), |
435 | backup_id: group.backup_id().to_string(), | |
0d08fcee | 436 | backup_time, |
70030b43 | 437 | comment, |
3b2046d2 | 438 | verification, |
1c090810 DC |
439 | files, |
440 | size, | |
0d08fcee FG |
441 | owner, |
442 | } | |
443 | }; | |
184f17af | 444 | |
0d08fcee FG |
445 | groups |
446 | .iter() | |
447 | .try_fold(Vec::new(), |mut snapshots, group| { | |
448 | let owner = match datastore.get_owner(group) { | |
449 | Ok(auth_id) => auth_id, | |
450 | Err(err) => { | |
451 | eprintln!("Failed to get owner of group '{}/{}' - {}", | |
452 | &store, | |
453 | group, | |
454 | err); | |
455 | return Ok(snapshots); | |
456 | }, | |
457 | }; | |
458 | ||
459 | if !list_all && check_backup_owner(&owner, &auth_id).is_err() { | |
460 | return Ok(snapshots); | |
461 | } | |
462 | ||
463 | let group_backups = group.list_backups(&datastore.base_path())?; | |
464 | ||
465 | snapshots.extend( | |
466 | group_backups | |
467 | .into_iter() | |
468 | .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info)) | |
469 | ); | |
470 | ||
471 | Ok(snapshots) | |
472 | }) | |
184f17af DM |
473 | } |
474 | ||
14e08625 | 475 | fn get_snapshots_count(store: &DataStore) -> Result<Counts, Error> { |
16f9f244 DC |
476 | let base_path = store.base_path(); |
477 | let backup_list = BackupInfo::list_backups(&base_path)?; | |
478 | let mut groups = HashSet::new(); | |
14e08625 DC |
479 | |
480 | let mut result = Counts { | |
481 | ct: None, | |
482 | host: None, | |
483 | vm: None, | |
484 | other: None, | |
485 | }; | |
486 | ||
16f9f244 DC |
487 | for info in backup_list { |
488 | let group = info.backup_dir.group(); | |
489 | ||
490 | let id = group.backup_id(); | |
491 | let backup_type = group.backup_type(); | |
492 | ||
493 | let mut new_id = false; | |
494 | ||
495 | if groups.insert(format!("{}-{}", &backup_type, &id)) { | |
496 | new_id = true; | |
497 | } | |
498 | ||
14e08625 DC |
499 | let mut counts = match backup_type { |
500 | "ct" => result.ct.take().unwrap_or(Default::default()), | |
501 | "host" => result.host.take().unwrap_or(Default::default()), | |
502 | "vm" => result.vm.take().unwrap_or(Default::default()), | |
503 | _ => result.other.take().unwrap_or(Default::default()), | |
504 | }; | |
505 | ||
506 | counts.snapshots += 1; | |
507 | if new_id { | |
508 | counts.groups +=1; | |
509 | } | |
510 | ||
511 | match backup_type { | |
512 | "ct" => result.ct = Some(counts), | |
513 | "host" => result.host = Some(counts), | |
514 | "vm" => result.vm = Some(counts), | |
515 | _ => result.other = Some(counts), | |
16f9f244 DC |
516 | } |
517 | } | |
518 | ||
519 | Ok(result) | |
520 | } | |
521 | ||
1dc117bb DM |
522 | #[api( |
523 | input: { | |
524 | properties: { | |
525 | store: { | |
526 | schema: DATASTORE_SCHEMA, | |
527 | }, | |
528 | }, | |
529 | }, | |
530 | returns: { | |
14e08625 | 531 | type: DataStoreStatus, |
1dc117bb | 532 | }, |
bb34b589 | 533 | access: { |
54552dda | 534 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), |
bb34b589 | 535 | }, |
1dc117bb DM |
536 | )] |
537 | /// Get datastore status. | |
ea5f547f | 538 | pub fn status( |
1dc117bb | 539 | store: String, |
0eecf38f DM |
540 | _info: &ApiMethod, |
541 | _rpcenv: &mut dyn RpcEnvironment, | |
14e08625 | 542 | ) -> Result<DataStoreStatus, Error> { |
1dc117bb | 543 | let datastore = DataStore::lookup_datastore(&store)?; |
14e08625 DC |
544 | let storage = crate::tools::disks::disk_usage(&datastore.base_path())?; |
545 | let counts = get_snapshots_count(&datastore)?; | |
16f9f244 DC |
546 | let gc_status = datastore.last_gc_status(); |
547 | ||
14e08625 DC |
548 | Ok(DataStoreStatus { |
549 | total: storage.total, | |
550 | used: storage.used, | |
551 | avail: storage.avail, | |
552 | gc_status, | |
553 | counts, | |
554 | }) | |
0eecf38f DM |
555 | } |
556 | ||
c2009e53 DM |
557 | #[api( |
558 | input: { | |
559 | properties: { | |
560 | store: { | |
561 | schema: DATASTORE_SCHEMA, | |
562 | }, | |
563 | "backup-type": { | |
564 | schema: BACKUP_TYPE_SCHEMA, | |
565 | optional: true, | |
566 | }, | |
567 | "backup-id": { | |
568 | schema: BACKUP_ID_SCHEMA, | |
569 | optional: true, | |
570 | }, | |
571 | "backup-time": { | |
572 | schema: BACKUP_TIME_SCHEMA, | |
573 | optional: true, | |
574 | }, | |
575 | }, | |
576 | }, | |
577 | returns: { | |
578 | schema: UPID_SCHEMA, | |
579 | }, | |
580 | access: { | |
09f6a240 | 581 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true), |
c2009e53 DM |
582 | }, |
583 | )] | |
584 | /// Verify backups. | |
585 | /// | |
586 | /// This function can verify a single backup snapshot, all backup from a backup group, | |
587 | /// or all backups in the datastore. | |
588 | pub fn verify( | |
589 | store: String, | |
590 | backup_type: Option<String>, | |
591 | backup_id: Option<String>, | |
592 | backup_time: Option<i64>, | |
593 | rpcenv: &mut dyn RpcEnvironment, | |
594 | ) -> Result<Value, Error> { | |
595 | let datastore = DataStore::lookup_datastore(&store)?; | |
596 | ||
09f6a240 | 597 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
8ea00f6e | 598 | let worker_id; |
c2009e53 DM |
599 | |
600 | let mut backup_dir = None; | |
601 | let mut backup_group = None; | |
133042b5 | 602 | let mut worker_type = "verify"; |
c2009e53 DM |
603 | |
604 | match (backup_type, backup_id, backup_time) { | |
605 | (Some(backup_type), Some(backup_id), Some(backup_time)) => { | |
4ebda996 | 606 | worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time); |
e0e5b442 | 607 | let dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
09f6a240 FG |
608 | |
609 | check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?; | |
610 | ||
c2009e53 | 611 | backup_dir = Some(dir); |
133042b5 | 612 | worker_type = "verify_snapshot"; |
c2009e53 DM |
613 | } |
614 | (Some(backup_type), Some(backup_id), None) => { | |
4ebda996 | 615 | worker_id = format!("{}:{}/{}", store, backup_type, backup_id); |
c2009e53 | 616 | let group = BackupGroup::new(backup_type, backup_id); |
09f6a240 FG |
617 | |
618 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; | |
619 | ||
c2009e53 | 620 | backup_group = Some(group); |
133042b5 | 621 | worker_type = "verify_group"; |
c2009e53 DM |
622 | } |
623 | (None, None, None) => { | |
8ea00f6e | 624 | worker_id = store.clone(); |
c2009e53 | 625 | } |
5a718dce | 626 | _ => bail!("parameters do not specify a backup group or snapshot"), |
c2009e53 DM |
627 | } |
628 | ||
c2009e53 DM |
629 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; |
630 | ||
631 | let upid_str = WorkerTask::new_thread( | |
133042b5 | 632 | worker_type, |
e7cb4dc5 | 633 | Some(worker_id.clone()), |
09f6a240 | 634 | auth_id.clone(), |
e7cb4dc5 WB |
635 | to_stdout, |
636 | move |worker| { | |
4f09d310 DM |
637 | let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16))); |
638 | let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64))); | |
639 | ||
adfdc369 | 640 | let failed_dirs = if let Some(backup_dir) = backup_dir { |
adfdc369 | 641 | let mut res = Vec::new(); |
f6b1d1cc WB |
642 | if !verify_backup_dir( |
643 | datastore, | |
644 | &backup_dir, | |
645 | verified_chunks, | |
646 | corrupt_chunks, | |
647 | worker.clone(), | |
648 | worker.upid().clone(), | |
d771a608 | 649 | None, |
f6b1d1cc | 650 | )? { |
adfdc369 DC |
651 | res.push(backup_dir.to_string()); |
652 | } | |
653 | res | |
c2009e53 | 654 | } else if let Some(backup_group) = backup_group { |
63d9aca9 DM |
655 | let (_count, failed_dirs) = verify_backup_group( |
656 | datastore, | |
657 | &backup_group, | |
658 | verified_chunks, | |
659 | corrupt_chunks, | |
660 | None, | |
661 | worker.clone(), | |
f6b1d1cc | 662 | worker.upid(), |
d771a608 | 663 | None, |
63d9aca9 DM |
664 | )?; |
665 | failed_dirs | |
c2009e53 | 666 | } else { |
09f6a240 FG |
667 | let privs = CachedUserInfo::new()? |
668 | .lookup_privs(&auth_id, &["datastore", &store]); | |
669 | ||
670 | let owner = if privs & PRIV_DATASTORE_VERIFY == 0 { | |
671 | Some(auth_id) | |
672 | } else { | |
673 | None | |
674 | }; | |
675 | ||
676 | verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)? | |
c2009e53 | 677 | }; |
adfdc369 | 678 | if failed_dirs.len() > 0 { |
1b1cab83 | 679 | worker.log("Failed to verify following snapshots/groups:"); |
adfdc369 DC |
680 | for dir in failed_dirs { |
681 | worker.log(format!("\t{}", dir)); | |
682 | } | |
1ffe0301 | 683 | bail!("verification failed - please check the log for details"); |
c2009e53 DM |
684 | } |
685 | Ok(()) | |
e7cb4dc5 WB |
686 | }, |
687 | )?; | |
c2009e53 DM |
688 | |
689 | Ok(json!(upid_str)) | |
690 | } | |
691 | ||
255f378a DM |
692 | #[macro_export] |
693 | macro_rules! add_common_prune_prameters { | |
552c2259 DM |
694 | ( [ $( $list1:tt )* ] ) => { |
695 | add_common_prune_prameters!([$( $list1 )* ] , []) | |
696 | }; | |
697 | ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => { | |
255f378a | 698 | [ |
552c2259 | 699 | $( $list1 )* |
255f378a | 700 | ( |
552c2259 | 701 | "keep-daily", |
255f378a | 702 | true, |
49ff1092 | 703 | &PRUNE_SCHEMA_KEEP_DAILY, |
255f378a | 704 | ), |
102d8d41 DM |
705 | ( |
706 | "keep-hourly", | |
707 | true, | |
49ff1092 | 708 | &PRUNE_SCHEMA_KEEP_HOURLY, |
102d8d41 | 709 | ), |
255f378a | 710 | ( |
552c2259 | 711 | "keep-last", |
255f378a | 712 | true, |
49ff1092 | 713 | &PRUNE_SCHEMA_KEEP_LAST, |
255f378a DM |
714 | ), |
715 | ( | |
552c2259 | 716 | "keep-monthly", |
255f378a | 717 | true, |
49ff1092 | 718 | &PRUNE_SCHEMA_KEEP_MONTHLY, |
255f378a DM |
719 | ), |
720 | ( | |
552c2259 | 721 | "keep-weekly", |
255f378a | 722 | true, |
49ff1092 | 723 | &PRUNE_SCHEMA_KEEP_WEEKLY, |
255f378a DM |
724 | ), |
725 | ( | |
726 | "keep-yearly", | |
727 | true, | |
49ff1092 | 728 | &PRUNE_SCHEMA_KEEP_YEARLY, |
255f378a | 729 | ), |
552c2259 | 730 | $( $list2 )* |
255f378a DM |
731 | ] |
732 | } | |
0eecf38f DM |
733 | } |
734 | ||
db1e061d DM |
735 | pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new( |
736 | "Returns the list of snapshots and a flag indicating if there are kept or removed.", | |
660a3489 | 737 | &PruneListItem::API_SCHEMA |
db1e061d DM |
738 | ).schema(); |
739 | ||
0ab08ac9 DM |
740 | const API_METHOD_PRUNE: ApiMethod = ApiMethod::new( |
741 | &ApiHandler::Sync(&prune), | |
255f378a | 742 | &ObjectSchema::new( |
0ab08ac9 DM |
743 | "Prune the datastore.", |
744 | &add_common_prune_prameters!([ | |
745 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
746 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
3b03abfe DM |
747 | ("dry-run", true, &BooleanSchema::new( |
748 | "Just show what prune would do, but do not delete anything.") | |
749 | .schema() | |
750 | ), | |
0ab08ac9 | 751 | ],[ |
66c49c21 | 752 | ("store", false, &DATASTORE_SCHEMA), |
0ab08ac9 | 753 | ]) |
db1e061d DM |
754 | )) |
755 | .returns(&API_RETURN_SCHEMA_PRUNE) | |
756 | .access(None, &Permission::Privilege( | |
54552dda DM |
757 | &["datastore", "{store}"], |
758 | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, | |
759 | true) | |
760 | ); | |
255f378a | 761 | |
83b7db02 DM |
762 | fn prune( |
763 | param: Value, | |
764 | _info: &ApiMethod, | |
54552dda | 765 | rpcenv: &mut dyn RpcEnvironment, |
83b7db02 DM |
766 | ) -> Result<Value, Error> { |
767 | ||
54552dda | 768 | let store = tools::required_string_param(¶m, "store")?; |
9fdc3ef4 DM |
769 | let backup_type = tools::required_string_param(¶m, "backup-type")?; |
770 | let backup_id = tools::required_string_param(¶m, "backup-id")?; | |
771 | ||
e6dc35ac | 772 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 773 | |
3b03abfe DM |
774 | let dry_run = param["dry-run"].as_bool().unwrap_or(false); |
775 | ||
9fdc3ef4 DM |
776 | let group = BackupGroup::new(backup_type, backup_id); |
777 | ||
54552dda DM |
778 | let datastore = DataStore::lookup_datastore(&store)?; |
779 | ||
bff85572 | 780 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?; |
83b7db02 | 781 | |
9e3f0088 DM |
782 | let prune_options = PruneOptions { |
783 | keep_last: param["keep-last"].as_u64(), | |
102d8d41 | 784 | keep_hourly: param["keep-hourly"].as_u64(), |
9e3f0088 DM |
785 | keep_daily: param["keep-daily"].as_u64(), |
786 | keep_weekly: param["keep-weekly"].as_u64(), | |
787 | keep_monthly: param["keep-monthly"].as_u64(), | |
788 | keep_yearly: param["keep-yearly"].as_u64(), | |
789 | }; | |
8f579717 | 790 | |
4ebda996 | 791 | let worker_id = format!("{}:{}/{}", store, backup_type, backup_id); |
503995c7 | 792 | |
dda70154 DM |
793 | let mut prune_result = Vec::new(); |
794 | ||
795 | let list = group.list_backups(&datastore.base_path())?; | |
796 | ||
797 | let mut prune_info = compute_prune_info(list, &prune_options)?; | |
798 | ||
799 | prune_info.reverse(); // delete older snapshots first | |
800 | ||
801 | let keep_all = !prune_options.keeps_something(); | |
802 | ||
803 | if dry_run { | |
804 | for (info, mut keep) in prune_info { | |
805 | if keep_all { keep = true; } | |
806 | ||
807 | let backup_time = info.backup_dir.backup_time(); | |
808 | let group = info.backup_dir.group(); | |
809 | ||
810 | prune_result.push(json!({ | |
811 | "backup-type": group.backup_type(), | |
812 | "backup-id": group.backup_id(), | |
6a7be83e | 813 | "backup-time": backup_time, |
dda70154 DM |
814 | "keep": keep, |
815 | })); | |
816 | } | |
817 | return Ok(json!(prune_result)); | |
818 | } | |
819 | ||
820 | ||
163e9bbe | 821 | // We use a WorkerTask just to have a task log, but run synchrounously |
e6dc35ac | 822 | let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?; |
dda70154 | 823 | |
f1539300 SR |
824 | if keep_all { |
825 | worker.log("No prune selection - keeping all files."); | |
826 | } else { | |
827 | worker.log(format!("retention options: {}", prune_options.cli_options_string())); | |
828 | worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"", | |
829 | store, backup_type, backup_id)); | |
830 | } | |
3b03abfe | 831 | |
f1539300 SR |
832 | for (info, mut keep) in prune_info { |
833 | if keep_all { keep = true; } | |
dda70154 | 834 | |
f1539300 SR |
835 | let backup_time = info.backup_dir.backup_time(); |
836 | let timestamp = info.backup_dir.backup_time_string(); | |
837 | let group = info.backup_dir.group(); | |
3b03abfe | 838 | |
3b03abfe | 839 | |
f1539300 SR |
840 | let msg = format!( |
841 | "{}/{}/{} {}", | |
842 | group.backup_type(), | |
843 | group.backup_id(), | |
844 | timestamp, | |
845 | if keep { "keep" } else { "remove" }, | |
846 | ); | |
847 | ||
848 | worker.log(msg); | |
849 | ||
850 | prune_result.push(json!({ | |
851 | "backup-type": group.backup_type(), | |
852 | "backup-id": group.backup_id(), | |
853 | "backup-time": backup_time, | |
854 | "keep": keep, | |
855 | })); | |
856 | ||
857 | if !(dry_run || keep) { | |
858 | if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) { | |
859 | worker.warn( | |
860 | format!( | |
861 | "failed to remove dir {:?}: {}", | |
862 | info.backup_dir.relative_path(), err | |
863 | ) | |
864 | ); | |
8f0b4c1f | 865 | } |
8f579717 | 866 | } |
f1539300 | 867 | } |
dd8e744f | 868 | |
f1539300 | 869 | worker.log_result(&Ok(())); |
83b7db02 | 870 | |
dda70154 | 871 | Ok(json!(prune_result)) |
83b7db02 DM |
872 | } |
873 | ||
dfc58d47 DM |
874 | #[api( |
875 | input: { | |
876 | properties: { | |
877 | store: { | |
878 | schema: DATASTORE_SCHEMA, | |
879 | }, | |
880 | }, | |
881 | }, | |
882 | returns: { | |
883 | schema: UPID_SCHEMA, | |
884 | }, | |
bb34b589 | 885 | access: { |
54552dda | 886 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false), |
bb34b589 | 887 | }, |
dfc58d47 DM |
888 | )] |
889 | /// Start garbage collection. | |
6049b71f | 890 | fn start_garbage_collection( |
dfc58d47 | 891 | store: String, |
6049b71f | 892 | _info: &ApiMethod, |
dd5495d6 | 893 | rpcenv: &mut dyn RpcEnvironment, |
6049b71f | 894 | ) -> Result<Value, Error> { |
15e9b4ed | 895 | |
3e6a7dee | 896 | let datastore = DataStore::lookup_datastore(&store)?; |
e6dc35ac | 897 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
15e9b4ed | 898 | |
4fdf5ddf DC |
899 | let job = Job::new("garbage_collection", &store) |
900 | .map_err(|_| format_err!("garbage collection already running"))?; | |
15e9b4ed | 901 | |
0f778e06 | 902 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; |
15e9b4ed | 903 | |
4fdf5ddf DC |
904 | let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout) |
905 | .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?; | |
0f778e06 DM |
906 | |
907 | Ok(json!(upid_str)) | |
15e9b4ed DM |
908 | } |
909 | ||
a92830dc DM |
910 | #[api( |
911 | input: { | |
912 | properties: { | |
913 | store: { | |
914 | schema: DATASTORE_SCHEMA, | |
915 | }, | |
916 | }, | |
917 | }, | |
918 | returns: { | |
919 | type: GarbageCollectionStatus, | |
bb34b589 DM |
920 | }, |
921 | access: { | |
922 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), | |
923 | }, | |
a92830dc DM |
924 | )] |
925 | /// Garbage collection status. | |
5eeea607 | 926 | pub fn garbage_collection_status( |
a92830dc | 927 | store: String, |
6049b71f | 928 | _info: &ApiMethod, |
dd5495d6 | 929 | _rpcenv: &mut dyn RpcEnvironment, |
a92830dc | 930 | ) -> Result<GarbageCollectionStatus, Error> { |
691c89a0 | 931 | |
f2b99c34 DM |
932 | let datastore = DataStore::lookup_datastore(&store)?; |
933 | ||
f2b99c34 | 934 | let status = datastore.last_gc_status(); |
691c89a0 | 935 | |
a92830dc | 936 | Ok(status) |
691c89a0 DM |
937 | } |
938 | ||
bb34b589 | 939 | #[api( |
30fb6025 DM |
940 | returns: { |
941 | description: "List the accessible datastores.", | |
942 | type: Array, | |
943 | items: { | |
944 | description: "Datastore name and description.", | |
455e5f71 | 945 | type: DataStoreListItem, |
30fb6025 DM |
946 | }, |
947 | }, | |
bb34b589 | 948 | access: { |
54552dda | 949 | permission: &Permission::Anybody, |
bb34b589 DM |
950 | }, |
951 | )] | |
952 | /// Datastore list | |
6049b71f DM |
953 | fn get_datastore_list( |
954 | _param: Value, | |
955 | _info: &ApiMethod, | |
54552dda | 956 | rpcenv: &mut dyn RpcEnvironment, |
455e5f71 | 957 | ) -> Result<Vec<DataStoreListItem>, Error> { |
15e9b4ed | 958 | |
d0187a51 | 959 | let (config, _digest) = datastore::config()?; |
15e9b4ed | 960 | |
e6dc35ac | 961 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda DM |
962 | let user_info = CachedUserInfo::new()?; |
963 | ||
30fb6025 | 964 | let mut list = Vec::new(); |
54552dda | 965 | |
30fb6025 | 966 | for (store, (_, data)) in &config.sections { |
e6dc35ac | 967 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
54552dda | 968 | let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0; |
30fb6025 | 969 | if allowed { |
455e5f71 FG |
970 | list.push( |
971 | DataStoreListItem { | |
972 | store: store.clone(), | |
973 | comment: data["comment"].as_str().map(String::from), | |
974 | } | |
975 | ); | |
30fb6025 | 976 | } |
54552dda DM |
977 | } |
978 | ||
30fb6025 | 979 | Ok(list.into()) |
15e9b4ed DM |
980 | } |
981 | ||
0ab08ac9 DM |
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

/// Stream a single raw (as-stored, possibly encrypted) file of a backup
/// snapshot back to the client as `application/octet-stream`.
///
/// Parameters are taken from `param` as declared by
/// `API_METHOD_DOWNLOAD_FILE` above; the snapshot is addressed by
/// store/backup-type/backup-id/backup-time and the file by `file-name`.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Non-owners need Datastore.Read; owners may read their own snapshots.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // On-disk location: <base>/<snapshot relative path>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file in chunks; errors mid-stream can only be logged,
        // as the response status line has already been sent.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1051 | ||
6ef9bb59 DC |
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

/// Stream a snapshot file in *decoded* form (index files are resolved to
/// their full content via the chunk store). Refuses encrypted files, since
/// the server cannot decode them.
///
/// Dispatches on the file-name extension: `.didx` (dynamic index),
/// `.fidx` (fixed index) and `.blob` are supported; everything else fails.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Bail out early if the manifest marks the requested file encrypted —
        // the server has no key and cannot produce decoded output.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // `rsplitn(2, '.')` yields the extension first; a name without a dot
        // yields the whole name and falls through to the error arm below.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index against the manifest before serving anything.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Larger buffer for fixed indexes (typically large VM images).
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1167 | ||
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

/// Store the request body as the snapshot's client log blob
/// (`CLIENT_LOG_BLOB_NAME`). Only the snapshot owner may upload, and only
/// once — an existing log is never overwritten.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Strict ownership check — privileges alone are not sufficient here.
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // Logs are write-once; reject re-uploads.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the whole request body into memory before validating it.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        // Atomic write of the validated blob's raw bytes.
        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1237 | ||
5b1cfa01 DC |
1238 | #[api( |
1239 | input: { | |
1240 | properties: { | |
1241 | store: { | |
1242 | schema: DATASTORE_SCHEMA, | |
1243 | }, | |
1244 | "backup-type": { | |
1245 | schema: BACKUP_TYPE_SCHEMA, | |
1246 | }, | |
1247 | "backup-id": { | |
1248 | schema: BACKUP_ID_SCHEMA, | |
1249 | }, | |
1250 | "backup-time": { | |
1251 | schema: BACKUP_TIME_SCHEMA, | |
1252 | }, | |
1253 | "filepath": { | |
1254 | description: "Base64 encoded path.", | |
1255 | type: String, | |
1256 | } | |
1257 | }, | |
1258 | }, | |
1259 | access: { | |
1260 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), | |
1261 | }, | |
1262 | )] | |
1263 | /// Get the entries of the given path of the catalog | |
1264 | fn catalog( | |
1265 | store: String, | |
1266 | backup_type: String, | |
1267 | backup_id: String, | |
1268 | backup_time: i64, | |
1269 | filepath: String, | |
1270 | _param: Value, | |
1271 | _info: &ApiMethod, | |
1272 | rpcenv: &mut dyn RpcEnvironment, | |
1273 | ) -> Result<Value, Error> { | |
1274 | let datastore = DataStore::lookup_datastore(&store)?; | |
1275 | ||
e6dc35ac | 1276 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
5b1cfa01 | 1277 | |
e0e5b442 | 1278 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
5b1cfa01 | 1279 | |
bff85572 | 1280 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?; |
5b1cfa01 | 1281 | |
9238cdf5 FG |
1282 | let file_name = CATALOG_NAME; |
1283 | ||
2d55beec | 1284 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; |
9238cdf5 FG |
1285 | for file in files { |
1286 | if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1287 | bail!("cannot decode '{}' - is encrypted", file_name); | |
1288 | } | |
1289 | } | |
1290 | ||
5b1cfa01 DC |
1291 | let mut path = datastore.base_path(); |
1292 | path.push(backup_dir.relative_path()); | |
9238cdf5 | 1293 | path.push(file_name); |
5b1cfa01 DC |
1294 | |
1295 | let index = DynamicIndexReader::open(&path) | |
1296 | .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; | |
1297 | ||
2d55beec FG |
1298 | let (csum, size) = index.compute_csum(); |
1299 | manifest.verify_file(&file_name, &csum, size)?; | |
1300 | ||
14f6c9cb | 1301 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); |
5b1cfa01 DC |
1302 | let reader = BufferedDynamicReader::new(index, chunk_reader); |
1303 | ||
1304 | let mut catalog_reader = CatalogReader::new(reader); | |
1305 | let mut current = catalog_reader.root()?; | |
1306 | let mut components = vec![]; | |
1307 | ||
1308 | ||
1309 | if filepath != "root" { | |
1310 | components = base64::decode(filepath)?; | |
1311 | if components.len() > 0 && components[0] == '/' as u8 { | |
1312 | components.remove(0); | |
1313 | } | |
1314 | for component in components.split(|c| *c == '/' as u8) { | |
1315 | if let Some(entry) = catalog_reader.lookup(¤t, component)? { | |
1316 | current = entry; | |
1317 | } else { | |
1318 | bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components)); | |
1319 | } | |
1320 | } | |
1321 | } | |
1322 | ||
1323 | let mut res = Vec::new(); | |
1324 | ||
1325 | for direntry in catalog_reader.read_dir(¤t)? { | |
1326 | let mut components = components.clone(); | |
1327 | components.push('/' as u8); | |
1328 | components.extend(&direntry.name); | |
1329 | let path = base64::encode(components); | |
1330 | let text = String::from_utf8_lossy(&direntry.name); | |
1331 | let mut entry = json!({ | |
1332 | "filepath": path, | |
1333 | "text": text, | |
1334 | "type": CatalogEntryType::from(&direntry.attr).to_string(), | |
1335 | "leaf": true, | |
1336 | }); | |
1337 | match direntry.attr { | |
1338 | DirEntryAttribute::Directory { start: _ } => { | |
1339 | entry["leaf"] = false.into(); | |
1340 | }, | |
1341 | DirEntryAttribute::File { size, mtime } => { | |
1342 | entry["size"] = size.into(); | |
1343 | entry["mtime"] = mtime.into(); | |
1344 | }, | |
1345 | _ => {}, | |
1346 | } | |
1347 | res.push(entry); | |
1348 | } | |
1349 | ||
1350 | Ok(res.into()) | |
1351 | } | |
1352 | ||
53a561a2 WB |
/// Recursively add `file` (and, for directories, everything below it) to
/// the given zip encoder. Entry paths are made relative by stripping
/// `prefix`.
///
/// Returns a boxed future because the function recurses in async position.
/// Hardlinks are resolved via the accessor and stored as regular files;
/// entry kinds other than file/hardlink/directory are silently skipped.
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        // Zip entries are stored relative to the download root.
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // Resolve the link target and store its contents under the
                // link's own path (zip has no hardlink concept).
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                // Emit the directory entry itself (no contents) ...
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                // ... then recurse into each child.
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
1412 | ||
d33d8f4e DC |
1413 | #[sortable] |
1414 | pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( | |
1415 | &ApiHandler::AsyncHttp(&pxar_file_download), | |
1416 | &ObjectSchema::new( | |
1ffe0301 | 1417 | "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.", |
d33d8f4e DC |
1418 | &sorted!([ |
1419 | ("store", false, &DATASTORE_SCHEMA), | |
1420 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
1421 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
1422 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
1423 | ("filepath", false, &StringSchema::new("Base64 encoded path").schema()), | |
1424 | ]), | |
1425 | ) | |
1426 | ).access(None, &Permission::Privilege( | |
1427 | &["datastore", "{store}"], | |
1428 | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
1429 | true) | |
1430 | ); | |
1431 | ||
1432 | fn pxar_file_download( | |
1433 | _parts: Parts, | |
1434 | _req_body: Body, | |
1435 | param: Value, | |
1436 | _info: &ApiMethod, | |
1437 | rpcenv: Box<dyn RpcEnvironment>, | |
1438 | ) -> ApiResponseFuture { | |
1439 | ||
1440 | async move { | |
1441 | let store = tools::required_string_param(¶m, "store")?; | |
1442 | let datastore = DataStore::lookup_datastore(&store)?; | |
1443 | ||
e6dc35ac | 1444 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
d33d8f4e DC |
1445 | |
1446 | let filepath = tools::required_string_param(¶m, "filepath")?.to_owned(); | |
1447 | ||
1448 | let backup_type = tools::required_string_param(¶m, "backup-type")?; | |
1449 | let backup_id = tools::required_string_param(¶m, "backup-id")?; | |
1450 | let backup_time = tools::required_integer_param(¶m, "backup-time")?; | |
1451 | ||
e0e5b442 | 1452 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
d33d8f4e | 1453 | |
bff85572 | 1454 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?; |
d33d8f4e | 1455 | |
d33d8f4e DC |
1456 | let mut components = base64::decode(&filepath)?; |
1457 | if components.len() > 0 && components[0] == '/' as u8 { | |
1458 | components.remove(0); | |
1459 | } | |
1460 | ||
1461 | let mut split = components.splitn(2, |c| *c == '/' as u8); | |
9238cdf5 | 1462 | let pxar_name = std::str::from_utf8(split.next().unwrap())?; |
d33d8f4e | 1463 | let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?; |
2d55beec | 1464 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; |
9238cdf5 FG |
1465 | for file in files { |
1466 | if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1467 | bail!("cannot decode '{}' - is encrypted", pxar_name); | |
1468 | } | |
1469 | } | |
d33d8f4e | 1470 | |
9238cdf5 FG |
1471 | let mut path = datastore.base_path(); |
1472 | path.push(backup_dir.relative_path()); | |
1473 | path.push(pxar_name); | |
d33d8f4e DC |
1474 | |
1475 | let index = DynamicIndexReader::open(&path) | |
1476 | .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; | |
1477 | ||
2d55beec FG |
1478 | let (csum, size) = index.compute_csum(); |
1479 | manifest.verify_file(&pxar_name, &csum, size)?; | |
1480 | ||
14f6c9cb | 1481 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); |
d33d8f4e DC |
1482 | let reader = BufferedDynamicReader::new(index, chunk_reader); |
1483 | let archive_size = reader.archive_size(); | |
1484 | let reader = LocalDynamicReadAt::new(reader); | |
1485 | ||
1486 | let decoder = Accessor::new(reader, archive_size).await?; | |
1487 | let root = decoder.open_root().await?; | |
1488 | let file = root | |
1489 | .lookup(OsStr::from_bytes(file_path)).await? | |
1490 | .ok_or(format_err!("error opening '{:?}'", file_path))?; | |
1491 | ||
804f6143 DC |
1492 | let body = match file.kind() { |
1493 | EntryKind::File { .. } => Body::wrap_stream( | |
1494 | AsyncReaderStream::new(file.contents().await?).map_err(move |err| { | |
1495 | eprintln!("error during streaming of file '{:?}' - {}", filepath, err); | |
1496 | err | |
1497 | }), | |
1498 | ), | |
1499 | EntryKind::Hardlink(_) => Body::wrap_stream( | |
1500 | AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?) | |
1501 | .map_err(move |err| { | |
1502 | eprintln!( | |
1503 | "error during streaming of hardlink '{:?}' - {}", | |
1504 | filepath, err | |
1505 | ); | |
1506 | err | |
1507 | }), | |
1508 | ), | |
1509 | EntryKind::Directory => { | |
1510 | let (sender, receiver) = tokio::sync::mpsc::channel(100); | |
1511 | let mut prefix = PathBuf::new(); | |
1512 | let mut components = file.entry().path().components(); | |
1513 | components.next_back(); // discar last | |
1514 | for comp in components { | |
1515 | prefix.push(comp); | |
1516 | } | |
d33d8f4e | 1517 | |
804f6143 | 1518 | let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024); |
804f6143 DC |
1519 | |
1520 | crate::server::spawn_internal_task(async move { | |
53a561a2 WB |
1521 | let mut zipencoder = ZipEncoder::new(channelwriter); |
1522 | let mut decoder = decoder; | |
1523 | recurse_files(&mut zipencoder, &mut decoder, &prefix, file) | |
804f6143 DC |
1524 | .await |
1525 | .map_err(|err| eprintln!("error during creating of zip: {}", err))?; | |
1526 | ||
1527 | zipencoder | |
1528 | .finish() | |
1529 | .await | |
1530 | .map_err(|err| eprintln!("error during finishing of zip: {}", err)) | |
1531 | }); | |
1532 | ||
1533 | Body::wrap_stream(receiver.map_err(move |err| { | |
1534 | eprintln!("error during streaming of zip '{:?}' - {}", filepath, err); | |
d33d8f4e | 1535 | err |
804f6143 DC |
1536 | })) |
1537 | } | |
1538 | other => bail!("cannot download file of type {:?}", other), | |
1539 | }; | |
d33d8f4e DC |
1540 | |
1541 | // fixme: set other headers ? | |
1542 | Ok(Response::builder() | |
1543 | .status(StatusCode::OK) | |
1544 | .header(header::CONTENT_TYPE, "application/octet-stream") | |
1545 | .body(body) | |
1546 | .unwrap()) | |
1547 | }.boxed() | |
1548 | } | |
1549 | ||
1a0d3d11 DM |
1550 | #[api( |
1551 | input: { | |
1552 | properties: { | |
1553 | store: { | |
1554 | schema: DATASTORE_SCHEMA, | |
1555 | }, | |
1556 | timeframe: { | |
1557 | type: RRDTimeFrameResolution, | |
1558 | }, | |
1559 | cf: { | |
1560 | type: RRDMode, | |
1561 | }, | |
1562 | }, | |
1563 | }, | |
1564 | access: { | |
1565 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), | |
1566 | }, | |
1567 | )] | |
1568 | /// Read datastore stats | |
1569 | fn get_rrd_stats( | |
1570 | store: String, | |
1571 | timeframe: RRDTimeFrameResolution, | |
1572 | cf: RRDMode, | |
1573 | _param: Value, | |
1574 | ) -> Result<Value, Error> { | |
1575 | ||
431cc7b1 DC |
1576 | create_value_from_rrd( |
1577 | &format!("datastore/{}", store), | |
1a0d3d11 DM |
1578 | &[ |
1579 | "total", "used", | |
c94e1f65 DM |
1580 | "read_ios", "read_bytes", |
1581 | "write_ios", "write_bytes", | |
1582 | "io_ticks", | |
1a0d3d11 DM |
1583 | ], |
1584 | timeframe, | |
1585 | cf, | |
1586 | ) | |
1587 | } | |
1588 | ||
912b3f5b DM |
1589 | #[api( |
1590 | input: { | |
1591 | properties: { | |
1592 | store: { | |
1593 | schema: DATASTORE_SCHEMA, | |
1594 | }, | |
1595 | "backup-type": { | |
1596 | schema: BACKUP_TYPE_SCHEMA, | |
1597 | }, | |
1598 | "backup-id": { | |
1599 | schema: BACKUP_ID_SCHEMA, | |
1600 | }, | |
1601 | "backup-time": { | |
1602 | schema: BACKUP_TIME_SCHEMA, | |
1603 | }, | |
1604 | }, | |
1605 | }, | |
1606 | access: { | |
1401f4be | 1607 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), |
912b3f5b DM |
1608 | }, |
1609 | )] | |
1610 | /// Get "notes" for a specific backup | |
1611 | fn get_notes( | |
1612 | store: String, | |
1613 | backup_type: String, | |
1614 | backup_id: String, | |
1615 | backup_time: i64, | |
1616 | rpcenv: &mut dyn RpcEnvironment, | |
1617 | ) -> Result<String, Error> { | |
1618 | let datastore = DataStore::lookup_datastore(&store)?; | |
1619 | ||
e6dc35ac | 1620 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
e0e5b442 | 1621 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
912b3f5b | 1622 | |
1401f4be | 1623 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?; |
912b3f5b | 1624 | |
883aa6d5 | 1625 | let (manifest, _) = datastore.load_manifest(&backup_dir)?; |
912b3f5b | 1626 | |
883aa6d5 | 1627 | let notes = manifest.unprotected["notes"] |
912b3f5b DM |
1628 | .as_str() |
1629 | .unwrap_or(""); | |
1630 | ||
1631 | Ok(String::from(notes)) | |
1632 | } | |
1633 | ||
1634 | #[api( | |
1635 | input: { | |
1636 | properties: { | |
1637 | store: { | |
1638 | schema: DATASTORE_SCHEMA, | |
1639 | }, | |
1640 | "backup-type": { | |
1641 | schema: BACKUP_TYPE_SCHEMA, | |
1642 | }, | |
1643 | "backup-id": { | |
1644 | schema: BACKUP_ID_SCHEMA, | |
1645 | }, | |
1646 | "backup-time": { | |
1647 | schema: BACKUP_TIME_SCHEMA, | |
1648 | }, | |
1649 | notes: { | |
1650 | description: "A multiline text.", | |
1651 | }, | |
1652 | }, | |
1653 | }, | |
1654 | access: { | |
b728a69e FG |
1655 | permission: &Permission::Privilege(&["datastore", "{store}"], |
1656 | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP, | |
1657 | true), | |
912b3f5b DM |
1658 | }, |
1659 | )] | |
1660 | /// Set "notes" for a specific backup | |
1661 | fn set_notes( | |
1662 | store: String, | |
1663 | backup_type: String, | |
1664 | backup_id: String, | |
1665 | backup_time: i64, | |
1666 | notes: String, | |
1667 | rpcenv: &mut dyn RpcEnvironment, | |
1668 | ) -> Result<(), Error> { | |
1669 | let datastore = DataStore::lookup_datastore(&store)?; | |
1670 | ||
e6dc35ac | 1671 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
e0e5b442 | 1672 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
912b3f5b | 1673 | |
b728a69e | 1674 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?; |
912b3f5b | 1675 | |
1a374fcf SR |
1676 | datastore.update_manifest(&backup_dir,|manifest| { |
1677 | manifest.unprotected["notes"] = notes.into(); | |
1678 | }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?; | |
912b3f5b DM |
1679 | |
1680 | Ok(()) | |
1681 | } | |
1682 | ||
72be0eb1 | 1683 | #[api( |
4940012d | 1684 | input: { |
72be0eb1 DW |
1685 | properties: { |
1686 | store: { | |
1687 | schema: DATASTORE_SCHEMA, | |
1688 | }, | |
1689 | "backup-type": { | |
1690 | schema: BACKUP_TYPE_SCHEMA, | |
1691 | }, | |
1692 | "backup-id": { | |
1693 | schema: BACKUP_ID_SCHEMA, | |
1694 | }, | |
1695 | "new-owner": { | |
e6dc35ac | 1696 | type: Authid, |
72be0eb1 DW |
1697 | }, |
1698 | }, | |
4940012d FG |
1699 | }, |
1700 | access: { | |
bff85572 FG |
1701 | permission: &Permission::Anybody, |
1702 | description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup" | |
4940012d | 1703 | }, |
72be0eb1 DW |
1704 | )] |
1705 | /// Change owner of a backup group | |
1706 | fn set_backup_owner( | |
1707 | store: String, | |
1708 | backup_type: String, | |
1709 | backup_id: String, | |
e6dc35ac | 1710 | new_owner: Authid, |
bff85572 | 1711 | rpcenv: &mut dyn RpcEnvironment, |
72be0eb1 DW |
1712 | ) -> Result<(), Error> { |
1713 | ||
1714 | let datastore = DataStore::lookup_datastore(&store)?; | |
1715 | ||
1716 | let backup_group = BackupGroup::new(backup_type, backup_id); | |
1717 | ||
bff85572 FG |
1718 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
1719 | ||
72be0eb1 DW |
1720 | let user_info = CachedUserInfo::new()?; |
1721 | ||
bff85572 FG |
1722 | let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
1723 | ||
1724 | let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 { | |
1725 | // High-privilege user/token | |
1726 | true | |
1727 | } else if (privs & PRIV_DATASTORE_BACKUP) != 0 { | |
1728 | let owner = datastore.get_owner(&backup_group)?; | |
1729 | ||
1730 | match (owner.is_token(), new_owner.is_token()) { | |
1731 | (true, true) => { | |
1732 | // API token to API token, owned by same user | |
1733 | let owner = owner.user(); | |
1734 | let new_owner = new_owner.user(); | |
1735 | owner == new_owner && Authid::from(owner.clone()) == auth_id | |
1736 | }, | |
1737 | (true, false) => { | |
1738 | // API token to API token owner | |
1739 | Authid::from(owner.user().clone()) == auth_id | |
1740 | && new_owner == auth_id | |
1741 | }, | |
1742 | (false, true) => { | |
1743 | // API token owner to API token | |
1744 | owner == auth_id | |
1745 | && Authid::from(new_owner.user().clone()) == auth_id | |
1746 | }, | |
1747 | (false, false) => { | |
1748 | // User to User, not allowed for unprivileged users | |
1749 | false | |
1750 | }, | |
1751 | } | |
1752 | } else { | |
1753 | false | |
1754 | }; | |
1755 | ||
1756 | if !allowed { | |
1757 | return Err(http_err!(UNAUTHORIZED, | |
1758 | "{} does not have permission to change owner of backup group '{}' to {}", | |
1759 | auth_id, | |
1760 | backup_group, | |
1761 | new_owner, | |
1762 | )); | |
1763 | } | |
1764 | ||
e6dc35ac FG |
1765 | if !user_info.is_active_auth_id(&new_owner) { |
1766 | bail!("{} '{}' is inactive or non-existent", | |
1767 | if new_owner.is_token() { | |
1768 | "API token".to_string() | |
1769 | } else { | |
1770 | "user".to_string() | |
1771 | }, | |
1772 | new_owner); | |
72be0eb1 DW |
1773 | } |
1774 | ||
1775 | datastore.set_owner(&backup_group, &new_owner, true)?; | |
1776 | ||
1777 | Ok(()) | |
1778 | } | |
1779 | ||
// API subdirectories available below a single datastore
// (`/admin/datastore/{store}/...`). Entries are kept in alphabetical
// order by path segment; `#[sortable]` marks the list for the sorted
// sub-directory dispatch used by `list_subdirs_api_method!` below.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        // garbage collection: GET returns status, POST starts a run
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        // per-snapshot "notes": GET reads, PUT replaces
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        // snapshots: GET lists, DELETE removes a single snapshot
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1861 | ||
// Router for a single datastore: GET on the directory itself lists the
// available subdirectories; everything else dispatches into
// DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1865 | ||
1866 | ||
// Top-level router for this module: GET lists all datastores, and the
// `{store}` path parameter is captured and routed to the per-datastore
// router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);