// proxmox-backup: src/api2/backup/mod.rs
//! Backup protocol (HTTP2 upgrade)

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

use crate::tools;
use crate::server::{WorkerTask, H2Service};
use crate::backup::*;
use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo;
use crate::tools::fs::lock_dir_noblock_shared;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);
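
// The upgrade handshake on the wire looks roughly like this (sketch; it
// assumes PROXMOX_BACKUP_PROTOCOL_ID_V1! expands to the string shown):
//
//   GET /api2/json/backup?store=...&backup-type=...&backup-id=...&backup-time=... HTTP/1.1
//   Upgrade: proxmox-backup-protocol-v1
//   Connection: Upgrade
//
//   HTTP/1.1 101 Switching Protocols
//   Upgrade: proxmox-backup-protocol-v1
//
// Afterwards both peers speak HTTP/2 (h2c) on the same TCP connection,
// served by the BACKUP_API_ROUTER defined below.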

fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = tools::required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("the --benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
        let correct_owner = owner == auth_id
            || (owner.is_token()
                && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => {
                        match verify.state {
                            VerifyState::Ok => Some(info),
                            VerifyState::Failed => None,
                        }
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };
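        // `last_backup` is the base for incremental backups and for the
        // `previous` download endpoint; snapshots with a failed verify
        // state are skipped above so new backups never build on known-bad data.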

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new { bail!("backup directory already exists."); }

        WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
            let mut env = BackupEnvironment::new(
                env_type, auth_id, worker.clone(), datastore, backup_dir);

            env.debug = debug;
            env.last_backup = last_backup;

            env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

            let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

            let abort_future = worker.abort_future();

            let env2 = env.clone();

            let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                .map_err(Error::from)
                .and_then(move |conn| {
                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
                    // increase window size: todo - find optimal size
                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
                    http.http2_initial_stream_window_size(window_size);
                    http.http2_initial_connection_window_size(window_size);
                    http.http2_max_frame_size(4*1024*1024);

                    let env3 = env2.clone();
                    http.serve_connection(conn, service)
                        .map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid "Transport endpoint is not connected" (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error") && env3.finished() {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                });
            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

            async move {
                // keep flock until task ends
                let _group_guard = _group_guard;
                let snap_guard = snap_guard;
                let _last_guard = _last_guard;

                let res = select!{
                    req = req_fut => req,
                    abrt = abort_future => abrt,
                };
                if benchmark {
                    env.log("benchmark finished successfully");
                    pbs_runtime::block_in_place(|| env.remove_backup())?;
                    return Ok(());
                }

                let verify = |env: BackupEnvironment| {
                    if let Err(err) = env.verify_after_complete(snap_guard) {
                        env.log(format!(
                            "backup finished, but starting the requested verify task failed: {}",
                            err
                        ));
                    }
                };

                match (res, env.ensure_finished()) {
                    (Ok(_), Ok(())) => {
                        env.log("backup finished successfully");
                        verify(env);
                        Ok(())
                    },
                    (Err(err), Ok(())) => {
                        // ignore errors after finish
                        env.log(format!("backup had errors but finished: {}", err));
                        verify(env);
                        Ok(())
                    },
                    (Ok(_), Err(err)) => {
                        env.log(format!("backup ended and finish failed: {}", err));
                        env.log("removing unfinished backup");
                        pbs_runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                    (Err(err), Err(_)) => {
                        env.log(format!("backup failed: {}", err));
                        env.log("removing failed backup");
                        pbs_runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                }
            }
        })?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .body(Body::empty())?;

        Ok(response)
    }.boxed()
}

const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "previous_backup_time", &Router::new()
            .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);
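
// A typical session over the upgraded HTTP/2 connection (sketch, assuming a
// fixed-index backup of a disk image; dynamic archives use the dynamic_*
// endpoints analogously):
//
//   POST fixed_index   { archive-name, size [, reuse-csum] }   -> wid
//   POST fixed_chunk   (upload chunk data; repeated)
//   PUT  fixed_index   { wid, digest-list, offset-list }       (repeated)
//   POST fixed_close   { wid, chunk-count, size, csum }
//   POST finish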

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = tools::required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = tools::required_string_param(&param, "archive-name")?.to_owned();
    let size = tools::required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096*1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
        if old_csum != csum {
            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum, old_csum);
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}
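
// Incremental fixed backups (sketch): before calling `fixed_index` with
// `reuse-csum`, a client would compute the csum of the previous index it
// holds, roughly like this (hypothetical client-side code; `last_fidx_path`
// is an assumed name for the client's copy of the previous .fidx):
//
//   let index = FixedIndexReader::open(&last_fidx_path)?;
//   let (csum, _size) = index.compute_csum();
//   let reuse_csum = proxmox::tools::digest_to_hex(&csum);
//
// If the csum matches the server's previous index, the new writer starts as
// a clone of that index and only changed chunks need to be appended.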

#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn dynamic_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let digest_list = tools::required_array_param(&param, "digest-list")?;
    let offset_list = tools::required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

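    // Every digest must already be known to this backup session, either from
    // an earlier chunk upload or from registering the previous snapshot's
    // chunks (see `download_previous`); unknown digests are rejected here.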
    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            )
        ]),
    )
);

fn fixed_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let digest_list = tools::required_array_param(&param, "digest-list")?;
    let offset_list = tools::required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_dynamic_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
    let size = tools::required_integer_param(&param, "size")? as u64;
    let csum_str = tools::required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
    let size = tools::required_integer_param(&param, "size")? as u64;
    let csum_str = tools::required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup (
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new(
        "Get previous backup time.",
        &[],
    )
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

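        // Register the chunks referenced by the previous index with this
        // session, so the client can reference them in later *_append calls
        // without re-uploading the data.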
        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => { None }
            };
            if let Some(index) = index {
                env.log(format!("register chunks in '{}' from previous backup.", archive_name));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }.boxed()
}