// proxmox-backup: src/api2/backup/mod.rs
//! Backup protocol (HTTP2 upgrade)
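//!
//! Overview (summarizing the flow implemented below): the client starts with a
//! plain HTTP/1.1 request carrying an `Upgrade` header that names the backup
//! protocol. After the server answers with `101 Switching Protocols`, both
//! sides speak HTTP/2 on the same connection, and the client drives the writer
//! endpoints listed in `BACKUP_API_SUBDIRS` to create indexes, upload chunks,
//! and finally mark the backup as finished.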

use anyhow::{bail, format_err, Error};
use futures::*;
use hex::FromHex;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use serde_json::{json, Value};

use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::sortable;

use pbs_api_types::{
    Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
    DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
56 Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
57 &Permission::Anybody
58 );
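
// Illustrative only: a client-side upgrade request could look roughly like the
// following. The exact URL path and the literal protocol string are assumptions
// here, since PROXMOX_BACKUP_PROTOCOL_ID_V1!() is defined elsewhere:
//
//   GET /api2/json/backup?store=store1&backup-type=host&backup-id=myhost&backup-time=1672531200 HTTP/1.1
//   Upgrade: proxmox-backup-protocol-v1
//
// On success the server replies with `101 Switching Protocols` and echoes the
// protocol name in the `Upgrade` header (see the end of the handler below),
// after which the same connection carries HTTP/2 frames.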

fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(
            &auth_id,
            &["datastore", &store],
            PRIV_DATASTORE_BACKUP,
            false,
        )?;

        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!(
                "unexpected http version '{:?}' (expected version < 2)",
                parts.version
            );
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
114 bail!("unable to run benchmark without --benchmark flags");
            }
            "benchmark"
        } else {
            if benchmark {
119 bail!("benchmark flags is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) =
            datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
        let correct_owner =
            owner == auth_id || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true)
                .unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => match verify.state {
                        VerifyState::Ok => Some(info),
                        VerifyState::Failed => None,
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(
                &full_path,
                "snapshot",
                "base snapshot is already locked by another operation",
            )?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new {
            bail!("backup directory already exists.");
        }
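
        // At this point three guards are held (all moved into the worker's
        // async block below so they live until the task ends): `_group_guard`
        // allows only one writer per backup group, `_last_guard` keeps a shared
        // lock on the previous snapshot so it cannot be pruned while it serves
        // as the incremental base, and `snap_guard` protects the newly created
        // snapshot directory.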

        WorkerTask::spawn(
            worker_type,
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| {
                let mut env = BackupEnvironment::new(
                    env_type,
                    auth_id,
                    worker.clone(),
                    datastore,
                    backup_dir,
                );

                env.debug = debug;
                env.last_backup = last_backup;

                env.log(format!(
                    "starting new {} on datastore '{}': {:?}",
                    worker_type, store, path
                ));

                let service =
                    H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

                let abort_future = worker.abort_future();

                let env2 = env.clone();

                let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                    .map_err(Error::from)
                    .and_then(move |conn| {
                        env2.debug("protocol upgrade done");

                        let mut http = hyper::server::conn::Http::new();
                        http.http2_only(true);
                        // increase window size: todo - find optimal size
                        let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
                        http.http2_initial_stream_window_size(window_size);
                        http.http2_initial_connection_window_size(window_size);
                        http.http2_max_frame_size(4 * 1024 * 1024);

                        let env3 = env2.clone();
                        http.serve_connection(conn, service).map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid "Transport endpoint is not connected" (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error")
                                        && env3.finished()
                                    {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                    });
                let mut abort_future = abort_future.map(|_| Err(format_err!("task aborted")));

                async move {
                    // keep flock until task ends
                    let _group_guard = _group_guard;
                    let snap_guard = snap_guard;
                    let _last_guard = _last_guard;

                    let res = select! {
                        req = req_fut => req,
                        abrt = abort_future => abrt,
                    };
                    if benchmark {
                        env.log("benchmark finished successfully");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        return Ok(());
                    }

                    let verify = |env: BackupEnvironment| {
                        if let Err(err) = env.verify_after_complete(snap_guard) {
                            env.log(format!(
                                "backup finished, but starting the requested verify task failed: {}",
                                err
                            ));
                        }
                    };

                    match (res, env.ensure_finished()) {
                        (Ok(_), Ok(())) => {
                            env.log("backup finished successfully");
                            verify(env);
                            Ok(())
                        }
                        (Err(err), Ok(())) => {
                            // ignore errors after finish
                            env.log(format!("backup had errors but finished: {}", err));
                            verify(env);
                            Ok(())
                        }
                        (Ok(_), Err(err)) => {
                            env.log(format!("backup ended and finish failed: {}", err));
                            env.log("removing unfinished backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                        (Err(err), Err(_)) => {
                            env.log(format!("backup failed: {}", err));
                            env.log("removing failed backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                    }
                }
            },
        )?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(
                UPGRADE,
                HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
            )
            .body(Body::empty())?;

        Ok(response)
    }
    .boxed()
}

const BACKUP_API_SUBDIRS: SubdirMap = &[
    ("blob", &Router::new().upload(&API_METHOD_UPLOAD_BLOB)),
    (
        "dynamic_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK),
    ),
    (
        "dynamic_close",
        &Router::new().post(&API_METHOD_CLOSE_DYNAMIC_INDEX),
    ),
    (
        "dynamic_index",
        &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND),
    ),
    (
        "finish",
        &Router::new().post(&ApiMethod::new(
            &ApiHandler::Sync(&finish_backup),
            &ObjectSchema::new("Mark backup as finished.", &[]),
        )),
    ),
    (
        "fixed_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_FIXED_CHUNK),
    ),
    (
        "fixed_close",
        &Router::new().post(&API_METHOD_CLOSE_FIXED_INDEX),
    ),
    (
        "fixed_index",
        &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND),
    ),
    (
        "previous",
        &Router::new().download(&API_METHOD_DOWNLOAD_PREVIOUS),
    ),
    (
        "previous_backup_time",
        &Router::new().get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME),
    ),
    (
        "speedtest",
        &Router::new().upload(&API_METHOD_UPLOAD_SPEEDTEST),
    ),
];
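
// A typical writer session over the upgraded HTTP/2 connection, as suggested by
// the routes above (a sketch, not a normative protocol description):
//
//   1. post `dynamic_index`/`fixed_index`   -> create an index, returns a writer id (wid)
//   2. upload `dynamic_chunk`/`fixed_chunk` -> upload chunk data for that writer
//   3. put `dynamic_index`/`fixed_index`    -> append chunk digests at offsets
//   4. post `dynamic_close`/`fixed_close`   -> check chunk count, size and csum
//   5. post `finish`                        -> mark the whole backup as finished
//
// `previous` and `previous_backup_time` support incremental backups against the
// last snapshot; `blob` uploads whole blobs and `speedtest` is for benchmarking
// uploads.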

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
    ),
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            (
                "size",
                false,
                &IntegerSchema::new("File size.").minimum(1).schema()
            ),
            (
                "reuse-csum",
                true,
                &StringSchema::new(
                    "If set, compare last backup's \
                    csum and reuse index for incremental backup if it matches."
                )
                .schema()
            ),
        ]),
    ),
);
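
// Illustrative parameters for an incremental fixed index (archive name and
// values are made up). `reuse-csum` must equal the csum of the same archive in
// the previous backup; the comparison happens in `create_fixed_index` below:
//
//   { "archive-name": "drive-scsi0.img.fidx",
//     "size": 34359738368,
//     "reuse-csum": "<64 hex digits of the previous index csum>" }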

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096 * 1024; // fixed 4 MiB chunks (todo: find optimal size)

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = hex::encode(&old_csum);
        if old_csum != csum {
            bail!(
                "expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum,
                old_csum
            );
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            ),
        ]),
    ),
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to dynamic index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
}
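
// Illustrative append payload (the digest is made up; each referenced chunk
// must have been uploaded or registered first, otherwise `lookup_chunk` fails
// above). `fixed_append` below takes the same shape:
//
//   { "wid": 1,
//     "digest-list": ["9595c9df90075148eb06860365df33584b75bff782a510c6cd4883a419833d50"],
//     "offset-list": [0] }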

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            )
        ]),
    ),
);

fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to fixed index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new(
                    "Chunk count. This is used to verify that the server got all chunks."
                )
                .minimum(1)
                .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new(
                    "File size. This is used to verify that the server got all data."
                )
                .minimum(1)
                .schema()
            ),
            (
                "csum",
                false,
                &StringSchema::new("Digest list checksum.").schema()
            ),
        ]),
    ),
);

fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new("Get previous backup time.", &[]),
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env
        .last_backup
        .as_ref()
        .map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)]),
    ),
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

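        // Register every chunk of the previous index as known, so the client
        // can reference unchanged chunks in later append calls without
        // re-uploading their data.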
        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => None,
            };
            if let Some(index) = index {
                env.log(format!(
                    "register chunks in '{}' from previous backup.",
                    archive_name
                ));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }
    .boxed()
}