//! Backup protocol (HTTP2 upgrade)

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

use pbs_api_types::{
    Authid, VerifyState, SnapshotVerifyState,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_tools::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};

use crate::server::{WorkerTask, H2Service};
use crate::backup::DataStore;
use crate::config::cached_user_info::CachedUserInfo;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);
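
// A hedged sketch of the client-side handshake this method implements. The
// concrete mount point of ROUTER is defined elsewhere; the path and the
// expanded protocol id below are illustrative assumptions:
//
//   GET /api2/json/backup?store=store1&backup-type=vm&backup-id=100&backup-time=1630000000 HTTP/1.1
//   Upgrade: proxmox-backup-protocol-v1
//
// The request must arrive over HTTP < 2 (checked below); on success the
// server replies "101 Switching Protocols" and both sides continue on the
// same connection speaking HTTP/2, served by H2Service.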

fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("the benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
        let correct_owner = owner == auth_id
            || (owner.is_token()
                && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

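        // A hedged illustration of the manifest data inspected below: the
        // manifest's "unprotected" section is free-form JSON; a verified
        // snapshot carries something like (exact field spelling follows
        // SnapshotVerifyState's serde rules; the "upid" value is a
        // placeholder):
        //
        //   "unprotected": {
        //       "verify_state": { "state": "ok", "upid": "UPID:..." }
        //   }
        //
        // Only a snapshot whose last verification succeeded, or one that was
        // never verified, is accepted as base for an incremental backup.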
        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => {
                        match verify.state {
                            VerifyState::Ok => Some(info),
                            VerifyState::Failed => None,
                        }
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new { bail!("backup directory already exists."); }

        WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
            let mut env = BackupEnvironment::new(
                env_type, auth_id, worker.clone(), datastore, backup_dir);

            env.debug = debug;
            env.last_backup = last_backup;

            env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

            let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

            let abort_future = worker.abort_future();

            let env2 = env.clone();

            let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                .map_err(Error::from)
                .and_then(move |conn| {
                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
                    // increase window size: todo - find optimal size
                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
                    http.http2_initial_stream_window_size(window_size);
                    http.http2_initial_connection_window_size(window_size);
                    http.http2_max_frame_size(4*1024*1024);

                    let env3 = env2.clone();
                    http.serve_connection(conn, service)
                        .map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid "Transport endpoint is not connected" (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error") && env3.finished() {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                });
            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

            async move {
                // keep flock until task ends
                let _group_guard = _group_guard;
                let snap_guard = snap_guard;
                let _last_guard = _last_guard;

                let res = select! {
                    req = req_fut => req,
                    abrt = abort_future => abrt,
                };
                if benchmark {
                    env.log("benchmark finished successfully");
                    pbs_runtime::block_in_place(|| env.remove_backup())?;
                    return Ok(());
                }

                let verify = |env: BackupEnvironment| {
                    if let Err(err) = env.verify_after_complete(snap_guard) {
                        env.log(format!(
                            "backup finished, but starting the requested verify task failed: {}",
                            err
                        ));
                    }
                };

                match (res, env.ensure_finished()) {
                    (Ok(_), Ok(())) => {
                        env.log("backup finished successfully");
                        verify(env);
                        Ok(())
                    },
                    (Err(err), Ok(())) => {
                        // ignore errors after finish
                        env.log(format!("backup had errors but finished: {}", err));
                        verify(env);
                        Ok(())
                    },
                    (Ok(_), Err(err)) => {
                        env.log(format!("backup ended and finish failed: {}", err));
                        env.log("removing unfinished backup");
                        pbs_runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                    (Err(err), Err(_)) => {
                        env.log(format!("backup failed: {}", err));
                        env.log("removing failed backup");
                        pbs_runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                }
            }
        })?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .body(Body::empty())?;

        Ok(response)
    }.boxed()
}

const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "previous_backup_time", &Router::new()
            .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);
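
// A hedged outline of a typical session on the upgraded HTTP/2 connection,
// for a fixed-index archive; the ordering is the convention used by backup
// clients, not something the router enforces:
//
//   GET  previous_backup_time
//   POST fixed_index   { "archive-name": ..., "size": ... }   => wid
//   (upload chunk data via fixed_chunk)
//   PUT  fixed_index   { "wid": ..., "digest-list": [...], "offset-list": [...] }
//   POST fixed_close   { "wid": ..., "chunk-count": ..., "size": ..., "csum": ... }
//   POST finish
//
// Dynamic (file-based) archives use the dynamic_* endpoints analogously.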

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}
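
// Illustrative request/response (the archive name is a hypothetical
// example; any name matching BACKUP_ARCHIVE_NAME_SCHEMA with a ".didx"
// extension works):
//
//   POST dynamic_index { "archive-name": "root.pxar.didx" }   =>   1
//
// The returned writer id ("wid") is what the subsequent dynamic_index PUT
// (dynamic_append) and dynamic_close calls refer to.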

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096*1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
        if old_csum != csum {
            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum, old_csum);
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}
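
// A hedged example of the incremental handshake: the client obtains the
// previous index's csum on its own (e.g. from the manifest it downloaded)
// and sends it along; the archive name and sizes below are placeholders:
//
//   POST fixed_index { "archive-name": "drive-scsi0.img.fidx",
//                      "size": 34359738368,
//                      "reuse-csum": "<hex csum of the previous index>" }
//
// If the csum matches, the new writer starts out as a copy of the previous
// index (clone_data_from above) and the client only appends the chunks
// that actually changed.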

#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}
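
// Illustrative payload (digests and offsets are placeholders; every digest
// must already be known to the server, either uploaded via dynamic_chunk or
// registered from the previous backup, otherwise lookup_chunk() fails):
//
//   PUT dynamic_index {
//       "wid": 1,
//       "digest-list": ["5ee2...", "a1b9..."],
//       "offset-list": [4194304, 8388608]
//   }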

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}
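
// Illustrative close call (values are placeholders; chunk-count and size
// must match what the server accumulated for this writer, and csum is the
// client-computed hex checksum over the appended digest list):
//
//   POST dynamic_close { "wid": 1, "chunk-count": 2, "size": 8388608,
//                        "csum": "d6ae..." }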

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new(
        "Get previous backup time.",
        &[],
    )
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => None,
            };
            if let Some(index) = index {
                env.log(format!("register chunks in '{}' from previous backup.", archive_name));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }
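
        // Registering the previous archive's chunks is what allows the
        // client to reference unchanged data by digest in the *_append
        // calls instead of uploading it again (see lookup_chunk() in
        // dynamic_append/fixed_append above).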

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }.boxed()
}