//! Backup protocol (HTTP2 upgrade)
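//!
//! The client opens a normal HTTP/1.x connection and requests an upgrade by
//! sending an `Upgrade` header set to the value of
//! `PROXMOX_BACKUP_PROTOCOL_ID_V1!()`. Once the permission, owner and
//! snapshot checks below succeed, the server replies with
//! `101 Switching Protocols` and then serves the HTTP/2 sub-API defined in
//! [`BACKUP_API_ROUTER`] (index creation, chunk upload, append, close and
//! finish) over the upgraded connection.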

use anyhow::{bail, format_err, Error};
use futures::*;
use hex::FromHex;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use serde_json::{json, Value};

use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
use proxmox_sys::sortable;

use pbs_api_types::{
    Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
    DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);
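
// A rough sketch of the handshake as seen from the client side (illustrative
// only: the path depends on where this router is mounted, all parameter
// values are made up, and the Upgrade value is the expansion of
// PROXMOX_BACKUP_PROTOCOL_ID_V1!):
//
//   GET /<mount-point>?store=store1&backup-type=host&backup-id=host1&backup-time=1650000000 HTTP/1.1
//   Upgrade: proxmox-backup-protocol-v1
//
//   HTTP/1.1 101 Switching Protocols
//   Upgrade: proxmox-backup-protocol-v1
//
// after which both sides switch to HTTP/2 on the same TCP connection.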

fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(
            &auth_id,
            &["datastore", &store],
            PRIV_DATASTORE_BACKUP,
            false,
        )?;

        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

        let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!(
                "unexpected http version '{:?}' (expected version < 2)",
                parts.version
            );
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without the --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("the --benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) =
            datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
        let correct_owner =
            owner == auth_id || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true)
                .unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => match verify.state {
                        VerifyState::Ok => Some(info),
                        VerifyState::Failed => None,
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(
                &full_path,
                "snapshot",
                "base snapshot is already locked by another operation",
            )?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new {
            bail!("backup directory already exists.");
        }

        WorkerTask::spawn(
            worker_type,
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| {
                let mut env = BackupEnvironment::new(
                    env_type,
                    auth_id,
                    worker.clone(),
                    datastore,
                    backup_dir,
                );

                env.debug = debug;
                env.last_backup = last_backup;

                env.log(format!(
                    "starting new {} on datastore '{}': {:?}",
                    worker_type, store, path
                ));

                let service =
                    H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

                let abort_future = worker.abort_future();

                let env2 = env.clone();

                let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                    .map_err(Error::from)
                    .and_then(move |conn| {
                        env2.debug("protocol upgrade done");

                        let mut http = hyper::server::conn::Http::new();
                        http.http2_only(true);
                        // increase window size: todo - find optimal size
                        let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
                        http.http2_initial_stream_window_size(window_size);
                        http.http2_initial_connection_window_size(window_size);
                        http.http2_max_frame_size(4 * 1024 * 1024);

                        let env3 = env2.clone();
                        http.serve_connection(conn, service).map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid 'Transport endpoint is not connected' (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error")
                                        && env3.finished()
                                    {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                    });
                let mut abort_future = abort_future.map(|_| Err(format_err!("task aborted")));

                async move {
                    // keep flock until task ends
                    let _group_guard = _group_guard;
                    let snap_guard = snap_guard;
                    let _last_guard = _last_guard;

                    let res = select! {
                        req = req_fut => req,
                        abrt = abort_future => abrt,
                    };
                    if benchmark {
                        env.log("benchmark finished successfully");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        return Ok(());
                    }

                    let verify = |env: BackupEnvironment| {
                        if let Err(err) = env.verify_after_complete(snap_guard) {
                            env.log(format!(
                                "backup finished, but starting the requested verify task failed: {}",
                                err
                            ));
                        }
                    };

                    match (res, env.ensure_finished()) {
                        (Ok(_), Ok(())) => {
                            env.log("backup finished successfully");
                            verify(env);
                            Ok(())
                        }
                        (Err(err), Ok(())) => {
                            // ignore errors after finish
                            env.log(format!("backup had errors but finished: {}", err));
                            verify(env);
                            Ok(())
                        }
                        (Ok(_), Err(err)) => {
                            env.log(format!("backup ended and finish failed: {}", err));
                            env.log("removing unfinished backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                        (Err(err), Err(_)) => {
                            env.log(format!("backup failed: {}", err));
                            env.log("removing failed backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                    }
                }
            },
        )?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(
                UPGRADE,
                HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
            )
            .body(Body::empty())?;

        Ok(response)
    }
    .boxed()
}

const BACKUP_API_SUBDIRS: SubdirMap = &[
    ("blob", &Router::new().upload(&API_METHOD_UPLOAD_BLOB)),
    (
        "dynamic_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK),
    ),
    (
        "dynamic_close",
        &Router::new().post(&API_METHOD_CLOSE_DYNAMIC_INDEX),
    ),
    (
        "dynamic_index",
        &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND),
    ),
    (
        "finish",
        &Router::new().post(&ApiMethod::new(
            &ApiHandler::Sync(&finish_backup),
            &ObjectSchema::new("Mark backup as finished.", &[]),
        )),
    ),
    (
        "fixed_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_FIXED_CHUNK),
    ),
    (
        "fixed_close",
        &Router::new().post(&API_METHOD_CLOSE_FIXED_INDEX),
    ),
    (
        "fixed_index",
        &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND),
    ),
    (
        "previous",
        &Router::new().download(&API_METHOD_DOWNLOAD_PREVIOUS),
    ),
    (
        "previous_backup_time",
        &Router::new().get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME),
    ),
    (
        "speedtest",
        &Router::new().upload(&API_METHOD_UPLOAD_SPEEDTEST),
    ),
];

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);
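
// A typical session over the upgraded HTTP/2 connection looks roughly like
// this (dynamic archives shown; the fixed_* endpoints work analogously):
//
//   POST dynamic_index        -> create an index writer, returns its 'wid'
//   dynamic_chunk (upload)    -> send the actual chunk data (repeated)
//   PUT  dynamic_index        -> append uploaded chunk digests at offsets
//   POST dynamic_close        -> check chunk count/size/csum, close writer
//   POST finish               -> mark the whole backup snapshot as finished
//
// 'previous' and 'previous_backup_time' let a client fetch data from the
// last snapshot to drive incremental backups.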

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
    ),
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}
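
// Illustrative call (hypothetical archive name): POSTing
// {"archive-name": "root.pxar.didx"} creates a dynamic writer for that
// archive and returns the writer ID as a plain JSON number; later append
// and close calls reference it via their 'wid' parameter.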

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            (
                "size",
                false,
                &IntegerSchema::new("File size.").minimum(1).schema()
            ),
            (
                "reuse-csum",
                true,
                &StringSchema::new(
                    "If set, compare last backup's \
                    csum and reuse index for incremental backup if it matches."
                )
                .schema()
            ),
        ]),
    ),
);

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096 * 1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = hex::encode(&old_csum);
        if old_csum != csum {
            bail!(
                "expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum,
                old_csum
            );
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}
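
// Incremental sketch: a client that kept the previous snapshot's manifest
// can pass that archive's checksum as 'reuse-csum'. If it matches the csum
// recomputed from the last backup's index, the new writer starts out as a
// copy of that index, so only chunks that actually changed have to be
// uploaded before their offsets are appended via a PUT to 'fixed_index'.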

#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            ),
        ]),
    ),
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to dynamic index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
}
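
// 'digest-list' and 'offset-list' are parallel arrays: entry i of the digest
// list belongs to entry i of the offset list. A hypothetical request body:
//
//   { "wid": 1,
//     "digest-list": ["<64 hex digit sha256 chunk digest>", ...],
//     "offset-list": [4194304, ...] }
//
// Every digest must already be known to the server (previously uploaded, or
// registered from the previous snapshot via 'previous'), otherwise the call
// fails with "no such chunk".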

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            )
        ]),
    ),
);

fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to fixed index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new(
                    "Chunk count. This is used to verify that the server got all chunks."
                )
                .minimum(1)
                .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new(
                    "File size. This is used to verify that the server got all data."
                )
                .minimum(1)
                .schema()
            ),
            (
                "csum",
                false,
                &StringSchema::new("Digest list checksum.").schema()
            ),
        ]),
    ),
);

fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = <[u8; 32]>::from_hex(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new("Get previous backup time.", &[]),
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env
        .last_backup
        .as_ref()
        .map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}
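
// Returns the previous (valid) snapshot's backup time as a Unix epoch, or
// JSON null if no usable previous backup exists in this group.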

#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)]),
    ),
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => None,
            };
            if let Some(index) = index {
                env.log(format!(
                    "register chunks in '{}' from previous backup.",
                    archive_name
                ));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }
    .boxed()
}