//! Backup protocol (HTTP2 upgrade)
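//!
//! A backup session starts out as a normal HTTP/1.x request carrying an
//! `Upgrade` header with the backup protocol id. Once parameters and
//! permissions have been validated, the server answers with
//! `101 Switching Protocols` and from then on drives the connection as an
//! HTTP/2 server exposing [`BACKUP_API_ROUTER`]. A sketch of the handshake
//! (the request path is illustrative; the real endpoint is wherever
//! [`ROUTER`] gets mounted):
//!
//! ```text
//! GET /...?store=...&backup-type=host&backup-id=...&backup-time=... HTTP/1.1
//! Upgrade: <PROXMOX_BACKUP_PROTOCOL_ID_V1>
//!
//! HTTP/1.1 101 Switching Protocols
//! Upgrade: <PROXMOX_BACKUP_PROTOCOL_ID_V1>
//! ```
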
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};

use proxmox::{sortable, identity};
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, SubdirMap, Permission,
};
use proxmox_schema::*;

use pbs_api_types::{
    Authid, VerifyState, SnapshotVerifyState,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_ARCHIVE_NAME_SCHEMA,
};
use pbs_tools::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use proxmox_rest_server::{WorkerTask, H2Service};

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: 'store' is not a URI parameter, so the permission check has to
    // happen inside the function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);
152764ec 59
0aadd40b 60fn upgrade_to_backup_protocol(
152764ec
DM
61 parts: Parts,
62 req_body: Body,
0aadd40b 63 param: Value,
255f378a 64 _info: &ApiMethod,
dd5495d6 65 rpcenv: Box<dyn RpcEnvironment>,
bb084b9c 66) -> ApiResponseFuture {
0aadd40b 67
54552dda 68async move {
a42d1f55 69 let debug = param["debug"].as_bool().unwrap_or(false);
61d7b501 70 let benchmark = param["benchmark"].as_bool().unwrap_or(false);
a42d1f55 71
e6dc35ac 72 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
365f0f72 73
3c8c2827 74 let store = required_string_param(&param, "store")?.to_owned();
365f0f72
DM
75
76 let user_info = CachedUserInfo::new()?;
e6dc35ac 77 user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
365f0f72 78
bb105f9d 79 let datastore = DataStore::lookup_datastore(&store)?;
21ee7912 80
3c8c2827
WB
81 let backup_type = required_string_param(&param, "backup-type")?;
82 let backup_id = required_string_param(&param, "backup-id")?;
83 let backup_time = required_integer_param(&param, "backup-time")?;
152764ec
DM
84
85 let protocols = parts
86 .headers
87 .get("UPGRADE")
88 .ok_or_else(|| format_err!("missing Upgrade header"))?
89 .to_str()?;
90
986bef16 91 if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
152764ec
DM
92 bail!("invalid protocol name");
93 }
94
96e95fc1
DM
95 if parts.version >= http::version::Version::HTTP_2 {
96 bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
97 }
98
4ebda996 99 let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
d9bd06ea 100
58c8d7d9 101 let env_type = rpcenv.env_type();
92ac375a 102
51a4f63f 103 let backup_group = BackupGroup::new(backup_type, backup_id);
1fc82c41 104
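        // the pseudo group 'host/benchmark' is reserved for client benchmarks;
        // such sessions get their own worker type and never keep any data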
        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
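        // if the group is owned by an API token, the user owning that token
        // is accepted as well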
        let correct_owner = owner == auth_id
            || (owner.is_token()
                && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => {
                        match verify.state {
                            VerifyState::Ok => Some(info),
                            VerifyState::Failed => None,
                        }
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };
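
        // Note: if verification of the last snapshot failed, it is not used
        // as base, so 'reuse-csum' and 'previous' requests will be refused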

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new { bail!("backup directory already exists."); }

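        // run the protocol handler inside a worker task, so the session shows
        // up in the task list and can be aborted like any other job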
        WorkerTask::spawn(worker_type, Some(worker_id), auth_id.to_string(), true, move |worker| {
            let mut env = BackupEnvironment::new(
                env_type, auth_id, worker.clone(), datastore, backup_dir);

            env.debug = debug;
            env.last_backup = last_backup;

            env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

            let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

            let abort_future = worker.abort_future();

            let env2 = env.clone();

            let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                .map_err(Error::from)
                .and_then(move |conn| {
                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
                    // increase window size: todo - find optimal size
                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
                    http.http2_initial_stream_window_size(window_size);
                    http.http2_initial_connection_window_size(window_size);
                    http.http2_max_frame_size(4*1024*1024);

                    let env3 = env2.clone();
                    http.serve_connection(conn, service)
                        .map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid Transport endpoint is not connected (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error") && env3.finished() {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                });
            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

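            // future handed to the worker: drive the upgraded connection and
            // the abort signal concurrently, then finalize the session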
            async move {
                // keep flock until task ends
                let _group_guard = _group_guard;
                let snap_guard = snap_guard;
                let _last_guard = _last_guard;

                let res = select!{
                    req = req_fut => req,
                    abrt = abort_future => abrt,
                };
                if benchmark {
                    env.log("benchmark finished successfully");
                    proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                    return Ok(());
                }

                let verify = |env: BackupEnvironment| {
                    if let Err(err) = env.verify_after_complete(snap_guard) {
                        env.log(format!(
                            "backup finished, but starting the requested verify task failed: {}",
                            err
                        ));
                    }
                };

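                // a session only counts as successful if the client explicitly
                // called 'finish'; everything else removes the new snapshot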
                match (res, env.ensure_finished()) {
                    (Ok(_), Ok(())) => {
                        env.log("backup finished successfully");
                        verify(env);
                        Ok(())
                    },
                    (Err(err), Ok(())) => {
                        // ignore errors after finish
                        env.log(format!("backup had errors but finished: {}", err));
                        verify(env);
                        Ok(())
                    },
                    (Ok(_), Err(err)) => {
                        env.log(format!("backup ended and finish failed: {}", err));
                        env.log("removing unfinished backup");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                    (Err(err), Err(_)) => {
                        env.log(format!("backup failed: {}", err));
                        env.log("removing failed backup");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                }
            }
        })?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .body(Body::empty())?;

        Ok(response)
    }.boxed()
}

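// The per-connection HTTP/2 API. A typical session creates one or more index
// writers ('dynamic_index' or 'fixed_index'), uploads chunks ('dynamic_chunk'
// or 'fixed_chunk'), appends them to the writers via PUT, closes the writers
// ('dynamic_close' or 'fixed_close') and finally posts 'finish'. 'previous'
// lets the client download (and re-register) archives of the base snapshot
// for incremental runs.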
const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "previous_backup_time", &Router::new()
            .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

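// 'reuse-csum' makes a fixed index incremental: when the checksum of the same
// archive in the last (verified) backup matches, the new index starts as a
// full copy of the old one and the client only re-uploads changed chunks.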
#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096*1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
        if old_csum != csum {
            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum, old_csum);
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

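// 'digest-list' and 'offset-list' are parallel arrays; each digest must refer
// to a chunk the server already knows (uploaded in this session or registered
// from the previous backup), which lookup_chunk() enforces below.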
#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn dynamic_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            )
        ]),
    )
);

fn fixed_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

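// Closing a writer cross-checks chunk count, byte size and the checksum over
// the chunk digests against what the server actually received.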
#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_dynamic_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

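// Marking the session as finished is what makes the snapshot permanent; a
// session that ends without calling this gets its backup directory removed
// (see the worker future in upgrade_to_backup_protocol).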
fn finish_backup (
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new(
        "Get previous backup time.",
        &[],
    )
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

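// Downloading a previous index also registers all of its chunks as known to
// this session, so the client can reference them without re-uploading.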
#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => { None }
            };
            if let Some(index) = index {
                env.log(format!("register chunks in '{}' from previous backup.", archive_name));

                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }.boxed()
}