//! Backup protocol (HTTP2 upgrade)

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

use pbs_api_types::{
    Authid, VerifyState, SnapshotVerifyState,
    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_tools::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};

use crate::server::{WorkerTask, H2Service};
use crate::backup::DataStore;
use crate::config::cached_user_info::CachedUserInfo;

mod environment;
use environment::*;

mod upload_chunk;
use upload_chunk::*;

pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);

#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_backup_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we have to check the
    // privilege inside the function body instead of via the schema
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);

fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without --benchmark flag");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("the benchmark flag is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        let correct_owner = owner == auth_id
            || (owner.is_token()
                && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }
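
        // Pick the previous snapshot as incremental base only if it is not
        // known to be bad: a failed verification below drops it, making the
        // client re-upload chunks instead of referencing possibly corrupt ones.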
        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => match verify.state {
                        VerifyState::Ok => Some(info),
                        VerifyState::Failed => None,
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new { bail!("backup directory already exists."); }
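
        // Three locks are held at this point: the group lock (one writer per
        // group), a shared lock on the base snapshot (if any), and the lock on
        // the new snapshot directory. All of them move into the worker task
        // below and are only released when the task ends.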

        WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
            let mut env = BackupEnvironment::new(
                env_type, auth_id, worker.clone(), datastore, backup_dir);

            env.debug = debug;
            env.last_backup = last_backup;

            env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

            let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

            let abort_future = worker.abort_future();

            let env2 = env.clone();

            let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                .map_err(Error::from)
                .and_then(move |conn| {
                    env2.debug("protocol upgrade done");

                    let mut http = hyper::server::conn::Http::new();
                    http.http2_only(true);
                    // increase window size: todo - find optimal size
                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
                    http.http2_initial_stream_window_size(window_size);
                    http.http2_initial_connection_window_size(window_size);
                    http.http2_max_frame_size(4*1024*1024);

                    let env3 = env2.clone();
                    http.serve_connection(conn, service)
                        .map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid "Transport endpoint is not connected" (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error") && env3.finished() {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                });

            let mut abort_future = abort_future
                .map(|_| Err(format_err!("task aborted")));

            async move {
                // keep flock until task ends
                let _group_guard = _group_guard;
                let snap_guard = snap_guard;
                let _last_guard = _last_guard;

                let res = select!{
                    req = req_fut => req,
                    abrt = abort_future => abrt,
                };

                if benchmark {
                    env.log("benchmark finished successfully");
                    pbs_runtime::block_in_place(|| env.remove_backup())?;
                    return Ok(());
                }

                let verify = |env: BackupEnvironment| {
                    if let Err(err) = env.verify_after_complete(snap_guard) {
                        env.log(format!(
                            "backup finished, but starting the requested verify task failed: {}",
                            err
                        ));
                    }
                };

                match (res, env.ensure_finished()) {
                    (Ok(_), Ok(())) => {
                        env.log("backup finished successfully");
                        verify(env);
                        Ok(())
                    },
                    (Err(err), Ok(())) => {
                        // ignore errors after finish
                        env.log(format!("backup had errors but finished: {}", err));
                        verify(env);
                        Ok(())
                    },
                    (Ok(_), Err(err)) => {
                        env.log(format!("backup ended and finish failed: {}", err));
                        env.log("removing unfinished backup");
                        pbs_runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                    (Err(err), Err(_)) => {
                        env.log(format!("backup failed: {}", err));
                        env.log("removing failed backup");
                        pbs_runtime::block_in_place(|| env.remove_backup())?;
                        Err(err)
                    },
                }
            }
        })?;
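
        // The 101 response below is returned through the REST stack; hyper
        // resolves the upgrade future awaited in the worker only after this
        // response has been sent out to the client.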

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .body(Body::empty())?;

        Ok(response)
    }.boxed()
}

const BACKUP_API_SUBDIRS: SubdirMap = &[
    (
        "blob", &Router::new()
            .upload(&API_METHOD_UPLOAD_BLOB)
    ),
    (
        "dynamic_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
    ),
    (
        "dynamic_close", &Router::new()
            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
    ),
    (
        "dynamic_index", &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND)
    ),
    (
        "finish", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&finish_backup),
                    &ObjectSchema::new("Mark backup as finished.", &[])
                )
            )
    ),
    (
        "fixed_chunk", &Router::new()
            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
    ),
    (
        "fixed_close", &Router::new()
            .post(&API_METHOD_CLOSE_FIXED_INDEX)
    ),
    (
        "fixed_index", &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND)
    ),
    (
        "previous", &Router::new()
            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
    ),
    (
        "previous_backup_time", &Router::new()
            .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
    ),
    (
        "speedtest", &Router::new()
            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
    ),
];
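
// A client session walks these endpoints in a fixed order: create an index
// ("dynamic_index"/"fixed_index"), upload chunks ("dynamic_chunk"/"fixed_chunk"),
// append their digests via PUT on the index, close it ("dynamic_close"/
// "fixed_close"), and finally call "finish".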

pub const BACKUP_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
    .subdirs(BACKUP_API_SUBDIRS);

#[sortable]
pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();

    let archive_name = name.clone();
    if !archive_name.ends_with(".didx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}
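
// Dynamic indexes (.didx) back archive streams cut into variable-sized chunks;
// fixed indexes (.fidx) back image backups cut into equal-sized chunks, which
// is what allows the offset-based incremental reuse below ("reuse-csum").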

#[sortable]
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_fixed_index),
    &ObjectSchema::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
            ("size", false, &IntegerSchema::new("File size.")
                .minimum(1)
                .schema()
            ),
            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
                csum and reuse index for incremental backup if it matches.").schema()),
        ]),
    )
);

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
    let size = required_integer_param(&param, "size")? as usize;
    let reuse_csum = param["reuse-csum"].as_str();

    let archive_name = name.clone();
    if !archive_name.ends_with(".fidx") {
        bail!("wrong archive extension: '{}'", archive_name);
    }

    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096*1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
    let mut incremental = false;
    if let Some(csum) = reuse_csum {
        incremental = true;
        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => {
                bail!("cannot reuse index - no valid previous backup exists");
            }
        };

        let mut last_path = last_backup.backup_dir.relative_path();
        last_path.push(&archive_name);

        let index = match env.datastore.open_fixed_reader(last_path) {
            Ok(index) => index,
            Err(_) => {
                bail!("cannot reuse index - no previous backup exists for archive");
            }
        };

        let (old_csum, _) = index.compute_csum();
        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
        if old_csum != csum {
            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum, old_csum);
        }

        reader = Some(index);
    }

    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;

    if let Some(reader) = reader {
        writer.clone_data_from(&reader)?;
    }
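
    // clone_data_from copies the chunk references of the previous index into
    // the new writer, so an incremental client only uploads and appends the
    // chunks that actually changed.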

    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

#[sortable]
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&dynamic_append),
    &ObjectSchema::new(
        "Append chunk to dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));
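
    // Every digest must already be known to this session, either uploaded as
    // a chunk earlier or registered from the previous snapshot; lookup_chunk
    // enforces this and yields the chunk size.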
    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&fixed_append),
    &ObjectSchema::new(
        "Append chunk to fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "digest-list",
                false,
                &ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
            ),
            (
                "offset-list",
                false,
                &ArraySchema::new(
                    "Chunk offset list.",
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                ).schema()
            ),
        ]),
    )
);

fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = proxmox::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_dynamic_index),
    &ObjectSchema::new(
        "Close dynamic index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                    .minimum(1)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
                    .minimum(1)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}
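
// The close calls let the server cross-check chunk count, file size, and the
// digest-list checksum against what the client wrote; a mismatch fails the
// request instead of persisting an inconsistent index.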

#[sortable]
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&close_fixed_index),
    &ObjectSchema::new(
        "Close fixed index writer.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            (
                "chunk-count",
                false,
                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
                    .minimum(0)
                    .schema()
            ),
            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
        ]),
    )
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
    let csum_str = required_string_param(&param, "csum")?;
    let csum = proxmox::tools::hex_to_digest(csum_str)?;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size, csum)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}

#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new(
        "Get previous backup time.",
        &[],
    )
);

fn get_previous_backup_time(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([
            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
        ]),
    )
);

fn download_previous(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

        let archive_name = required_string_param(&param, "archive-name")?.to_owned();

        let last_backup = match &env.last_backup {
            Some(info) => info,
            None => bail!("no valid previous backup"),
        };

        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
        path.push(&archive_name);

        {
            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
                ArchiveType::FixedIndex => {
                    let index = env.datastore.open_fixed_reader(&path)?;
                    Some(Box::new(index))
                }
                ArchiveType::DynamicIndex => {
                    let index = env.datastore.open_dynamic_reader(&path)?;
                    Some(Box::new(index))
                }
                _ => None,
            };
            if let Some(index) = index {
                env.log(format!("register chunks in '{}' from previous backup.", archive_name));
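
                // Registering these digests lets the client reference chunks from
                // the previous archive in the new index without re-uploading them.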
                for pos in 0..index.index_count() {
                    let info = index.chunk_info(pos).unwrap();
                    let size = info.range.end - info.range.start;
                    env.register_chunk(info.digest, size as u32)?;
                }
            }
        }

        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }.boxed()
}