//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
2 use anyhow
::{bail, format_err, Error}
;
3 use lazy_static
::lazy_static
;
4 use log
::{info, error}
;
7 io
::{FromRawFd, RawFd}
,
11 use std
::sync
::{Arc, Mutex}
;
13 use tokio
::sync
::mpsc
;
14 use tokio_stream
::wrappers
::ReceiverStream
;
16 use proxmox
::api
::RpcEnvironmentType
;
17 use proxmox_backup
::client
::DEFAULT_VSOCK_PORT
;
18 use proxmox_backup
::server
::{rest::*, ApiConfig}
;
20 mod proxmox_restore_daemon
;
21 use proxmox_restore_daemon
::*;
/// Maximum amount of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
pub const MAX_PENDING: usize = 32;

/// Marker file that will be present in the base initramfs — its existence tells us
/// we are actually running inside the restore VM.
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";
31 /// The current disks state. Use for accessing data on the attached snapshots.
32 pub static ref DISK_STATE
: Arc
<Mutex
<DiskState
>> = {
33 Arc
::new(Mutex
::new(DiskState
::scan().unwrap()))
37 /// This is expected to be run by 'proxmox-file-restore' within a mini-VM
38 fn main() -> Result
<(), Error
> {
39 if !Path
::new(VM_DETECT_FILE
).exists() {
41 "This binary is not supposed to be run manually, use 'proxmox-file-restore' instead."
45 // don't have a real syslog (and no persistance), so use env_logger to print to a log file (via
46 // stdout to a serial terminal attached by QEMU)
47 env_logger
::from_env(env_logger
::Env
::default().default_filter_or("info"))
48 .write_style(env_logger
::WriteStyle
::Never
)
49 .format_timestamp_millis()
52 // the API may save some stuff there, e.g., the memcon tracking file
53 // we do not care much, but it's way less headache to just create it
54 std
::fs
::create_dir_all("/run/proxmox-backup")?
;
56 // scan all attached disks now, before starting the API
57 // this will panic and stop the VM if anything goes wrong
58 info
!("scanning all disks...");
60 let _disk_state
= DISK_STATE
.lock().unwrap();
63 info
!("disk scan complete, starting main runtime...");
65 pbs_runtime
::main(run())
68 async
fn run() -> Result
<(), Error
> {
71 let auth_config
= Arc
::new(
72 auth
::ticket_auth().map_err(|err
| format_err
!("reading ticket file failed: {}", err
))?
,
74 let config
= ApiConfig
::new("", &ROUTER
, RpcEnvironmentType
::PUBLIC
, auth_config
)?
;
75 let rest_server
= RestServer
::new(config
);
77 let vsock_fd
= get_vsock_fd()?
;
78 let connections
= accept_vsock_connections(vsock_fd
);
79 let receiver_stream
= ReceiverStream
::new(connections
);
80 let acceptor
= hyper
::server
::accept
::from_stream(receiver_stream
);
82 hyper
::Server
::builder(acceptor
).serve(rest_server
).await?
;
84 bail
!("hyper server exited");
87 fn accept_vsock_connections(
89 ) -> mpsc
::Receiver
<Result
<tokio
::net
::UnixStream
, Error
>> {
90 use nix
::sys
::socket
::*;
91 let (sender
, receiver
) = mpsc
::channel(MAX_PENDING
);
93 tokio
::spawn(async
move {
95 let stream
: Result
<tokio
::net
::UnixStream
, Error
> = tokio
::task
::block_in_place(|| {
96 // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
97 let client_fd
= accept(vsock_fd
)?
;
98 let stream
= unsafe { net::UnixStream::from_raw_fd(client_fd) }
;
99 stream
.set_nonblocking(true)?
;
100 tokio
::net
::UnixStream
::from_std(stream
).map_err(|err
| err
.into())
105 if sender
.send(Ok(stream
)).await
.is_err() {
106 error
!("connection accept channel was closed");
110 error
!("error accepting vsock connetion: {}", err
);
// Create, bind and listen on an AF_VSOCK socket; returns the raw listener fd.
// NOTE(review): this chunk is truncated — the remaining socket() arguments
// (original lines 123-126) and the final `Ok(sock_fd)` / closing brace
// (original lines 130-131) are not visible here; confirm against full file.
119 fn get_vsock_fd() -> Result
<RawFd
, Error
> {
// bring nix socket helpers (socket/bind/listen, AddressFamily, VsockAddr) into scope
120 use nix
::sys
::socket
::*;
// create the vsock socket; further socket() args are outside this view
121 let sock_fd
= socket(
122 AddressFamily
::Vsock
,
// bind to any CID on the default restore-daemon vsock port
127 let sock_addr
= VsockAddr
::new(libc
::VMADDR_CID_ANY
, DEFAULT_VSOCK_PORT
as u32);
128 bind(sock_fd
, &SockAddr
::Vsock(sock_addr
))?
;
// backlog matches MAX_PENDING — saturated queue makes vsock fail fast (ETIMEDOUT)
129 listen(sock_fd
, MAX_PENDING
)?
;