]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-restore-daemon.rs
file-restore-daemon: add watchdog module
[proxmox-backup.git] / src / bin / proxmox-restore-daemon.rs
CommitLineData
dd9cef56
SR
1///! Daemon binary to run inside a micro-VM for secure single file restore of disk images
2use anyhow::{bail, format_err, Error};
3use log::error;
4
5use std::os::unix::{
6 io::{FromRawFd, RawFd},
7 net,
8};
9use std::path::Path;
10use std::sync::Arc;
11
12use tokio::sync::mpsc;
13use tokio_stream::wrappers::ReceiverStream;
14
15use proxmox::api::RpcEnvironmentType;
16use proxmox_backup::client::DEFAULT_VSOCK_PORT;
17use proxmox_backup::server::{rest::*, ApiConfig};
18
19mod proxmox_restore_daemon;
20use proxmox_restore_daemon::*;
21
/// Maximum amount of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
///
/// Used both as the listen(2) backlog and as the bound of the accept mpsc channel.
pub const MAX_PENDING: usize = 32;

/// Will be present in base initramfs; used as a marker that we are running inside the restore VM.
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";
28
29/// This is expected to be run by 'proxmox-file-restore' within a mini-VM
30fn main() -> Result<(), Error> {
31 if !Path::new(VM_DETECT_FILE).exists() {
32 bail!(concat!(
33 "This binary is not supposed to be run manually. ",
34 "Please use 'proxmox-file-restore' instead."
35 ));
36 }
37
38 // don't have a real syslog (and no persistance), so use env_logger to print to a log file (via
39 // stdout to a serial terminal attached by QEMU)
40 env_logger::from_env(env_logger::Env::default().default_filter_or("info"))
41 .write_style(env_logger::WriteStyle::Never)
42 .init();
43
44 proxmox_backup::tools::runtime::main(run())
45}
46
47async fn run() -> Result<(), Error> {
a26ebad5
SR
48 watchdog_init();
49
dd9cef56
SR
50 let auth_config = Arc::new(
51 auth::ticket_auth().map_err(|err| format_err!("reading ticket file failed: {}", err))?,
52 );
53 let config = ApiConfig::new("", &ROUTER, RpcEnvironmentType::PUBLIC, auth_config)?;
54 let rest_server = RestServer::new(config);
55
56 let vsock_fd = get_vsock_fd()?;
57 let connections = accept_vsock_connections(vsock_fd);
58 let receiver_stream = ReceiverStream::new(connections);
59 let acceptor = hyper::server::accept::from_stream(receiver_stream);
60
61 hyper::Server::builder(acceptor).serve(rest_server).await?;
62
63 bail!("hyper server exited");
64}
65
66fn accept_vsock_connections(
67 vsock_fd: RawFd,
68) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
69 use nix::sys::socket::*;
70 let (sender, receiver) = mpsc::channel(MAX_PENDING);
71
72 tokio::spawn(async move {
73 loop {
74 let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
75 // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
76 let client_fd = accept(vsock_fd)?;
77 let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
78 stream.set_nonblocking(true)?;
79 tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
80 });
81
82 match stream {
83 Ok(stream) => {
84 if sender.send(Ok(stream)).await.is_err() {
85 error!("connection accept channel was closed");
86 }
87 }
88 Err(err) => {
89 error!("error accepting vsock connetion: {}", err);
90 }
91 }
92 }
93 });
94
95 receiver
96}
97
98fn get_vsock_fd() -> Result<RawFd, Error> {
99 use nix::sys::socket::*;
100 let sock_fd = socket(
101 AddressFamily::Vsock,
102 SockType::Stream,
103 SockFlag::empty(),
104 None,
105 )?;
106 let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
107 bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
108 listen(sock_fd, MAX_PENDING)?;
109 Ok(sock_fd)
110}