proxmox-restore-daemon/src/main.rs
//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
use std::fs::File;
use std::io::prelude::*;
use std::os::unix::{
    io::{FromRawFd, RawFd},
    net,
};
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::future::Future;
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use log::{error, info};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use http::request::Parts;
use http::Response;
use hyper::{Body, StatusCode};
use hyper::header;

use proxmox::api::RpcEnvironmentType;

use pbs_client::DEFAULT_VSOCK_PORT;
use proxmox_rest_server::{ApiConfig, RestServer};

mod proxmox_restore_daemon;
use proxmox_restore_daemon::*;

/// Maximum amount of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
pub const MAX_PENDING: usize = 32;

/// Will be present in base initramfs
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";

lazy_static! {
    /// The current disk state. Used for accessing data on the attached snapshots.
    pub static ref DISK_STATE: Arc<Mutex<DiskState>> = {
        Arc::new(Mutex::new(DiskState::scan().unwrap()))
    };
}

/// This is expected to be run by 'proxmox-file-restore' within a mini-VM
fn main() -> Result<(), Error> {
    if !Path::new(VM_DETECT_FILE).exists() {
        bail!(
            "This binary is not supposed to be run manually, use 'proxmox-file-restore' instead."
        );
    }

    // we don't have a real syslog (and no persistence), so use env_logger to write to a log file
    // via stdout, which QEMU attaches to a serial terminal
    env_logger::from_env(env_logger::Env::default().default_filter_or("info"))
        .write_style(env_logger::WriteStyle::Never)
        .format_timestamp_millis()
        .init();

    info!("setup basic system environment...");
    setup_system_env().map_err(|err| format_err!("system environment setup failed: {}", err))?;

    // scan all attached disks now, before starting the API
    // this will panic and stop the VM if anything goes wrong
    info!("scanning all disks...");
    {
        let _disk_state = DISK_STATE.lock().unwrap();
    }

    info!("disk scan complete, starting main runtime...");

    pbs_runtime::main(run())
}

/// ensure we have our /run dirs, system users and stuff like that set up
fn setup_system_env() -> Result<(), Error> {
    // the API may save some stuff there, e.g., the memcon tracking file
    // we do not care much, but it's way less headache to just create it
    std::fs::create_dir_all("/run/proxmox-backup")?;

    // the specialized REST module we reuse from the PBS api/daemon locks some user/acl related
    // files and expects them to be owned by the backup user, so that system user (and its group)
    // needs to exist
    std::fs::create_dir_all("/etc")?;
    let mut passwd = File::create("/etc/passwd")?;
    writeln!(passwd, "root:x:0:0:root:/root:/bin/sh")?;
    writeln!(passwd, "backup:x:34:34:backup:/var/backups:/usr/sbin/nologin")?;

    let mut group = File::create("/etc/group")?;
    writeln!(group, "root:x:0:")?;
    writeln!(group, "backup:x:34:")?;

    Ok(())
}

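/// Fallback `get_index` handler for the REST server. The restore daemon exposes no web UI, so
/// any index request simply gets a static HTML placeholder page.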
fn get_index<'a>(
    _auth_id: Option<String>,
    _language: Option<String>,
    _api: &'a ApiConfig,
    _parts: Parts,
) -> Pin<Box<dyn Future<Output = http::Response<Body>> + Send + 'a>> {
    Box::pin(async move {
        let index = "<center><h1>Proxmox Backup Restore Daemon</h1></center>";

        Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "text/html")
            .body(index.into())
            .unwrap()
    })
}

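/// Main async entry point: arm the watchdog, load the ticket-based auth config, and serve the
/// restore API with hyper over a vsock-backed connection stream.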
async fn run() -> Result<(), Error> {
    watchdog_init();

    let auth_config = Arc::new(
        auth::ticket_auth().map_err(|err| format_err!("reading ticket file failed: {}", err))?,
    );
    let config = ApiConfig::new("", &ROUTER, RpcEnvironmentType::PUBLIC, auth_config, &get_index)?;
    let rest_server = RestServer::new(config);

    let vsock_fd = get_vsock_fd()?;
    let connections = accept_vsock_connections(vsock_fd);
    let receiver_stream = ReceiverStream::new(connections);
    let acceptor = hyper::server::accept::from_stream(receiver_stream);

    hyper::Server::builder(acceptor).serve(rest_server).await?;

    bail!("hyper server exited");
}

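/// Accept connections on the raw vsock fd in a background task and forward them as tokio
/// `UnixStream`s through a bounded mpsc channel (capacity MAX_PENDING), so hyper can consume
/// them like a regular accept stream.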
fn accept_vsock_connections(
    vsock_fd: RawFd,
) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
    use nix::sys::socket::*;
    let (sender, receiver) = mpsc::channel(MAX_PENDING);

    tokio::spawn(async move {
        loop {
            let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
                // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
                let client_fd = accept(vsock_fd)?;
                let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
                stream.set_nonblocking(true)?;
                tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
            });

            match stream {
                Ok(stream) => {
                    if sender.send(Ok(stream)).await.is_err() {
                        error!("connection accept channel was closed");
                    }
                }
                Err(err) => {
                    error!("error accepting vsock connection: {}", err);
                }
            }
        }
    });

    receiver
}

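/// Create an AF_VSOCK stream socket bound to DEFAULT_VSOCK_PORT on any CID and start listening
/// with a backlog of MAX_PENDING.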
fn get_vsock_fd() -> Result<RawFd, Error> {
    use nix::sys::socket::*;
    let sock_fd = socket(
        AddressFamily::Vsock,
        SockType::Stream,
        SockFlag::empty(),
        None,
    )?;
    let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
    bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
    listen(sock_fd, MAX_PENDING)?;
    Ok(sock_fd)
}