]> git.proxmox.com Git - proxmox-backup.git/blob - proxmox-restore-daemon/src/main.rs
update proxmox-metrics dependency to 0.3.1
[proxmox-backup.git] / proxmox-restore-daemon / src / main.rs
//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
2 use std::fs::File;
3 use std::io::prelude::*;
4 use std::os::unix::{
5 io::{FromRawFd, RawFd},
6 net,
7 };
8 use std::path::Path;
9 use std::sync::{Arc, Mutex};
10
11 use anyhow::{bail, format_err, Error};
12 use lazy_static::lazy_static;
13 use log::{error, info};
14 use tokio::sync::mpsc;
15 use tokio_stream::wrappers::ReceiverStream;
16
17 use proxmox_router::RpcEnvironmentType;
18
19 use pbs_client::DEFAULT_VSOCK_PORT;
20 use proxmox_rest_server::{ApiConfig, RestServer};
21
22 mod proxmox_restore_daemon;
23 use proxmox_restore_daemon::*;
24
/// Maximum amount of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
///
/// Used both as the `listen(2)` backlog in `get_vsock_fd` and as the bound of
/// the connection channel in `accept_vsock_connections`.
pub const MAX_PENDING: usize = 32;
28
/// Will be present in base initramfs. Its absence means this binary is running
/// outside the restore VM, in which case `main` bails out immediately.
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";
31
lazy_static! {
    /// The current disks state. Use for accessing data on the attached snapshots.
    ///
    /// Initialized lazily on first dereference: runs `DiskState::scan()` and
    /// panics (`unwrap`) if the scan fails — `init_disk_state` triggers this
    /// eagerly (and under a startup timeout) in `run`.
    pub static ref DISK_STATE: Arc<Mutex<DiskState>> = {
        Arc::new(Mutex::new(DiskState::scan().unwrap()))
    };
}
38
39 fn init_disk_state() {
40 info!("scanning all disks...");
41 {
42 let _disk_state = DISK_STATE.lock().unwrap();
43 }
44
45 info!("disk scan complete.")
46 }
47
48 /// This is expected to be run by 'proxmox-file-restore' within a mini-VM
49 fn main() -> Result<(), Error> {
50 pbs_tools::setup_libc_malloc_opts();
51
52 if !Path::new(VM_DETECT_FILE).exists() {
53 bail!(
54 "This binary is not supposed to be run manually, use 'proxmox-file-restore' instead."
55 );
56 }
57
58 // don't have a real syslog (and no persistence), so use env_logger to print to a log file (via
59 // stdout to a serial terminal attached by QEMU)
60 env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
61 .write_style(env_logger::WriteStyle::Never)
62 .format_timestamp_millis()
63 .init();
64
65 info!("setup basic system environment...");
66 setup_system_env().map_err(|err| format_err!("system environment setup failed: {}", err))?;
67
68 proxmox_async::runtime::main(run())
69 }
70
71 /// ensure we have our /run dirs, system users and stuff like that setup
72 fn setup_system_env() -> Result<(), Error> {
73 // the API may save some stuff there, e.g., the memcon tracking file
74 // we do not care much, but it's way less headache to just create it
75 std::fs::create_dir_all("/run/proxmox-backup")?;
76
77 // we now ensure that all lock files are owned by the backup user, and as we reuse the
78 // specialized REST module from pbs api/daemon we have some checks there for user/acl stuff
79 // that gets locked, and thus needs the backup system user to work.
80 std::fs::create_dir_all("/etc")?;
81 let mut passwd = File::create("/etc/passwd")?;
82 writeln!(passwd, "root:x:0:0:root:/root:/bin/sh")?;
83 writeln!(
84 passwd,
85 "backup:x:34:34:backup:/var/backups:/usr/sbin/nologin"
86 )?;
87
88 let mut group = File::create("/etc/group")?;
89 writeln!(group, "root:x:0:")?;
90 writeln!(group, "backup:x:34:")?;
91
92 Ok(())
93 }
94
/// Async entry point: scan disks, read the auth ticket and serve the restore
/// API via hyper over a vsock socket. Expected to run until the VM is killed;
/// reaching the end is treated as an error.
async fn run() -> Result<(), Error> {
    watchdog_init();

    // Disk scanning happens lazily on first lock of DISK_STATE; force it here
    // on a blocking worker thread, bounded by a timeout so a hung scan cannot
    // stall startup forever.
    let init_future = async move {
        match tokio::time::timeout(
            std::time::Duration::from_secs(120),
            tokio::task::spawn_blocking(init_disk_state),
        )
        .await
        {
            Ok(res) => res.map_err(|err| format_err!("disk init failed: {}", err)),
            Err(_) => bail!("disk init timed out after 120 seconds"),
        }
    };

    // Ticket used by `auth::check_auth` below to authenticate incoming requests.
    let ticket =
        auth::read_ticket().map_err(|err| format_err!("reading ticket file failed: {}", err))?;

    let config = ApiConfig::new("", RpcEnvironmentType::PUBLIC)
        .default_api2_handler(&ROUTER)
        .index_handler_func(|_, _| auth::get_index())
        .auth_handler_func(move |h, m| Box::pin(auth::check_auth(Arc::clone(&ticket), h, m)));
    let rest_server = RestServer::new(config);

    // Accept vsock connections manually (see `accept_vsock_connections`) and
    // feed them to hyper through a channel-backed stream.
    let vsock_fd = get_vsock_fd()?;
    let connections = accept_vsock_connections(vsock_fd);
    let receiver_stream = ReceiverStream::new(connections);
    let acceptor = hyper::server::accept::from_stream(receiver_stream);

    let hyper_future = async move {
        hyper::Server::builder(acceptor)
            .serve(rest_server)
            .await
            .map_err(|err| format_err!("hyper finished with error: {}", err))
    };

    // Both futures are expected to run indefinitely; if try_join! returns Ok
    // the server terminated unexpectedly, so bail below.
    tokio::try_join!(init_future, hyper_future)?;

    bail!("hyper server exited");
}
135
136 fn accept_vsock_connections(
137 vsock_fd: RawFd,
138 ) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
139 use nix::sys::socket::*;
140 let (sender, receiver) = mpsc::channel(MAX_PENDING);
141
142 tokio::spawn(async move {
143 loop {
144 let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
145 // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
146 let client_fd = accept(vsock_fd)?;
147 let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
148 stream.set_nonblocking(true)?;
149 tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
150 });
151
152 match stream {
153 Ok(stream) => {
154 if sender.send(Ok(stream)).await.is_err() {
155 error!("connection accept channel was closed");
156 }
157 }
158 Err(err) => {
159 error!("error accepting vsock connection: {}", err);
160 }
161 }
162 }
163 });
164
165 receiver
166 }
167
168 fn get_vsock_fd() -> Result<RawFd, Error> {
169 use nix::sys::socket::*;
170 let sock_fd = socket(
171 AddressFamily::Vsock,
172 SockType::Stream,
173 SockFlag::empty(),
174 None,
175 )?;
176 let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
177 bind(sock_fd, &sock_addr)?;
178 listen(sock_fd, MAX_PENDING)?;
179 Ok(sock_fd)
180 }