]> git.proxmox.com Git - proxmox-backup.git/blame - src/bin/proxmox-restore-daemon.rs
move src/server/rest.rs to proxmox-rest-server crate
[proxmox-backup.git] / src / bin / proxmox-restore-daemon.rs
CommitLineData
dd9cef56 1///! Daemon binary to run inside a micro-VM for secure single file restore of disk images
4c1b7761
WB
2use std::fs::File;
3use std::io::prelude::*;
dd9cef56
SR
4use std::os::unix::{
5 io::{FromRawFd, RawFd},
6 net,
7};
8use std::path::Path;
d32a8652 9use std::sync::{Arc, Mutex};
dd9cef56 10
4c1b7761
WB
11use anyhow::{bail, format_err, Error};
12use lazy_static::lazy_static;
13use log::{error, info};
dd9cef56
SR
14use tokio::sync::mpsc;
15use tokio_stream::wrappers::ReceiverStream;
7fa9a37c
DM
16use http::request::Parts;
17use http::Response;
18use hyper::{Body, StatusCode};
19use hyper::header;
dd9cef56
SR
20
21use proxmox::api::RpcEnvironmentType;
dd9cef56 22
4c1b7761 23use pbs_client::DEFAULT_VSOCK_PORT;
6fbf0acc 24use proxmox_rest_server::{ApiConfig, RestServer};
9edf96e6 25
dd9cef56
SR
26mod proxmox_restore_daemon;
27use proxmox_restore_daemon::*;
28
/// Maximum amount of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
/// We should never have more than a few requests in queue, so use a low number.
/// Also used as the `listen(2)` backlog in `get_vsock_fd` and as the accept-channel
/// capacity in `accept_vsock_connections`.
pub const MAX_PENDING: usize = 32;

/// Will be present in base initramfs; `main` refuses to run if this marker is missing.
pub const VM_DETECT_FILE: &str = "/restore-vm-marker";
35
d32a8652
SR
lazy_static! {
    /// The current disks state. Use for accessing data on the attached snapshots.
    ///
    /// NOTE: initialization is lazy and happens on first deref; `main` forces the
    /// first lock before starting the API so a failed `DiskState::scan()` panics
    /// (and thus stops the VM) before any request is served. The `unwrap()` here
    /// is that intentional panic path.
    pub static ref DISK_STATE: Arc<Mutex<DiskState>> = {
        Arc::new(Mutex::new(DiskState::scan().unwrap()))
    };
}
42
dd9cef56
SR
/// This is expected to be run by 'proxmox-file-restore' within a mini-VM
fn main() -> Result<(), Error> {
    // refuse to run outside the restore VM: the marker file is only present in
    // the base initramfs (see VM_DETECT_FILE)
    if !Path::new(VM_DETECT_FILE).exists() {
        bail!(
            "This binary is not supposed to be run manually, use 'proxmox-file-restore' instead."
        );
    }

    // don't have a real syslog (and no persistence), so use env_logger to print to a log file (via
    // stdout to a serial terminal attached by QEMU)
    env_logger::from_env(env_logger::Env::default().default_filter_or("info"))
        .write_style(env_logger::WriteStyle::Never)
        .format_timestamp_millis()
        .init();

    info!("setup basic system environment...");
    setup_system_env().map_err(|err| format_err!("system environment setup failed: {}", err))?;

    // scan all attached disks now, before starting the API
    // this will panic and stop the VM if anything goes wrong
    info!("scanning all disks...");
    {
        // first deref of the lazy_static triggers DiskState::scan(); the guard is
        // dropped right away — only the initialization side effect is wanted here
        let _disk_state = DISK_STATE.lock().unwrap();
    }

    info!("disk scan complete, starting main runtime...");

    // hand over to the async part (vsock accept loop + REST server)
    pbs_runtime::main(run())
}
72
73e1ba65
TL
73/// ensure we have our /run dirs, system users and stuff like that setup
74fn setup_system_env() -> Result<(), Error> {
75 // the API may save some stuff there, e.g., the memcon tracking file
76 // we do not care much, but it's way less headache to just create it
77 std::fs::create_dir_all("/run/proxmox-backup")?;
78
9edf96e6
TL
79 // we now ensure that all lock files are owned by the backup user, and as we reuse the
80 // specialized REST module from pbs api/daemon we have some checks there for user/acl stuff
81 // that gets locked, and thus needs the backup system user to work.
82 std::fs::create_dir_all("/etc")?;
83 let mut passwd = File::create("/etc/passwd")?;
84 writeln!(passwd, "root:x:0:0:root:/root:/bin/sh")?;
85 writeln!(passwd, "backup:x:34:34:backup:/var/backups:/usr/sbin/nologin")?;
86
87 let mut group = File::create("/etc/group")?;
88 writeln!(group, "root:x:0:")?;
89 writeln!(group, "backup:x:34:")?;
90
73e1ba65
TL
91 Ok(())
92}
93
7fa9a37c
DM
94fn get_index(
95 _auth_id: Option<String>,
96 _language: Option<String>,
97 _api: &ApiConfig,
98 _parts: Parts,
99) -> Response<Body> {
100
101 let index = "<center><h1>Proxmox Backup Restore Daemon/h1></center>";
102
103 Response::builder()
104 .status(StatusCode::OK)
105 .header(header::CONTENT_TYPE, "text/html")
106 .body(index.into())
107 .unwrap()
108}
109
/// Async entry point: arm the watchdog, set up ticket-file auth, and serve the
/// REST API over manually accepted vsock connections. Only returns on error.
async fn run() -> Result<(), Error> {
    watchdog_init();

    // authentication is based on a ticket file written by the host side;
    // failing to read it is fatal for the whole daemon
    let auth_config = Arc::new(
        auth::ticket_auth().map_err(|err| format_err!("reading ticket file failed: {}", err))?,
    );
    let config = ApiConfig::new("", &ROUTER, RpcEnvironmentType::PUBLIC, auth_config, get_index)?;
    let rest_server = RestServer::new(config);

    // accept vsock connections ourselves (see accept_vsock_connections) and
    // adapt the mpsc receiver into a stream hyper can accept from
    let vsock_fd = get_vsock_fd()?;
    let connections = accept_vsock_connections(vsock_fd);
    let receiver_stream = ReceiverStream::new(connections);
    let acceptor = hyper::server::accept::from_stream(receiver_stream);

    hyper::Server::builder(acceptor).serve(rest_server).await?;

    // serve() returning at all means the server died — report it as an error
    bail!("hyper server exited");
}
128
129fn accept_vsock_connections(
130 vsock_fd: RawFd,
131) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
132 use nix::sys::socket::*;
133 let (sender, receiver) = mpsc::channel(MAX_PENDING);
134
135 tokio::spawn(async move {
136 loop {
137 let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
138 // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
139 let client_fd = accept(vsock_fd)?;
140 let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
141 stream.set_nonblocking(true)?;
142 tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
143 });
144
145 match stream {
146 Ok(stream) => {
147 if sender.send(Ok(stream)).await.is_err() {
148 error!("connection accept channel was closed");
149 }
150 }
151 Err(err) => {
152 error!("error accepting vsock connetion: {}", err);
153 }
154 }
155 }
156 });
157
158 receiver
159}
160
161fn get_vsock_fd() -> Result<RawFd, Error> {
162 use nix::sys::socket::*;
163 let sock_fd = socket(
164 AddressFamily::Vsock,
165 SockType::Stream,
166 SockFlag::empty(),
167 None,
168 )?;
169 let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
170 bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
171 listen(sock_fd, MAX_PENDING)?;
172 Ok(sock_fd)
173}