1 //! Helper to start a QEMU VM for single file restore.
2 use std
::fs
::{File, OpenOptions}
;
3 use std
::io
::prelude
::*;
4 use std
::os
::unix
::io
::{AsRawFd, FromRawFd}
;
5 use std
::path
::PathBuf
;
6 use std
::time
::Duration
;
8 use anyhow
::{bail, format_err, Error}
;
11 use nix
::sys
::signal
::{kill, Signal}
;
16 fs
::{create_path, file_read_string, make_tmp_file, CreateOptions}
,
19 use proxmox_backup
::backup
::backup_user
;
20 use proxmox_backup
::client
::{VsockClient, DEFAULT_VSOCK_PORT}
;
21 use proxmox_backup
::{buildcfg, tools}
;
23 use super::SnapRestoreDetails
;
// Process name handed to QEMU for the restore VM; it is matched against
// /proc/<pid>/cmdline (see try_kill_vm) to confirm a PID is one of ours
// before sending SIGKILL.
25 const PBS_VM_NAME
: &str = "pbs-restore-vm";
// Upper bound on VM start retries when the chosen vsock guest-cid is
// already taken by another VM (see the retry loop in start_vm).
26 const MAX_CID_TRIES
: u64 = 32;
// Create the log directory used by file-restore QEMU instances
// ("<PROXMOX_BACKUP_LOG_DIR>/file-restore") and return its path.
// The parent log dir is created owned by the backup user, the
// file-restore subdir owned by root.
28 fn create_restore_log_dir() -> Result
<String
, Error
> {
29 let logpath
= format
!("{}/file-restore", buildcfg
::PROXMOX_BACKUP_LOG_DIR
);
// NOTE(review): extraction dropped lines here (original 30-31); in context
// the following fallible statements are presumably wrapped so the map_err
// below can attach a single error message — confirm against upstream.
// Ownership options for the parent log directory: backup user/group.
32 let backup_user
= backup_user()?
;
33 let opts
= CreateOptions
::new()
34 .owner(backup_user
.uid
)
35 .group(backup_user
.gid
);
// Ownership options for the file-restore subdir: root:root (uid/gid 0).
37 let opts_root
= CreateOptions
::new()
38 .owner(nix
::unistd
::ROOT
)
39 .group(nix
::unistd
::Gid
::from_raw(0));
41 create_path(buildcfg
::PROXMOX_BACKUP_LOG_DIR
, None
, Some(opts
))?
;
42 create_path(&logpath
, None
, Some(opts_root
))?
;
// Attach a readable context message to any error from the directory setup
// above (original lines 43-44 are missing from this extraction).
45 .map_err(|err
: Error
| format_err
!("unable to create file-restore log dir - {}", err
))?
;
// Verify that the restore kernel and initramfs images (shipped by the
// 'proxmox-file-restore' package) exist on disk before attempting to
// launch a VM; bails with an installation hint otherwise.
50 fn validate_img_existance() -> Result
<(), Error
> {
51 let kernel
= PathBuf
::from(buildcfg
::PROXMOX_BACKUP_KERNEL_FN
);
52 let initramfs
= PathBuf
::from(buildcfg
::PROXMOX_BACKUP_INITRAMFS_FN
);
// Either image missing means the package is absent or broken.
53 if !kernel
.exists() || !initramfs
.exists() {
54 bail
!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed");
// NOTE(review): closing braces / Ok(()) (original lines 55-57) were
// dropped by the extraction.
// Best-effort teardown of a (likely broken) restore VM: probe that the PID
// is alive, verify via /proc/<pid>/cmdline that it is actually one of our
// restore VMs, then SIGKILL it.
59 fn try_kill_vm(pid
: i32) -> Result
<(), Error
> {
60 let pid
= Pid
::from_raw(pid
);
// kill(pid, None) sends no signal — it only probes that the process
// exists and that we are allowed to signal it.
61 if let Ok(()) = kill(pid
, None
) {
62 // process is running (and we could kill it), check if it is actually ours
63 // (if it errors assume we raced with the process's death and ignore it)
64 if let Ok(cmdline
) = file_read_string(format
!("/proc/{}/cmdline", pid
)) {
// /proc cmdline entries are NUL-separated; any argument equal to
// PBS_VM_NAME identifies the process as our restore VM.
65 if cmdline
.split('
\0'
).any(|a
| a
== PBS_VM_NAME
) {
66 // yes, it's ours, kill it brutally with SIGKILL, no reason to take
67 // any chances - in this state it's most likely broken anyway
68 if let Err(err
) = kill(pid
, Signal
::SIGKILL
) {
// NOTE(review): the error construction around this format string and
// the function tail (original lines 69, 71+) were dropped by the
// extraction.
70 "reaping broken VM (pid {}) with SIGKILL failed: {}",
// Build a temporary initramfs for the restore VM: copy the shipped base
// initramfs into an unlinked tmp file and append the connection ticket as
// an extra cpio entry, so the VM can authenticate vsock clients. Returns
// the raw Fd (kept alive by the caller) and its /dev/fd path.
82 async
fn create_temp_initramfs(ticket
: &str) -> Result
<(Fd
, String
), Error
> {
83 use std
::ffi
::CString
;
// NOTE(review): an import line (original 84, presumably tokio::fs::File
// given the from_std/.await usage below) is missing from this extraction.
// Create the tmp file, then immediately unlink it — the fd keeps the data
// alive and nothing is left behind on disk.
86 let (tmp_fd
, tmp_path
) =
87 make_tmp_file("/tmp/file-restore-qemu.initramfs.tmp", CreateOptions
::new())?
;
88 nix
::unistd
::unlink(&tmp_path
)?
;
// Clear O_CLOEXEC so the fd survives exec into QEMU (referenced as
// /dev/fd/<n> below).
89 tools
::fd_change_cloexec(tmp_fd
.0, false)?
;
// Wrap the raw fd in an async file handle; ownership is NOT transferred —
// the returned Fd is what ultimately closes it (see comment near the end).
91 let mut f
= File
::from_std(unsafe { std::fs::File::from_raw_fd(tmp_fd.0) }
);
92 let mut base
= File
::open(buildcfg
::PROXMOX_BACKUP_INITRAMFS_FN
).await?
;
// Copy the shipped base initramfs verbatim.
94 tokio
::io
::copy(&mut base
, &mut f
).await?
;
// Append the ticket as a cpio member named "ticket", mode 0400 regular
// file. NOTE(review): most append_file arguments (original lines 98-108)
// were dropped by the extraction.
96 let name
= CString
::new("ticket").unwrap();
97 tools
::cpio
::append_file(
102 (libc
::S_IFREG
| 0o400) as u16,
109 tools
::cpio
::append_trailer(&mut f
).await?
;
111 // forget the tokio file, we close the file descriptor via the returned Fd
// Path through which QEMU can read the initramfs fd.
114 let path
= format
!("/dev/fd/{}", &tmp_fd
.0);
// Start the single-file-restore QEMU VM for the given snapshot and .img.fidx
// files, retrying with incremented vsock guest-cids on collision. On success
// returns (pid, cid) of the running VM; on failure attempts to kill any
// half-started VM. Requires PBS_PASSWORD in the environment.
// NOTE(review): this extraction is missing many original lines (parameter
// list, base_args, cid initialization, loop headers) — comments below only
// describe what the visible lines establish.
118 pub async
fn start_vm(
119 // u16 so we can do wrapping_add without going too high
121 details
: &SnapRestoreDetails
,
122 files
: impl Iterator
<Item
= String
>,
124 ) -> Result
<(i32, i32), Error
> {
// Fail early if kernel/initramfs images are not installed.
125 validate_img_existance()?
;
// The VM's PBS client needs the password; refuse to start without it.
127 if let Err(_
) = std
::env
::var("PBS_PASSWORD") {
128 bail
!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
// Anonymous (unlinked) tmp file that QEMU writes its daemon PID into,
// passed via /dev/fd; cloexec is cleared so it survives the exec.
132 let (pid_fd
, pid_path
) = make_tmp_file("/tmp/file-restore-qemu.pid.tmp", CreateOptions
::new())?
;
133 nix
::unistd
::unlink(&pid_path
)?
;
134 tools
::fd_change_cloexec(pid_fd
.0, false)?
;
// Build the ticket-carrying initramfs (fd + /dev/fd path).
136 let (_ramfs_pid
, ramfs_path
) = create_temp_initramfs(ticket
).await?
;
// Per-VM QEMU log file with rotation (keep up to 16 rotations); rotation
// failure is only a warning, not fatal.
138 let logpath
= create_restore_log_dir()?
;
139 let logfile
= &format
!("{}/qemu.log", logpath
);
140 let mut logrotate
= tools
::logrotate
::LogRotate
::new(logfile
, false)
141 .ok_or_else(|| format_err
!("could not get QEMU log file names"))?
;
143 if let Err(err
) = logrotate
.do_rotate(CreateOptions
::default(), Some(16)) {
144 eprintln
!("warning: logrotate for QEMU log file failed - {}", err
);
// Open the log file ourselves and hand the fd to QEMU (open options on
// original lines 148-150 are missing from this extraction).
147 let mut logfd
= OpenOptions
::new()
151 tools
::fd_change_cloexec(logfd
.as_raw_fd(), false)?
;
153 // preface log file with start timestamp so one can see how long QEMU took to start
154 writeln
!(logfd
, "[{}] PBS file restore VM log", {
155 let now
= proxmox
::tools
::time
::epoch_i64();
156 proxmox
::tools
::time
::epoch_to_rfc3339(now
)?
// QEMU chardev arg: log everything to our fd, discard the device path.
// NOTE(review): surrounding base_args construction (original lines
// 157-184) is mostly missing from this extraction.
162 "file,id=log,path=/dev/null,logfile=/dev/fd/{},logappend=on",
173 buildcfg
::PROXMOX_BACKUP_KERNEL_FN
,
180 &format
!("/dev/fd/{}", pid_fd
.as_raw_fd()),
185 // Generate drive arguments for all fidx files in backup snapshot
186 let mut drives
= Vec
::new();
// Skip anything that is not a block-image index.
189 if !file
.ends_with(".img.fidx") {
192 drives
.push("-drive".to_owned());
// Commas inside a QEMU option value must be doubled (',,') to escape them.
194 "file=pbs:repository={},,snapshot={},,archive={},read-only=on,if=none,id=drive{}",
195 details
.repo
, details
.snapshot
, file
, id
197 drives
.push("-device".to_owned());
198 // drive serial is used by VM to map .fidx files to /dev paths
199 drives
.push(format
!("virtio-blk-pci,drive=drive{},serial={}", id
, file
));
203 // Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value
204 let mut attempts
= 0;
206 let mut qemu_cmd
= std
::process
::Command
::new("qemu-system-x86_64");
207 qemu_cmd
.args(base_args
.iter());
208 qemu_cmd
.args(&drives
);
// vsock device carries the restore API between host and VM.
209 qemu_cmd
.arg("-device");
210 qemu_cmd
.arg(format
!(
211 "vhost-vsock-pci,guest-cid={},disable-legacy=on",
// stdout discarded; stderr captured to detect CID collisions below.
215 qemu_cmd
.stdout(std
::process
::Stdio
::null());
216 qemu_cmd
.stderr(std
::process
::Stdio
::piped());
// Spawning/waiting is blocking I/O — run it via block_in_place so the
// tokio worker thread is not stalled illegally.
218 let res
= tokio
::task
::block_in_place(|| qemu_cmd
.spawn()?
.wait_with_output())?
;
220 if res
.status
.success() {
221 // at this point QEMU is already daemonized and running, so if anything fails we
222 // technically leave behind a zombie-VM... this shouldn't matter, as it will stop
223 // itself soon enough (timer), and the following operations are unlikely to fail
// Re-wrap the pid fd as a File to read QEMU's daemon PID; forget the
// original Fd so the fd is not closed twice.
224 let mut pid_file
= unsafe { File::from_raw_fd(pid_fd.as_raw_fd()) }
;
225 std
::mem
::forget(pid_fd
); // FD ownership is now in pid_fd/File
226 let mut pidstr
= String
::new();
227 pid_file
.read_to_string(&mut pidstr
)?
;
228 pid
= pidstr
.trim_end().parse().map_err(|err
| {
229 format_err
!("cannot parse PID returned by QEMU ('{}'): {}", &pidstr
, err
)
// QEMU failed to start: check stderr for a CID collision and retry with
// the next candidate, up to MAX_CID_TRIES attempts.
233 let out
= String
::from_utf8_lossy(&res
.stderr
);
234 if out
.contains("unable to set guest cid: Address already in use") {
236 if attempts
>= MAX_CID_TRIES
{
237 bail
!("CID '{}' in use, but max attempts reached, aborting", cid
);
239 // CID in use, try next higher one
240 eprintln
!("CID '{}' in use by other VM, attempting next one", cid
);
241 // skip special-meaning low values
242 cid
= cid
.wrapping_add(1).max(10);
245 bail
!("Starting VM failed. See output above for more information.");
250 // QEMU has started successfully, now wait for virtio socket to become ready
251 let pid_t
= Pid
::from_raw(pid
);
253 let client
= VsockClient
::new(cid
as i32, DEFAULT_VSOCK_PORT
, Some(ticket
.to_owned()));
// Poll the VM's status API with a 2s timeout per try; success means the
// VM is ready and we can return its pid/cid to the caller.
255 time
::timeout(Duration
::from_secs(2), client
.get("api2/json/status", None
)).await
257 return Ok((pid
, cid
as i32));
// If the probe failed AND the process is gone, the VM died before
// becoming reachable.
259 if kill(pid_t
, None
).is_err() {
261 bail
!("VM exited before connection could be established");
// Still alive but not ready yet — back off briefly and poll again.
263 time
::sleep(Duration
::from_millis(200)).await
;
// Timed out waiting for readiness: reap the half-started VM (best
// effort) and report the timeout.
267 if let Err(err
) = try_kill_vm(pid
) {
268 eprintln
!("killing failed VM failed: {}", err
);
270 bail
!("starting VM timed out");