//! Tools and utilities
//!
//! This is a collection of small and useful tools.
use std::any::Any;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::fs::{File, OpenOptions};
use std::io::ErrorKind;
use std::io::Read;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use serde_json::Value;
use openssl::hash::{hash, DigestBytes, MessageDigest};
use percent_encoding::AsciiSet;

use proxmox::tools::vec;

pub use proxmox::tools::fd::Fd;

pub mod acl;
pub mod async_io;
pub mod borrow;
pub mod daemon;
pub mod disks;
pub mod fs;
pub mod format;
pub mod lru_cache;
pub mod runtime;
pub mod ticket;
pub mod timer;
pub mod systemd;

mod wrapped_reader_stream;
pub use wrapped_reader_stream::*;

mod std_channel_writer;
pub use std_channel_writer::*;

pub mod xattr;

mod process_locker;
pub use process_locker::*;

mod file_logger;
pub use file_logger::*;

mod broadcast_future;
pub use broadcast_future::*;
/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
/// purpose of this trait is to avoid unnecessary data copies.
pub trait BufferedRead {
    /// This function tries to fill the internal buffers, then
    /// returns a reference to the available data. It returns an empty
    /// buffer if `offset` points to the end of the file.
    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}

/// Directly map a type into a binary buffer. This is mostly useful
/// for reading structured data from a byte stream (file). You need to
/// make sure that the buffer location does not change, so please
/// avoid resizing the `Vec` while such a mapping is in use.
///
/// This function fails with an error if the buffer is not large enough.
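///
/// # Example
///
/// A rough usage sketch; the header struct and buffer contents below are
/// made up for illustration (real callers must also keep alignment in mind):
///
/// ```ignore
/// #[repr(C)]
/// struct Header { magic: [u8; 4], version: u8 }
///
/// let buffer = vec![b'P', b'B', b'S', b'1', 2u8];
/// let header: &Header = map_struct(&buffer)?;
/// assert_eq!(header.version, 2);
/// ```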
pub fn map_struct<T>(buffer: &[u8]) -> Result<&T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &*(buffer.as_ptr() as *const T) })
}

/// Directly map a type into a mutable binary buffer. This is mostly
/// useful for writing structured data into a byte stream (file). You
/// need to make sure that the buffer location does not change, so
/// please avoid resizing the `Vec` while such a mapping is in use.
///
/// This function fails with an error if the buffer is not large enough.
pub fn map_struct_mut<T>(buffer: &mut [u8]) -> Result<&mut T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &mut *(buffer.as_mut_ptr() as *mut T) })
}

/// Create a file lock using `nix::fcntl::flock`. This function allows you to
/// specify a timeout if you want to avoid infinite blocking.
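///
/// A rough usage sketch (the lock file path is made up):
///
/// ```ignore
/// let mut file = OpenOptions::new().create(true).append(true).open("/run/example.lck")?;
/// // wait at most 10 seconds for an exclusive lock
/// lock_file(&mut file, true, Some(Duration::from_secs(10)))?;
/// ```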
pub fn lock_file<F: AsRawFd>(
    file: &mut F,
    exclusive: bool,
    timeout: Option<Duration>,
) -> Result<(), Error> {
    let lockarg = if exclusive {
        nix::fcntl::FlockArg::LockExclusive
    } else {
        nix::fcntl::FlockArg::LockShared
    };

    let timeout = match timeout {
        None => {
            nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
            return Ok(());
        }
        Some(t) => t,
    };

    // unblock the timeout signal temporarily
    let _sigblock_guard = timer::unblock_timeout_signal();

    // setup a timeout timer
    let mut timer = timer::Timer::create(
        timer::Clock::Realtime,
        timer::TimerEvent::ThisThreadSignal(timer::SIGTIMEOUT),
    )?;

    timer.arm(
        timer::TimerSpec::new()
            .value(Some(timeout))
            .interval(Some(Duration::from_millis(10))),
    )?;

    nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
    Ok(())
}

/// Open or create a lock file (append mode). Then try to
/// acquire a lock using `lock_file()`.
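///
/// Sketch (illustrative path):
///
/// ```ignore
/// let _lock = open_file_locked("/run/example.lck", Duration::from_secs(10))?;
/// // the lock is held until `_lock` is dropped
/// ```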
pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
    let path = path.as_ref();
    let mut file = match OpenOptions::new().create(true).append(true).open(path) {
        Ok(file) => file,
        Err(err) => bail!("Unable to open lock {:?} - {}", path, err),
    };
    match lock_file(&mut file, true, Some(timeout)) {
        Ok(_) => Ok(file),
        Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
    }
}

/// Split a file into equally sized chunks. The last chunk may be
/// smaller. Note: We cannot implement an `Iterator`, because iterators
/// cannot return a borrowed buffer ref (we want zero-copy).
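///
/// A rough usage sketch (any `Read` source works; the sizes are arbitrary):
///
/// ```ignore
/// let data = vec![0u8; 10 * 1024];
/// let mut chunks = 0;
/// file_chunker(&data[..], 4096, |_pos, _chunk| {
///     chunks += 1;
///     Ok(true) // return false to stop early
/// })?;
/// assert_eq!(chunks, 3); // two full 4 KiB chunks plus the smaller rest
/// ```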
pub fn file_chunker<C, R>(mut file: R, chunk_size: usize, mut chunk_cb: C) -> Result<(), Error>
where
    C: FnMut(usize, &[u8]) -> Result<bool, Error>,
    R: Read,
{
    const READ_BUFFER_SIZE: usize = 4 * 1024 * 1024; // 4M

    if chunk_size > READ_BUFFER_SIZE {
        bail!("chunk size too large!");
    }

    let mut buf = vec::undefined(READ_BUFFER_SIZE);

    let mut pos = 0;
    let mut file_pos = 0;
    loop {
        let mut eof = false;
        // start filling after any data carried over from the previous iteration
        let mut tmp = &mut buf[pos..];
        // try to read large portions, at least chunk_size
        while pos < chunk_size {
            match file.read(tmp) {
                Ok(0) => {
                    eof = true;
                    break;
                }
                Ok(n) => {
                    pos += n;
                    if pos > chunk_size {
                        break;
                    }
                    tmp = &mut tmp[n..];
                }
                Err(ref e) if e.kind() == ErrorKind::Interrupted => { /* try again */ }
                Err(e) => bail!("read chunk failed - {}", e.to_string()),
            }
        }
        let mut start = 0;
        while start + chunk_size <= pos {
            if !(chunk_cb)(file_pos, &buf[start..start + chunk_size])? {
                break;
            }
            file_pos += chunk_size;
            start += chunk_size;
        }
        if eof {
            if start < pos {
                (chunk_cb)(file_pos, &buf[start..pos])?;
                //file_pos += pos - start;
            }
            break;
        } else {
            let rest = pos - start;
            if rest > 0 {
                let ptr = buf.as_mut_ptr();
                unsafe {
                    std::ptr::copy_nonoverlapping(ptr.add(start), ptr, rest);
                }
                pos = rest;
            } else {
                pos = 0;
            }
        }
    }

    Ok(())
}

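/// Serialize a flat JSON object into a URL query string.
///
/// A rough usage sketch (values are illustrative):
///
/// ```ignore
/// use serde_json::json;
///
/// let query = json_object_to_query(json!({ "limit": 10, "verbose": true }))?;
/// assert_eq!(query, "limit=10&verbose=true");
/// ```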
pub fn json_object_to_query(data: Value) -> Result<String, Error> {
    let mut query = url::form_urlencoded::Serializer::new(String::new());

    let object = data.as_object().ok_or_else(|| {
        format_err!("json_object_to_query: got wrong data type (expected object).")
    })?;

    for (key, value) in object {
        match value {
            Value::Bool(b) => {
                query.append_pair(key, &b.to_string());
            }
            Value::Number(n) => {
                query.append_pair(key, &n.to_string());
            }
            Value::String(s) => {
                query.append_pair(key, &s);
            }
            Value::Array(arr) => {
                for element in arr {
                    match element {
                        Value::Bool(b) => {
                            query.append_pair(key, &b.to_string());
                        }
                        Value::Number(n) => {
                            query.append_pair(key, &n.to_string());
                        }
                        Value::String(s) => {
                            query.append_pair(key, &s);
                        }
                        _ => bail!(
                            "json_object_to_query: unable to handle complex array data types."
                        ),
                    }
                }
            }
            _ => bail!("json_object_to_query: unable to handle complex data types."),
        }
    }

    Ok(query.finish())
}

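/// Get a required string parameter from a JSON object, failing with a
/// descriptive error if it is missing.
///
/// Sketch (the parameter name is illustrative):
///
/// ```ignore
/// use serde_json::json;
///
/// let param = json!({ "name": "backup1" });
/// assert_eq!(required_string_param(&param, "name")?, "backup1");
/// assert!(required_string_param(&param, "missing").is_err());
/// ```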
pub fn required_string_param<'a>(param: &'a Value, name: &str) -> Result<&'a str, Error> {
    match param[name].as_str() {
        Some(s) => Ok(s),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_string_property<'a>(param: &'a Value, name: &str) -> Result<&'a str, Error> {
    match param[name].as_str() {
        Some(s) => Ok(s),
        None => bail!("missing property '{}'", name),
    }
}

pub fn required_integer_param(param: &Value, name: &str) -> Result<i64, Error> {
    match param[name].as_i64() {
        Some(s) => Ok(s),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_integer_property(param: &Value, name: &str) -> Result<i64, Error> {
    match param[name].as_i64() {
        Some(s) => Ok(s),
        None => bail!("missing property '{}'", name),
    }
}

pub fn required_array_param(param: &Value, name: &str) -> Result<Vec<Value>, Error> {
    match param[name].as_array() {
        Some(s) => Ok(s.to_vec()),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_array_property(param: &Value, name: &str) -> Result<Vec<Value>, Error> {
    match param[name].as_array() {
        Some(s) => Ok(s.to_vec()),
        None => bail!("missing property '{}'", name),
    }
}

pub fn complete_file_name<S: BuildHasher>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String> {
    let mut result = vec![];

    use nix::fcntl::AtFlags;
    use nix::fcntl::OFlag;
    use nix::sys::stat::Mode;

    let mut dirname = std::path::PathBuf::from(if arg.is_empty() { "./" } else { arg });

    let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
        Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
        Err(_) => false,
    };

    if !is_dir {
        if let Some(parent) = dirname.parent() {
            dirname = parent.to_owned();
        }
    }

    let mut dir =
        match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
            Ok(d) => d,
            Err(_) => return result,
        };

    for item in dir.iter() {
        if let Ok(entry) = item {
            if let Ok(name) = entry.file_name().to_str() {
                if name == "." || name == ".." {
                    continue;
                }
                let mut newpath = dirname.clone();
                newpath.push(name);

                if let Ok(stat) =
                    nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty())
                {
                    if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
                        newpath.push("");
                        if let Some(newpath) = newpath.to_str() {
                            result.push(newpath.to_owned());
                        }
                        continue;
                    }
                }
                if let Some(newpath) = newpath.to_str() {
                    result.push(newpath.to_owned());
                }
            }
        }
    }

    result
}

/// Scan directory for matching file names.
///
/// Scan through all directory entries and call the `callback()` function
/// if the entry name matches the regular expression. This function
/// uses the unix `openat()` call, so you can pass absolute or relative file
/// names. Non-UTF-8 encoded names are simply skipped.
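///
/// A rough usage sketch (directory and pattern are illustrative):
///
/// ```ignore
/// let re = regex::Regex::new(r"\.log$")?;
/// scandir(libc::AT_FDCWD, "/var/log", &re, |_dirfd, name, _file_type| {
///     println!("found {}", name);
///     Ok(())
/// })?;
/// ```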
pub fn scandir<P, F>(
    dirfd: RawFd,
    path: &P,
    regex: &regex::Regex,
    mut callback: F,
) -> Result<(), Error>
where
    F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>,
    P: ?Sized + nix::NixPath,
{
    for entry in self::fs::scan_subdir(dirfd, path, regex)? {
        let entry = entry?;
        let file_type = match entry.file_type() {
            Some(file_type) => file_type,
            None => bail!("unable to detect file type"),
        };

        callback(
            entry.parent_fd(),
            unsafe { entry.file_name_utf8_unchecked() },
            file_type,
        )?;
    }
    Ok(())
}

/// Shortcut for md5 sums.
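///
/// Sketch:
///
/// ```ignore
/// let digest = md5sum(b"hello")?;
/// assert_eq!(proxmox::tools::bin_to_hex(&digest), "5d41402abc4b2a76b9719d911017c592");
/// ```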
pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
    hash(MessageDigest::md5(), data).map_err(Error::from)
}

pub fn get_hardware_address() -> Result<String, Error> {
    static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";

    let contents = proxmox::tools::fs::file_get_contents(FILENAME)?;
    let digest = md5sum(&contents)?;

    Ok(proxmox::tools::bin_to_hex(&digest))
}

pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
    if digest1 != digest2 {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}

/// Extract the authentication cookie from a cookie header.
/// We assume `cookie_name` is already URL encoded.
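///
/// Sketch (cookie values are made up):
///
/// ```ignore
/// let header = "lang=en; PBSAuthCookie=PBS%3Aroot%40pam%3A12345";
/// let ticket = extract_auth_cookie(header, "PBSAuthCookie");
/// assert_eq!(ticket.as_deref(), Some("PBS:root@pam:12345"));
/// ```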
pub fn extract_auth_cookie(cookie: &str, cookie_name: &str) -> Option<String> {
    for pair in cookie.split(';') {
        let (name, value) = match pair.find('=') {
            Some(i) => (pair[..i].trim(), pair[(i + 1)..].trim()),
            None => return None, // Cookie format error
        };

        if name == cookie_name {
            use percent_encoding::percent_decode;
            if let Ok(value) = percent_decode(value.as_bytes()).decode_utf8() {
                return Some(value.into());
            } else {
                return None; // Cookie format error
            }
        }
    }

    None
}

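/// Join a list of strings, separated by the given character.
///
/// Sketch:
///
/// ```ignore
/// let list = vec!["a".to_string(), "b".to_string()];
/// assert_eq!(join(&list, ','), "a,b");
/// ```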
pub fn join(data: &Vec<String>, sep: char) -> String {
    let mut list = String::new();

    for item in data {
        if !list.is_empty() {
            list.push(sep);
        }
        list.push_str(item);
    }

    list
}

/// Detect modified configuration files
///
/// This function fails with a reasonable error message if checksums do not match.
pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
    if digest1 != digest2 {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}

/// Normalize a URI path.
///
/// Do not allow ".", "..", or hidden files ".XXXX".
/// Also remove empty path components.
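///
/// Sketch:
///
/// ```ignore
/// let (path, components) = normalize_uri_path("/api2//json/nodes")?;
/// assert_eq!(path, "/api2/json/nodes");
/// assert_eq!(components, vec!["api2", "json", "nodes"]);
/// assert!(normalize_uri_path("/api2/../nodes").is_err());
/// ```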
pub fn normalize_uri_path(path: &str) -> Result<(String, Vec<&str>), Error> {
    let items = path.split('/');

    let mut path = String::new();
    let mut components = vec![];

    for name in items {
        if name.is_empty() {
            continue;
        }
        if name.starts_with('.') {
            bail!("Path contains illegal components.");
        }
        path.push('/');
        path.push_str(name);
        components.push(name);
    }

    Ok((path, components))
}

pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
    use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
    let mut flags = FdFlag::from_bits(fcntl(fd, F_GETFD)?)
        .ok_or_else(|| format_err!("unhandled file flags"))?; // nix crate is stupid this way...
    flags.set(FdFlag::FD_CLOEXEC, on);
    fcntl(fd, F_SETFD(flags))?;
    Ok(())
}

static mut SHUTDOWN_REQUESTED: bool = false;

pub fn request_shutdown() {
    unsafe {
        SHUTDOWN_REQUESTED = true;
    }
    crate::server::server_shutdown();
}

#[inline(always)]
pub fn shutdown_requested() -> bool {
    unsafe { SHUTDOWN_REQUESTED }
}

pub fn fail_on_shutdown() -> Result<(), Error> {
    if shutdown_requested() {
        bail!("Server shutdown requested - aborting task");
    }
    Ok(())
}

// wrap nix::unistd::pipe2 + O_CLOEXEC into something returning guarded file descriptors
pub fn pipe() -> Result<(Fd, Fd), Error> {
    let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
    Ok((Fd(pin), Fd(pout)))
}

/// An easy way to convert types to Any
///
/// Mostly useful to downcast trait objects (see RpcEnvironment).
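///
/// Sketch:
///
/// ```ignore
/// let value: Box<dyn AsAny> = Box::new(42u32);
/// assert_eq!(value.as_any().downcast_ref::<u32>(), Some(&42));
/// ```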
pub trait AsAny {
    fn as_any(&self) -> &dyn Any;
}

impl<T: Any> AsAny for T {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
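///
/// A rough usage sketch:
///
/// ```ignore
/// use percent_encoding::percent_encode;
///
/// let encoded = percent_encode(b"datastore name?", DEFAULT_ENCODE_SET).to_string();
/// assert_eq!(encoded, "datastore%20name%3F");
/// ```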
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0x00..=0x1f and 0x7f (DEL)
    // explicitly add space and DEL (DEL is already part of CONTROLS above)
    .add(0x20)
    .add(0x7f)
    // the DEFAULT_ENCODE_SET added:
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'<')
    .add(b'>')
    .add(b'`')
    .add(b'?')
    .add(b'{')
    .add(b'}');