//! Tools and utilities
//!
//! This is a collection of small and useful tools.
use std::any::Any;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::fs::{File, OpenOptions};
use std::io::ErrorKind;
use std::io::Read;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::path::Path;
use std::time::Duration;

use failure::*;
use serde_json::Value;
use openssl::hash::{hash, DigestBytes, MessageDigest};
use percent_encoding::AsciiSet;

use proxmox::tools::vec;

pub mod acl;
pub mod async_io;
pub mod borrow;
pub mod daemon;
pub mod fs;
pub mod format;
pub mod lru_cache;
pub mod runtime;
pub mod ticket;
pub mod timer;

mod wrapped_reader_stream;
pub use wrapped_reader_stream::*;

mod std_channel_writer;
pub use std_channel_writer::*;

pub mod xattr;

mod process_locker;
pub use process_locker::*;

mod file_logger;
pub use file_logger::*;

mod broadcast_future;
pub use broadcast_future::*;

/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
/// purpose of this trait is to avoid unnecessary data copies.
pub trait BufferedRead {
    /// This function tries to fill the internal buffers, then
    /// returns a reference to the available data. It returns an empty
    /// buffer if `offset` points to the end of the file.
    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}
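
// Usage sketch (illustrative; `reader` stands for any type implementing
// `BufferedRead`): iterate through a file without copying by asking for the
// buffer at the current offset until an empty slice signals end-of-file.
//
//     let mut offset = 0u64;
//     loop {
//         let data = reader.buffered_read(offset)?;
//         if data.is_empty() { break; }
//         // process `data` here, borrowing it directly from the internal buffer
//         offset += data.len() as u64;
//     }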

/// Directly map a type into a binary buffer. This is mostly useful
/// for reading structured data from a byte stream (file). You need to
/// make sure that the buffer location does not change, so avoid
/// resizing the `Vec` while such a mapping is in use.
///
/// This function fails if the buffer is not large enough.
pub fn map_struct<T>(buffer: &[u8]) -> Result<&T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &*(buffer.as_ptr() as *const T) })
}

/// Directly map a type into a mutable binary buffer. This is mostly
/// useful for writing structured data into a byte stream (file). You
/// need to make sure that the buffer location does not change, so
/// avoid resizing the `Vec` while such a mapping is in use.
///
/// This function fails if the buffer is not large enough.
pub fn map_struct_mut<T>(buffer: &mut [u8]) -> Result<&mut T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &mut *(buffer.as_mut_ptr() as *mut T) })
}
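
// Sketch of a typical use (the header type below is a made-up example, not a
// type defined in this crate): the mapped type should be `#[repr(C)]` plain
// old data, and the buffer must not move or shrink while the reference is used.
//
//     #[repr(C)]
//     struct ExampleHeader { magic: [u8; 8], size: u64 }
//
//     let header: &ExampleHeader = map_struct(&buffer)?;
//     if &header.magic != b"EXAMPLE\0" { bail!("wrong magic number"); }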

/// Create a file lock using `flock()`. This function allows you to specify
/// a timeout if you want to avoid infinite blocking.
pub fn lock_file<F: AsRawFd>(
    file: &mut F,
    exclusive: bool,
    timeout: Option<Duration>,
) -> Result<(), Error> {
    let lockarg = if exclusive {
        nix::fcntl::FlockArg::LockExclusive
    } else {
        nix::fcntl::FlockArg::LockShared
    };

    let timeout = match timeout {
        None => {
            nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
            return Ok(());
        }
        Some(t) => t,
    };

    // unblock the timeout signal temporarily
    let _sigblock_guard = timer::unblock_timeout_signal();

    // set up a timeout timer
    let mut timer = timer::Timer::create(
        timer::Clock::Realtime,
        timer::TimerEvent::ThisThreadSignal(timer::SIGTIMEOUT),
    )?;

    timer.arm(
        timer::TimerSpec::new()
            .value(Some(timeout))
            .interval(Some(Duration::from_millis(10))),
    )?;

    nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
    Ok(())
}

/// Open or create a lock file (append mode). Then try to
/// acquire a lock using `lock_file()`.
pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
    let path = path.as_ref();
    let mut file = match OpenOptions::new().create(true).append(true).open(path) {
        Ok(file) => file,
        Err(err) => bail!("Unable to open lock {:?} - {}", path, err),
    };
    match lock_file(&mut file, true, Some(timeout)) {
        Ok(_) => Ok(file),
        Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
    }
}
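
// Usage sketch (path and timeout are illustrative): the returned `File` holds
// the lock for as long as it is kept alive; dropping it releases the lock.
//
//     let lock = open_file_locked("/run/proxmox-backup/example.lck", Duration::from_secs(10))?;
//     // ... perform the protected update while `lock` is in scope ...
//     drop(lock); // released here (or implicitly at the end of the scope)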

/// Split a file into equal sized chunks. The last chunk may be
/// smaller. Note: We cannot implement an `Iterator`, because iterators
/// cannot return a borrowed buffer ref (we want zero-copy).
pub fn file_chunker<C, R>(mut file: R, chunk_size: usize, mut chunk_cb: C) -> Result<(), Error>
where
    C: FnMut(usize, &[u8]) -> Result<bool, Error>,
    R: Read,
{
    const READ_BUFFER_SIZE: usize = 4 * 1024 * 1024; // 4M

    if chunk_size > READ_BUFFER_SIZE {
        bail!("chunk size too large!");
    }

    let mut buf = vec::undefined(READ_BUFFER_SIZE);

    let mut pos = 0;
    let mut file_pos = 0;
    loop {
        let mut eof = false;
        let mut tmp = &mut buf[..];
        // try to read large portions, at least chunk_size
        while pos < chunk_size {
            match file.read(tmp) {
                Ok(0) => {
                    eof = true;
                    break;
                }
                Ok(n) => {
                    pos += n;
                    if pos > chunk_size {
                        break;
                    }
                    tmp = &mut tmp[n..];
                }
                Err(ref e) if e.kind() == ErrorKind::Interrupted => { /* try again */ }
                Err(e) => bail!("read chunk failed - {}", e.to_string()),
            }
        }
        let mut start = 0;
        while start + chunk_size <= pos {
            if !(chunk_cb)(file_pos, &buf[start..start + chunk_size])? {
                break;
            }
            file_pos += chunk_size;
            start += chunk_size;
        }
        if eof {
            if start < pos {
                (chunk_cb)(file_pos, &buf[start..pos])?;
                //file_pos += pos - start;
            }
            break;
        } else {
            let rest = pos - start;
            if rest > 0 {
                let ptr = buf.as_mut_ptr();
                unsafe {
                    std::ptr::copy_nonoverlapping(ptr.add(start), ptr, rest);
                }
                pos = rest;
            } else {
                pos = 0;
            }
        }
    }

    Ok(())
}
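
// Usage sketch (file name and chunk size are illustrative): the callback
// receives the absolute file position and a borrowed chunk; returning
// `Ok(false)` stops early, `Ok(true)` continues.
//
//     let file = std::fs::File::open("/path/to/data")?;
//     file_chunker(file, 64 * 1024, |pos, chunk| {
//         println!("chunk at offset {}: {} bytes", pos, chunk.len());
//         Ok(true)
//     })?;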

pub fn json_object_to_query(data: Value) -> Result<String, Error> {
    let mut query = url::form_urlencoded::Serializer::new(String::new());

    let object = data.as_object().ok_or_else(|| {
        format_err!("json_object_to_query: got wrong data type (expected object).")
    })?;

    for (key, value) in object {
        match value {
            Value::Bool(b) => {
                query.append_pair(key, &b.to_string());
            }
            Value::Number(n) => {
                query.append_pair(key, &n.to_string());
            }
            Value::String(s) => {
                query.append_pair(key, &s);
            }
            Value::Array(arr) => {
                for element in arr {
                    match element {
                        Value::Bool(b) => {
                            query.append_pair(key, &b.to_string());
                        }
                        Value::Number(n) => {
                            query.append_pair(key, &n.to_string());
                        }
                        Value::String(s) => {
                            query.append_pair(key, &s);
                        }
                        _ => bail!(
                            "json_object_to_query: unable to handle complex array data types."
                        ),
                    }
                }
            }
            _ => bail!("json_object_to_query: unable to handle complex data types."),
        }
    }

    Ok(query.finish())
}
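
// Example (illustrative values; uses the `serde_json::json!` macro): a flat
// JSON object becomes an `application/x-www-form-urlencoded` query string,
// with array values repeating their key. Key order depends on the underlying map.
//
//     let query = json_object_to_query(json!({
//         "store": "backup",
//         "types": ["ct", "vm"],
//         "verbose": true,
//     }))?;
//     // e.g. "store=backup&types=ct&types=vm&verbose=true"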

pub fn required_string_param<'a>(param: &'a Value, name: &str) -> Result<&'a str, Error> {
    match param[name].as_str() {
        Some(s) => Ok(s),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_string_property<'a>(param: &'a Value, name: &str) -> Result<&'a str, Error> {
    match param[name].as_str() {
        Some(s) => Ok(s),
        None => bail!("missing property '{}'", name),
    }
}

pub fn required_integer_param<'a>(param: &'a Value, name: &str) -> Result<i64, Error> {
    match param[name].as_i64() {
        Some(s) => Ok(s),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_integer_property<'a>(param: &'a Value, name: &str) -> Result<i64, Error> {
    match param[name].as_i64() {
        Some(s) => Ok(s),
        None => bail!("missing property '{}'", name),
    }
}

pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<Vec<Value>, Error> {
    match param[name].as_array() {
        Some(s) => Ok(s.to_vec()),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<Vec<Value>, Error> {
    match param[name].as_array() {
        Some(s) => Ok(s.to_vec()),
        None => bail!("missing property '{}'", name),
    }
}
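
// Usage sketch (parameter names are illustrative): these helpers turn a
// missing or wrongly typed field of an API parameter object into an error.
//
//     let name = required_string_param(&param, "name")?;
//     let size = required_integer_param(&param, "size")?;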

pub fn complete_file_name<S: BuildHasher>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String> {
    let mut result = vec![];

    use nix::fcntl::AtFlags;
    use nix::fcntl::OFlag;
    use nix::sys::stat::Mode;

    let mut dirname = std::path::PathBuf::from(if arg.is_empty() { "./" } else { arg });

    let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
        Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
        Err(_) => false,
    };

    if !is_dir {
        if let Some(parent) = dirname.parent() {
            dirname = parent.to_owned();
        }
    }

    let mut dir =
        match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
            Ok(d) => d,
            Err(_) => return result,
        };

    for item in dir.iter() {
        if let Ok(entry) = item {
            if let Ok(name) = entry.file_name().to_str() {
                if name == "." || name == ".." {
                    continue;
                }
                let mut newpath = dirname.clone();
                newpath.push(name);

                if let Ok(stat) =
                    nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty())
                {
                    if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
                        newpath.push("");
                        if let Some(newpath) = newpath.to_str() {
                            result.push(newpath.to_owned());
                        }
                        continue;
                    }
                }
                if let Some(newpath) = newpath.to_str() {
                    result.push(newpath.to_owned());
                }
            }
        }
    }

    result
}

/// Scan directory for matching file names.
///
/// Scan through all directory entries and call the `callback()` function
/// if the entry name matches the regular expression. This function
/// uses the unix `openat()` call, so you can pass absolute or relative file
/// names. Non-UTF-8 encoded names are simply skipped.
pub fn scandir<P, F>(
    dirfd: RawFd,
    path: &P,
    regex: &regex::Regex,
    mut callback: F,
) -> Result<(), Error>
where
    F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>,
    P: ?Sized + nix::NixPath,
{
    for entry in self::fs::scan_subdir(dirfd, path, regex)? {
        let entry = entry?;
        let file_type = match entry.file_type() {
            Some(file_type) => file_type,
            None => bail!("unable to detect file type"),
        };

        callback(
            entry.parent_fd(),
            unsafe { entry.file_name_utf8_unchecked() },
            file_type,
        )?;
    }
    Ok(())
}
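
// Usage sketch (directory and pattern are illustrative): list all entries of
// a directory, relative to the current working directory, whose names end in
// ".conf".
//
//     let re = regex::Regex::new(r"\.conf$")?;
//     scandir(libc::AT_FDCWD, "/etc/proxmox-backup", &re, |_parent_fd, name, file_type| {
//         println!("{} ({:?})", name, file_type);
//         Ok(())
//     })?;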

/// Shortcut for md5 sums.
pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
    hash(MessageDigest::md5(), data).map_err(Error::from)
}

pub fn get_hardware_address() -> Result<String, Error> {
    static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";

    let contents = proxmox::tools::fs::file_get_contents(FILENAME)?;
    let digest = md5sum(&contents)?;

    Ok(proxmox::tools::bin_to_hex(&digest))
}

pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
    if digest1 != digest2 {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}

/// Extract the authentication cookie from a cookie header.
/// We assume `cookie_name` is already URL encoded.
pub fn extract_auth_cookie(cookie: &str, cookie_name: &str) -> Option<String> {
    for pair in cookie.split(';') {
        let (name, value) = match pair.find('=') {
            Some(i) => (pair[..i].trim(), pair[(i + 1)..].trim()),
            None => return None, // Cookie format error
        };

        if name == cookie_name {
            use percent_encoding::percent_decode;
            if let Ok(value) = percent_decode(value.as_bytes()).decode_utf8() {
                return Some(value.into());
            } else {
                return None; // Cookie format error
            }
        }
    }

    None
}
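
// Example (header contents are illustrative): pick one named cookie out of a
// `Cookie:` header value and percent-decode it.
//
//     let header = "lang=en; PBSAuthCookie=PBS%3Aroot%40pam%3ATICKET";
//     let ticket = extract_auth_cookie(header, "PBSAuthCookie");
//     // -> Some("PBS:root@pam:TICKET".to_string())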

pub fn join(data: &Vec<String>, sep: char) -> String {
    let mut list = String::new();

    for item in data {
        if !list.is_empty() {
            list.push(sep);
        }
        list.push_str(item);
    }

    list
}
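
// Example: join(&vec!["zfs".to_string(), "ext4".to_string()], ',') returns "zfs,ext4".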

/// Detect modified configuration files
///
/// This function fails with a reasonable error message if checksums do not match.
pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
    if digest1 != digest2 {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}

/// normalize uri path
///
/// Do not allow ".", "..", or hidden files ".XXXX"
/// Also remove empty path components
pub fn normalize_uri_path(path: &str) -> Result<(String, Vec<&str>), Error> {
    let items = path.split('/');

    let mut path = String::new();
    let mut components = vec![];

    for name in items {
        if name.is_empty() {
            continue;
        }
        if name.starts_with('.') {
            bail!("Path contains illegal components.");
        }
        path.push('/');
        path.push_str(name);
        components.push(name);
    }

    Ok((path, components))
}
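
// Example (paths are illustrative): empty components are dropped and the
// normalized path always starts with a single slash.
//
//     let (path, components) = normalize_uri_path("/api2//json/version")?;
//     // path == "/api2/json/version", components == ["api2", "json", "version"]
//     assert!(normalize_uri_path("/api2/../secret").is_err()); // ".." is rejected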

pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
    use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
    let mut flags = FdFlag::from_bits(fcntl(fd, F_GETFD)?)
        .ok_or_else(|| format_err!("unhandled file flags"))?; // nix crate is stupid this way...
    flags.set(FdFlag::FD_CLOEXEC, on);
    fcntl(fd, F_SETFD(flags))?;
    Ok(())
}
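
// Usage sketch (`listen_fd` is an illustrative descriptor, e.g. a listening
// socket that should survive a daemon re-exec):
//
//     fd_change_cloexec(listen_fd, false)?; // keep open across exec()
//     fd_change_cloexec(listen_fd, true)?;  // back to close-on-exec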

static mut SHUTDOWN_REQUESTED: bool = false;

pub fn request_shutdown() {
    unsafe {
        SHUTDOWN_REQUESTED = true;
    }
    crate::server::server_shutdown();
}

#[inline(always)]
pub fn shutdown_requested() -> bool {
    unsafe { SHUTDOWN_REQUESTED }
}

pub fn fail_on_shutdown() -> Result<(), Error> {
    if shutdown_requested() {
        bail!("Server shutdown requested - aborting task");
    }
    Ok(())
}

/// Guard a raw file descriptor with a drop handler. This is mostly useful when access to an owned
/// `RawFd` is required without the corresponding handler object (such as when only the file
/// descriptor number is required in a closure which may be dropped instead of being executed).
pub struct Fd(pub RawFd);

impl Drop for Fd {
    fn drop(&mut self) {
        if self.0 != -1 {
            unsafe {
                libc::close(self.0);
            }
        }
    }
}

impl AsRawFd for Fd {
    fn as_raw_fd(&self) -> RawFd {
        self.0
    }
}

impl IntoRawFd for Fd {
    fn into_raw_fd(mut self) -> RawFd {
        let fd = self.0;
        self.0 = -1;
        fd
    }
}

impl FromRawFd for Fd {
    unsafe fn from_raw_fd(fd: RawFd) -> Self {
        Self(fd)
    }
}

// wrap nix::unistd::pipe2 + O_CLOEXEC into something returning guarded file descriptors
pub fn pipe() -> Result<(Fd, Fd), Error> {
    let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
    Ok((Fd(pin), Fd(pout)))
}
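
// Usage sketch: both guarded descriptors close themselves on drop; use
// `into_raw_fd()` to hand one end over without closing it.
//
//     let (read_end, write_end) = pipe()?;
//     nix::unistd::write(write_end.as_raw_fd(), b"ping")?;
//     let mut buf = [0u8; 4];
//     nix::unistd::read(read_end.as_raw_fd(), &mut buf)?;
//     // both ends are closed automatically when the guards go out of scope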

/// An easy way to convert types to Any
///
/// Mostly useful to downcast trait objects (see RpcEnvironment).
pub trait AsAny {
    fn as_any(&self) -> &dyn Any;
}

impl<T: Any> AsAny for T {
    fn as_any(&self) -> &dyn Any {
        self
    }
}
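
// Usage sketch (the concrete type name is illustrative): given a trait object
// whose trait requires `AsAny`, downcast it back to a known concrete type.
//
//     if let Some(env) = rpcenv.as_any().downcast_ref::<MyEnvironment>() {
//         // use the concrete `MyEnvironment` here
//     }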

/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0x00..=0x1f and 0x7f
    // the old SIMPLE_ENCODE_SET additionally covered space and everything above 0x7e
    // (0x7f is already part of CONTROLS)
    .add(0x20)
    .add(0x7f)
    // the DEFAULT_ENCODE_SET added:
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'<')
    .add(b'>')
    .add(b'`')
    .add(b'?')
    .add(b'{')
    .add(b'}');
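
// Usage sketch (input is illustrative): percent-encode a value with this set,
// e.g. before embedding it in a URL.
//
//     use percent_encoding::percent_encode;
//     let encoded = percent_encode(b"local store/vm 100", DEFAULT_ENCODE_SET).to_string();
//     // spaces (and the other listed characters) are escaped: "local%20store/vm%20100"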