use std::sync::{Mutex, Arc};
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;
use std::future::Future;
use std::pin::Pin;

use anyhow::{bail, format_err, Error};
use futures::*;
use http::request::Parts;
use http::Response;
use hyper::{Body, StatusCode};
use hyper::header;
use url::form_urlencoded;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use tokio_stream::wrappers::ReceiverStream;
use serde_json::{json, Value};
use http::{Method, HeaderMap};

use proxmox::sys::linux::socket::set_tcp_keepalive;
use proxmox::tools::fs::CreateOptions;
use proxmox_lang::try_block;
use proxmox_router::{RpcEnvironment, RpcEnvironmentType, UserInformation};

use pbs_tools::{task_log, task_warn};
use pbs_datastore::DataStore;
use proxmox_rrd::DST;

use proxmox_rest_server::{
    rotate_task_log_archive, extract_cookie, AuthError, ApiConfig, RestServer, RestEnvironment,
    ServerAdapter, WorkerTask, cleanup_old_tasks,
};

use proxmox_backup::{
    get_rrd_cache, initialize_rrd_cache,
    server::{
        auth::check_pbs_auth,
        jobstate::{
            self,
            Job,
        },
    },
};

use pbs_buildcfg::configdir;
use proxmox_systemd::time::{compute_next_event, parse_calendar_event};
use pbs_tools::logrotate::LogRotate;

use pbs_api_types::{
    Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig, DataStoreConfig,
    PruneOptions,
};

use proxmox_rest_server::daemon;

use proxmox_backup::server;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
    PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
    disks::{
        DiskManage,
        zfs_pool_stats,
        get_pool_from_dataset,
    },
};

use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;

fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    let backup_uid = pbs_config::backup_user()?.uid;
    let backup_gid = pbs_config::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    pbs_runtime::main(run())
}

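// Glue type connecting the generic REST server with this proxy binary: it serves
// the GUI index page and authenticates API requests via the regular PBS auth helpers.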
struct ProxmoxBackupProxyAdapter;

impl ServerAdapter for ProxmoxBackupProxyAdapter {

    fn get_index(
        &self,
        env: RestEnvironment,
        parts: Parts,
    ) -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
        Box::pin(get_index_future(env, parts))
    }

    fn check_auth<'a>(
        &'a self,
        headers: &'a HeaderMap,
        method: &'a Method,
    ) -> Pin<Box<dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>> + Send + 'a>> {
        Box::pin(async move {
            check_pbs_auth(headers, method).await
        })
    }
}

fn extract_lang_header(headers: &http::HeaderMap) -> Option<String> {
    if let Some(Ok(cookie)) = headers.get("COOKIE").map(|v| v.to_str()) {
        return extract_cookie(cookie, "PBSLangCookie");
    }
    None
}

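// Render the GUI index (or the xterm.js console) template with the per-request
// data: node name, user name, a fresh CSRF prevention token, language and debug flag.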
async fn get_index_future(
    env: RestEnvironment,
    parts: Parts,
) -> Response<Body> {

    let auth_id = env.get_auth_id();
    let api = env.api_config();
    let language = extract_lang_header(&parts.headers);

    // fixme: make all IO async

    let (userid, csrf_token) = match auth_id {
        Some(auth_id) => {
            let auth_id = auth_id.parse::<Authid>();
            match auth_id {
                Ok(auth_id) if !auth_id.is_token() => {
                    let userid = auth_id.user().clone();
                    let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
                    (Some(userid), Some(new_csrf_token))
                }
                _ => (None, None)
            }
        }
        None => (None, None),
    };

    let nodename = proxmox::tools::nodename();
    let user = userid.as_ref().map(|u| u.as_str()).unwrap_or("");

    let csrf_token = csrf_token.unwrap_or_else(|| String::from(""));

    let mut debug = false;
    let mut template_file = "index";

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "debug" && v != "0" && v != "false" {
                debug = true;
            } else if k == "console" {
                template_file = "console";
            }
        }
    }

    let mut lang = String::from("");
    if let Some(language) = language {
        if Path::new(&format!("/usr/share/pbs-i18n/pbs-lang-{}.js", language)).exists() {
            lang = language;
        }
    }

    let data = json!({
        "NodeName": nodename,
        "UserName": user,
        "CSRFPreventionToken": csrf_token,
        "language": lang,
        "debug": debug,
    });

    let (ct, index) = match api.render_template(template_file, &data) {
        Ok(index) => ("text/html", index),
        Err(err) => ("text/plain", format!("Error rendering template: {}", err)),
    };

    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, ct)
        .body(index.into())
        .unwrap();

    if let Some(userid) = userid {
        resp.extensions_mut().insert(Authid::from((userid, None)));
    }

    resp
}

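// The actual proxy main task: set up logging, the API configuration, TLS, the
// command socket handlers and the HTTPS server, then run the schedulers until shutdown.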
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    // Note: To debug early connection error use
    // PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
    let debug = std::env::var("PROXMOX_DEBUG").is_ok();

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let rrd_cache = initialize_rrd_cache()?;
    rrd_cache.apply_journal()?;

    let mut config = ApiConfig::new(
        pbs_buildcfg::JS_DIR,
        &proxmox_backup::api2::ROUTER,
        RpcEnvironmentType::PUBLIC,
        ProxmoxBackupProxyAdapter,
    )?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("qrcodejs", "/usr/share/javascript/qrcodejs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("locale", "/usr/share/pbs-i18n");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(pbs_buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    let backup_user = pbs_config::backup_user()?;
    let mut commando_sock = proxmox_rest_server::CommandSocket::new(proxmox_rest_server::our_ctrl_sock(), backup_user.gid);

    let dir_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
    let file_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);

    config.enable_access_log(
        pbs_buildcfg::API_ACCESS_LOG_FN,
        Some(dir_opts.clone()),
        Some(file_opts.clone()),
        &mut commando_sock,
    )?;

    config.enable_auth_log(
        pbs_buildcfg::API_AUTH_LOG_FN,
        Some(dir_opts.clone()),
        Some(file_opts.clone()),
        &mut commando_sock,
    )?;

    let rest_server = RestServer::new(config);
    proxmox_rest_server::init_worker_tasks(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(), file_opts.clone())?;

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes

    // we build the initial acceptor here as we cannot start if this fails
    let acceptor = make_tls_acceptor()?;
    let acceptor = Arc::new(Mutex::new(acceptor));

    // to renew the acceptor we just add a command-socket handler
    commando_sock.register_command(
        "reload-certificate".to_string(),
        {
            let acceptor = Arc::clone(&acceptor);
            move |_value| -> Result<_, Error> {
                log::info!("reloading certificate");
                match make_tls_acceptor() {
                    Err(err) => log::error!("error reloading certificate: {}", err),
                    Ok(new_acceptor) => {
                        let mut guard = acceptor.lock().unwrap();
                        *guard = new_acceptor;
                    }
                }
                Ok(Value::Null)
            }
        },
    )?;

    // remove references to datastores that are no longer configured
    commando_sock.register_command(
        "datastore-removed".to_string(),
        |_value| {
            if let Err(err) = DataStore::remove_unused_datastores() {
                log::error!("could not refresh datastores: {}", err);
            }
            Ok(Value::Null)
        }
    )?;

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        move |listener| {

            let connections = accept_connections(listener, acceptor, debug);
            let connections = hyper::server::accept::from_stream(ReceiverStream::new(connections));

            Ok(async {
                daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

                hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(proxmox_rest_server::shutdown_future())
                    .map_err(Error::from)
                    .await
            })
        },
    );

    proxmox_rest_server::write_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;

    let init_result: Result<(), Error> = try_block!({
        proxmox_rest_server::register_task_control_commands(&mut commando_sock)?;
        commando_sock.spawn()?;
        proxmox_rest_server::catch_shutdown_signal()?;
        proxmox_rest_server::catch_reload_signal()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_rest_server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

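// Build an SslAcceptor from the proxy key/certificate in the configuration
// directory; used at startup and again by the "reload-certificate" command.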
fn make_tls_acceptor() -> Result<SslAcceptor, Error> {
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    Ok(acceptor.build())
}

type ClientStreamResult =
    Result<std::pin::Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>, Error>;
const MAX_PENDING_ACCEPTS: usize = 1024;

fn accept_connections(
    listener: tokio::net::TcpListener,
    acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
    debug: bool,
) -> tokio::sync::mpsc::Receiver<ClientStreamResult> {

    let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);

    tokio::spawn(accept_connection(listener, acceptor, debug, sender));

    receiver
}

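// Accept loop: each incoming TCP connection gets its TLS handshake performed in
// a separate task (with a 10 second timeout); established streams are forwarded
// to the channel, with at most MAX_PENDING_ACCEPTS handshakes in flight.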
async fn accept_connection(
    listener: tokio::net::TcpListener,
    acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
    debug: bool,
    sender: tokio::sync::mpsc::Sender<ClientStreamResult>,
) {
    let accept_counter = Arc::new(());

    loop {
        let (sock, _addr) = match listener.accept().await {
            Ok(conn) => conn,
            Err(err) => {
                eprintln!("error accepting tcp connection: {}", err);
                continue;
            }
        };

        sock.set_nodelay(true).unwrap();
        let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

        let ssl = { // limit acceptor_guard scope
            // Acceptor can be reloaded using the command socket "reload-certificate" command
            let acceptor_guard = acceptor.lock().unwrap();

            match openssl::ssl::Ssl::new(acceptor_guard.context()) {
                Ok(ssl) => ssl,
                Err(err) => {
                    eprintln!("failed to create Ssl object from Acceptor context - {}", err);
                    continue;
                },
            }
        };

        let stream = match tokio_openssl::SslStream::new(ssl, sock) {
            Ok(stream) => stream,
            Err(err) => {
                eprintln!("failed to create SslStream using ssl and connection socket - {}", err);
                continue;
            },
        };

        let mut stream = Box::pin(stream);
        let sender = sender.clone();

        if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
            eprintln!("connection rejected - too many open connections");
            continue;
        }

        let accept_counter = Arc::clone(&accept_counter);
        tokio::spawn(async move {
            let accept_future = tokio::time::timeout(
                Duration::new(10, 0), stream.as_mut().accept());

            let result = accept_future.await;

            match result {
                Ok(Ok(())) => {
                    if sender.send(Ok(stream)).await.is_err() && debug {
                        eprintln!("detected closed connection channel");
                    }
                }
                Ok(Err(err)) => {
                    if debug {
                        eprintln!("https handshake failed - {}", err);
                    }
                }
                Err(_) => {
                    if debug {
                        eprintln!("https handshake timeout");
                    }
                }
            }

            drop(accept_counter); // decrease reference count
        });
    }
}

fn start_stat_generator() {
    let abort_future = proxmox_rest_server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

fn start_task_scheduler() {
    let abort_future = proxmox_rest_server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};

fn next_minute() -> Result<Instant, Error> {
    let now = SystemTime::now();
    let epoch_now = now.duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::sleep_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;
    schedule_datastore_verify_jobs().await;
    schedule_tape_backup_jobs().await;
    schedule_task_log_rotate().await;

    Ok(())
}

async fn schedule_datastore_garbage_collection() {

    let config = match pbs_config::datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox_time::epoch_i64();

        if next > now { continue; }

        let job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let auth_id = Authid::root_auth_id();

        if let Err(err) = crate::server::do_garbage_collection_job(job, datastore, auth_id, Some(event_str), false) {
            eprintln!("unable to start garbage collection job on datastore {} - {}", store, err);
        }
    }
}

async fn schedule_datastore_prune() {

    let config = match pbs_config::datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !pbs_datastore::prune::keeps_something(&prune_options) { // no prune settings - keep all
            continue;
        }

        let worker_type = "prune";
        if check_schedule(worker_type, &event_str, &store) {
            let job = match Job::new(worker_type, &store) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };

            let auth_id = Authid::root_auth_id().clone();
            if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore prune job {} - {}", &store, err);
            }
        };
    }
}

async fn schedule_datastore_sync_jobs() {

    let config = match pbs_config::sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "syncjob";
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };

            let auth_id = Authid::root_auth_id().clone();
            if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str), false) {
                eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
            }
        };
    }
}

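// Start verification jobs whose configured schedule is due.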
async fn schedule_datastore_verify_jobs() {

    let config = match pbs_config::verify::config() {
        Err(err) => {
            eprintln!("unable to read verification job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("verification job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "verificationjob";
        let auth_id = Authid::root_auth_id().clone();
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };
            if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str), false) {
                eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
            }
        };
    }
}

async fn schedule_tape_backup_jobs() {

    let config = match pbs_config::tape_job::config() {
        Err(err) => {
            eprintln!("unable to read tape job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: TapeBackupJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("tape backup job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "tape-backup-job";
        let auth_id = Authid::root_auth_id().clone();
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };
            if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str), false) {
                eprintln!("unable to start tape backup job {} - {}", &job_id, err);
            }
        };
    }
}

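// Rotate the task log archive and the API access/auth logs (daily at 00:00) and
// clean up old task logs after the archive was rotated.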
async fn schedule_task_log_rotate() {

    let worker_type = "logrotate";
    let job_id = "access-log_and_task-archive";

    // schedule daily at 00:00 like normal logrotate
    let schedule = "00:00";

    if !check_schedule(worker_type, schedule, job_id) {
        // if we never ran the rotation, schedule instantly
        match jobstate::JobState::load(worker_type, job_id) {
            Ok(state) => match state {
                jobstate::JobState::Created { .. } => {},
                _ => return,
            },
            _ => return,
        }
    }

    let mut job = match Job::new(worker_type, job_id) {
        Ok(job) => job,
        Err(_) => return, // could not get lock
    };

    if let Err(err) = WorkerTask::new_thread(
        worker_type,
        None,
        Authid::root_auth_id().to_string(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            task_log!(worker, "starting task log rotation");

            let result = try_block!({
                let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
                let max_files = 20; // times twenty files gives > 100000 task entries
                let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                if has_rotated {
                    task_log!(worker, "task log archive was rotated");
                } else {
                    task_log!(worker, "task log archive was not rotated");
                }

                let max_size = 32 * 1024 * 1024 - 1;
                let max_files = 14;
                let mut logrotate = LogRotate::new(pbs_buildcfg::API_ACCESS_LOG_FN, true)
                    .ok_or_else(|| format_err!("could not get API access log file names"))?;

                if logrotate.rotate(max_size, None, Some(max_files))? {
                    println!("rotated access log, telling daemons to re-open log file");
                    pbs_runtime::block_on(command_reopen_access_logfiles())?;
                    task_log!(worker, "API access log was rotated");
                } else {
                    task_log!(worker, "API access log was not rotated");
                }

                let mut logrotate = LogRotate::new(pbs_buildcfg::API_AUTH_LOG_FN, true)
                    .ok_or_else(|| format_err!("could not get API auth log file names"))?;

                if logrotate.rotate(max_size, None, Some(max_files))? {
                    println!("rotated auth log, telling daemons to re-open log file");
                    pbs_runtime::block_on(command_reopen_auth_logfiles())?;
                    task_log!(worker, "API authentication log was rotated");
                } else {
                    task_log!(worker, "API authentication log was not rotated");
                }

                if has_rotated {
                    task_log!(worker, "cleaning up old task logs");
                    if let Err(err) = cleanup_old_tasks(true) {
                        task_warn!(worker, "could not completely cleanup old tasks: {}", err);
                    }
                }

                Ok(())
            });

            let status = worker.create_state(&result);

            if let Err(err) = job.finish(status) {
                eprintln!("could not finish job state for {}: {}", worker_type, err);
            }

            result
        },
    ) {
        eprintln!("unable to start task log rotation: {}", err);
    }
}

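// Tell the most recent proxy and api daemon instances (via their control
// sockets) to re-open the access log file after rotation.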
async fn command_reopen_access_logfiles() -> Result<(), Error> {
    // only care about the most recent daemon instance for each, proxy & api, as other older ones
    // should not respond to new requests anyway, but only finish their current one and then exit.
    let sock = proxmox_rest_server::our_ctrl_sock();
    let f1 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");

    let pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
    let sock = proxmox_rest_server::ctrl_sock_from_pid(pid);
    let f2 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");

    match futures::join!(f1, f2) {
        (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
        (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
        (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
        _ => Ok(()),
    }
}

async fn command_reopen_auth_logfiles() -> Result<(), Error> {
    // only care about the most recent daemon instance for each, proxy & api, as other older ones
    // should not respond to new requests anyway, but only finish their current one and then exit.
    let sock = proxmox_rest_server::our_ctrl_sock();
    let f1 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-auth-log-reopen\"}\n");

    let pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
    let sock = proxmox_rest_server::ctrl_sock_from_pid(pid);
    let f2 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-auth-log-reopen\"}\n");

    match futures::join!(f1, f2) {
        (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
        (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
        (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
        _ => Ok(()),
    }
}

async fn run_stat_generator() {

    loop {
        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats().await;

        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

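// Thin helpers around the RRD cache for gauge and derive (rate) values.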
fn rrd_update_gauge(name: &str, value: f64) {
    if let Ok(rrd_cache) = get_rrd_cache() {
        if let Err(err) = rrd_cache.update_value(name, value, DST::Gauge) {
            eprintln!("rrd::update_value '{}' failed - {}", name, err);
        }
    }
}

fn rrd_update_derive(name: &str, value: f64) {
    if let Ok(rrd_cache) = get_rrd_cache() {
        if let Err(err) = rrd_cache.update_value(name, value, DST::Derive) {
            eprintln!("rrd::update_value '{}' failed - {}", name, err);
        }
    }
}

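// Collect host statistics (CPU, memory, network, load average and disk usage)
// and feed them into the RRD database.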
async fn generate_host_stats() {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};

    pbs_runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu);
                rrd_update_gauge("host/iowait", stat.iowait_percent);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
                rrd_update_gauge("host/memused", meminfo.memused as f64);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use pbs_config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64);
                rrd_update_derive("host/netout", netout as f64);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");

        match pbs_config::datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or_default();

                for config in datastore_list {

                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }

    });
}

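// Returns true if the given calendar event is due, i.e. the next run computed
// from the job's last run time lies in the past.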
fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
    let event = match parse_calendar_event(event_str) {
        Ok(event) => event,
        Err(err) => {
            eprintln!("unable to parse schedule '{}' - {}", event_str, err);
            return false;
        }
    };

    let last = match jobstate::last_run_time(worker_type, &id) {
        Ok(time) => time,
        Err(err) => {
            eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
            return false;
        }
    };

    let next = match compute_next_event(&event, last, false) {
        Ok(Some(next)) => next,
        Ok(None) => return false,
        Err(err) => {
            eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
            return false;
        }
    };

    let now = proxmox_time::epoch_i64();
    next <= now
}

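// Gather disk usage for the given path and, if the underlying mounted device can
// be resolved, its I/O statistics as well; results are stored under rrd_prefix.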
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(source) = source {
                        let pool = get_pool_from_dataset(&source).unwrap_or(&source);
                        match zfs_pool_stats(pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}