use std::sync::{Mutex, Arc};
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;

use anyhow::{bail, format_err, Error};
use futures::*;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use tokio_stream::wrappers::ReceiverStream;
use serde_json::Value;

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
use proxmox::sys::linux::socket::set_tcp_keepalive;

use proxmox_backup::{
    backup::DataStore,
    server::{
        auth::default_api_auth,
        WorkerTask,
        ApiConfig,
        rest::*,
        jobstate::{
            self,
            Job,
        },
        rotate_task_log_archive,
    },
};

use pbs_buildcfg::configdir;
use pbs_systemd::time::{compute_next_event, parse_calendar_event};
use pbs_tools::logrotate::LogRotate;

use pbs_api_types::{Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig, DataStoreConfig};
use pbs_datastore::prune::PruneOptions;

use proxmox_backup::server;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
    PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
    daemon,
    disks::{
        DiskManage,
        zfs_pool_stats,
        get_pool_from_dataset,
    },
};

use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;

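// Entry point: refuse to run unless started as the backup user/group,
// then hand control to the async `run()` future.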
fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    let backup_uid = pbs_config::backup_user()?.uid;
    let backup_gid = pbs_config::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    pbs_runtime::main(run())
}

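// Main proxy setup: syslog, the API configuration with its aliases and
// templates, the TLS listener on port 8007, the command socket handlers,
// and the periodic schedulers.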
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    // Note: To debug early connection errors use
    // PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
    let debug = std::env::var("PROXMOX_DEBUG").is_ok();

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        pbs_buildcfg::JS_DIR,
        &proxmox_backup::api2::ROUTER,
        RpcEnvironmentType::PUBLIC,
        default_api_auth(),
    )?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("qrcodejs", "/usr/share/javascript/qrcodejs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("locale", "/usr/share/pbs-i18n");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(pbs_buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());

    config.enable_file_log(pbs_buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes

    // we build the initial acceptor here as we cannot start if this fails
    let acceptor = make_tls_acceptor()?;
    let acceptor = Arc::new(Mutex::new(acceptor));

    // to renew the acceptor we just add a command-socket handler
    commando_sock.register_command(
        "reload-certificate".to_string(),
        {
            let acceptor = Arc::clone(&acceptor);
            move |_value| -> Result<_, Error> {
                log::info!("reloading certificate");
                match make_tls_acceptor() {
                    Err(err) => log::error!("error reloading certificate: {}", err),
                    Ok(new_acceptor) => {
                        let mut guard = acceptor.lock().unwrap();
                        *guard = new_acceptor;
                    }
                }
                Ok(Value::Null)
            }
        },
    )?;

    // drop references to datastores that are no longer configured
    commando_sock.register_command(
        "datastore-removed".to_string(),
        |_value| {
            if let Err(err) = proxmox_backup::backup::DataStore::remove_unused_datastores() {
                log::error!("could not refresh datastores: {}", err);
            }
            Ok(Value::Null)
        }
    )?;

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        move |listener, ready| {

            let connections = accept_connections(listener, acceptor, debug);
            let connections = hyper::server::accept::from_stream(ReceiverStream::new(connections));

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
        "proxmox-backup-proxy.service",
    );

    server::write_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::register_task_control_commands(&mut commando_sock)?;
        commando_sock.spawn()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

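// Build an SslAcceptor from the proxy key/certificate in the configuration
// directory; used at startup and again by the "reload-certificate" handler.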
fn make_tls_acceptor() -> Result<SslAcceptor, Error> {
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    Ok(acceptor.build())
}

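// Accepted TLS streams are handed to hyper through an mpsc channel; the
// receiving end is wrapped into a connection stream in create_daemon above.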
type ClientStreamResult =
    Result<std::pin::Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>, Error>;
const MAX_PENDING_ACCEPTS: usize = 1024;

fn accept_connections(
    listener: tokio::net::TcpListener,
    acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
    debug: bool,
) -> tokio::sync::mpsc::Receiver<ClientStreamResult> {

    let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);

    tokio::spawn(accept_connection(listener, acceptor, debug, sender));

    receiver
}

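// Accept loop: each TLS handshake runs in its own task with a 10 second
// timeout; the number of handshakes in flight is bounded by the strong
// count of `accept_counter`.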
async fn accept_connection(
    listener: tokio::net::TcpListener,
    acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
    debug: bool,
    sender: tokio::sync::mpsc::Sender<ClientStreamResult>,
) {
    let accept_counter = Arc::new(());

    loop {
        let (sock, _addr) = match listener.accept().await {
            Ok(conn) => conn,
            Err(err) => {
                eprintln!("error accepting tcp connection: {}", err);
                continue;
            }
        };

        sock.set_nodelay(true).unwrap();
        let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

        let ssl = { // limit acceptor_guard scope
            // Acceptor can be reloaded using the command socket "reload-certificate" command
            let acceptor_guard = acceptor.lock().unwrap();

            match openssl::ssl::Ssl::new(acceptor_guard.context()) {
                Ok(ssl) => ssl,
                Err(err) => {
                    eprintln!("failed to create Ssl object from Acceptor context - {}", err);
                    continue;
                },
            }
        };

        let stream = match tokio_openssl::SslStream::new(ssl, sock) {
            Ok(stream) => stream,
            Err(err) => {
                eprintln!("failed to create SslStream using ssl and connection socket - {}", err);
                continue;
            },
        };

        let mut stream = Box::pin(stream);
        let sender = sender.clone();

        if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
            eprintln!("connection rejected - too many open connections");
            continue;
        }

        let accept_counter = Arc::clone(&accept_counter);
        tokio::spawn(async move {
            let accept_future = tokio::time::timeout(
                Duration::new(10, 0), stream.as_mut().accept());

            let result = accept_future.await;

            match result {
                Ok(Ok(())) => {
                    if sender.send(Ok(stream)).await.is_err() && debug {
                        eprintln!("detected closed connection channel");
                    }
                }
                Ok(Err(err)) => {
                    if debug {
                        eprintln!("https handshake failed - {}", err);
                    }
                }
                Err(_) => {
                    if debug {
                        eprintln!("https handshake timeout");
                    }
                }
            }

            drop(accept_counter); // decrease reference count
        });
    }
}

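// Both starters below race their worker future against the shutdown
// future, so the background tasks end when the server shuts down.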
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};

fn next_minute() -> Result<Instant, Error> {
    let now = SystemTime::now();
    let epoch_now = now.duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

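// Wakes up once a minute and runs all scheduled jobs; panics inside the
// scheduler are caught so the loop keeps running.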
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::sleep_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

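// One scheduling pass over all job types.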
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;
    schedule_datastore_verify_jobs().await;
    schedule_tape_backup_jobs().await;
    schedule_task_log_rotate().await;

    Ok(())
}

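// Start garbage collection on every datastore whose gc-schedule is due.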
async fn schedule_datastore_garbage_collection() {

    let config = match pbs_config::datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let auth_id = Authid::root_auth_id();

        if let Err(err) = crate::server::do_garbage_collection_job(job, datastore, auth_id, Some(event_str), false) {
            eprintln!("unable to start garbage collection job on datastore {} - {}", store, err);
        }
    }
}

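// Start prune jobs for datastores that have a prune-schedule and at least
// one keep option set.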
async fn schedule_datastore_prune() {

    let config = match pbs_config::datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let worker_type = "prune";
        if check_schedule(worker_type, &event_str, &store) {
            let job = match Job::new(worker_type, &store) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };

            let auth_id = Authid::root_auth_id().clone();
            if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore prune job {} - {}", &store, err);
            }
        };
    }
}

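// Start configured sync jobs whose schedule is due.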
async fn schedule_datastore_sync_jobs() {

    let config = match pbs_config::sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "syncjob";
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };

            let auth_id = Authid::root_auth_id().clone();
            if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
            }
        };
    }
}

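// Start configured verification jobs whose schedule is due.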
async fn schedule_datastore_verify_jobs() {

    let config = match pbs_config::verify::config() {
        Err(err) => {
            eprintln!("unable to read verification job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("verification job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "verificationjob";
        let auth_id = Authid::root_auth_id().clone();
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(&worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };
            if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
            }
        };
    }
}

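// Start configured tape backup jobs whose schedule is due.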
async fn schedule_tape_backup_jobs() {

    let config = match pbs_config::tape_job::config() {
        Err(err) => {
            eprintln!("unable to read tape job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: TapeBackupJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("tape backup job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "tape-backup-job";
        let auth_id = Authid::root_auth_id().clone();
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(&worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };
            if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
                eprintln!("unable to start tape backup job {} - {}", &job_id, err);
            }
        };
    }
}

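// Rotate the task archive and the API access/auth logs once a day, or
// immediately if the rotation job never ran before.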
async fn schedule_task_log_rotate() {

    let worker_type = "logrotate";
    let job_id = "access-log_and_task-archive";

    // schedule daily at 00:00 like normal logrotate
    let schedule = "00:00";

    if !check_schedule(worker_type, schedule, job_id) {
        // if we never ran the rotation, schedule instantly
        match jobstate::JobState::load(worker_type, job_id) {
            Ok(state) => match state {
                jobstate::JobState::Created { .. } => {},
                _ => return,
            },
            _ => return,
        }
    }

    let mut job = match Job::new(worker_type, job_id) {
        Ok(job) => job,
        Err(_) => return, // could not get lock
    };

    if let Err(err) = WorkerTask::new_thread(
        worker_type,
        None,
        Authid::root_auth_id().clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            worker.log("starting task log rotation".to_string());

            let result = try_block!({
                let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
                let max_files = 20; // times twenty files gives > 100000 task entries
                let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                if has_rotated {
                    worker.log("task log archive was rotated".to_string());
                } else {
                    worker.log("task log archive was not rotated".to_string());
                }

                let max_size = 32 * 1024 * 1024 - 1;
                let max_files = 14;
                let mut logrotate = LogRotate::new(pbs_buildcfg::API_ACCESS_LOG_FN, true)
                    .ok_or_else(|| format_err!("could not get API access log file names"))?;

                if logrotate.rotate(max_size, None, Some(max_files))? {
                    println!("rotated access log, telling daemons to re-open log file");
                    pbs_runtime::block_on(command_reopen_logfiles())?;
                    worker.log("API access log was rotated".to_string());
                } else {
                    worker.log("API access log was not rotated".to_string());
                }

                let mut logrotate = LogRotate::new(pbs_buildcfg::API_AUTH_LOG_FN, true)
                    .ok_or_else(|| format_err!("could not get API auth log file names"))?;

                if logrotate.rotate(max_size, None, Some(max_files))? {
                    worker.log("API authentication log was rotated".to_string());
                } else {
                    worker.log("API authentication log was not rotated".to_string());
                }

                Ok(())
            });

            let status = worker.create_state(&result);

            if let Err(err) = job.finish(status) {
                eprintln!("could not finish job state for {}: {}", worker_type, err);
            }

            result
        },
    ) {
        eprintln!("unable to start task log rotation: {}", err);
    }
}

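// Tell the proxy and API daemons to reopen their access log files after
// rotation.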
async fn command_reopen_logfiles() -> Result<(), Error> {
    // only care about the most recent daemon instance of each of proxy and api, as
    // older ones should not accept new requests anyway, but only finish their
    // current ones and then exit.
    let sock = server::our_ctrl_sock();
    let f1 = server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");

    let pid = server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
    let sock = server::ctrl_sock_from_pid(pid);
    let f2 = server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");

    match futures::join!(f1, f2) {
        (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
        (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
        (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
        _ => Ok(()),
    }
}

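// Gather host statistics every 10 seconds; every 6th round is persisted
// to disk (save == true).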
async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

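// Thin wrappers around rrd::update_value that only log errors.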
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

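// Collect CPU, memory, network, load average and disk statistics and feed
// them into the RRD database.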
async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};

    pbs_runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use pbs_config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match pbs_config::datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or_default();

                for config in datastore_list {
                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }
    });
}

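// Returns true if the calendar event is due, judged against the job's
// last run time.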
fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
    let event = match parse_calendar_event(event_str) {
        Ok(event) => event,
        Err(err) => {
            eprintln!("unable to parse schedule '{}' - {}", event_str, err);
            return false;
        }
    };

    let last = match jobstate::last_run_time(worker_type, &id) {
        Ok(time) => time,
        Err(err) => {
            eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
            return false;
        }
    };

    let next = match compute_next_event(&event, last, false) {
        Ok(Some(next)) => next,
        Ok(None) => return false,
        Err(err) => {
            eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
            return false;
        }
    };

    let now = proxmox::tools::time::epoch_i64();
    next <= now
}

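// Record filesystem usage and, where the underlying device can be
// resolved, block I/O statistics (ZFS pool stats for zfs) under the given
// RRD prefix.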
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(source) = source {
                        let pool = get_pool_from_dataset(&source).unwrap_or(&source);
                        match zfs_pool_stats(pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}