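//! proxmox-backup-proxy: the public-facing HTTPS daemon of Proxmox Backup
//! Server. It serves the REST API and web UI on port 8007 and runs the
//! periodic job scheduler and host statistics generator.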
use std::sync::Arc;
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;

use anyhow::{bail, format_err, Error};
use futures::*;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
use tokio_stream::wrappers::ReceiverStream;

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::{
    backup::DataStore,
    server::{
        auth::default_api_auth,
        WorkerTask,
        ApiConfig,
        rest::*,
        jobstate::{
            self,
            Job,
        },
        rotate_task_log_archive,
    },
    tools::systemd::time::{
        parse_calendar_event,
        compute_next_event,
    },
};

use proxmox_backup::api2::types::Authid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
    daemon,
    disks::{
        DiskManage,
        zfs_pool_stats,
        get_pool_from_dataset,
    },
    logrotate::LogRotate,
    socket::{
        set_tcp_keepalive,
        PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
    },
};

use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;

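/// Binary entry point: sanitize the environment, refuse to run unless
/// started as the backup user and group, then block on the async `run()`
/// future.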
fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    proxmox_backup::tools::runtime::main(run())
}

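/// Daemon main loop: set up syslog, the REST API configuration and the TLS
/// acceptor, bind the HTTPS server on port 8007, then start the task
/// scheduler and statistics generator and wait for shutdown.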
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    // Note: To debug early connection errors use
    // PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
    let debug = std::env::var("PROXMOX_DEBUG").is_ok();

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR,
        &proxmox_backup::api2::ROUTER,
        RpcEnvironmentType::PUBLIC,
        default_api_auth(),
    )?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("qrcodejs", "/usr/share/javascript/qrcodejs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("locale", "/usr/share/pbs-i18n");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());

    config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {

            let connections = accept_connections(listener, acceptor, debug);
            let connections = hyper::server::accept::from_stream(ReceiverStream::new(connections));

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
        "proxmox-backup-proxy.service",
    );

    server::write_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::register_task_control_commands(&mut commando_sock)?;
        commando_sock.spawn()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

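/// Accept TCP connections and perform each TLS handshake on its own task,
/// so a slow or stalled handshake cannot block the accept loop. Completed
/// streams are handed to the HTTP server through an mpsc channel; the
/// `Arc` strong count serves as a cheap counter of in-flight handshakes.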
fn accept_connections(
    listener: tokio::net::TcpListener,
    acceptor: Arc<openssl::ssl::SslAcceptor>,
    debug: bool,
) -> tokio::sync::mpsc::Receiver<Result<std::pin::Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>, Error>> {

    const MAX_PENDING_ACCEPTS: usize = 1024;

    let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);

    let accept_counter = Arc::new(());

    tokio::spawn(async move {
        loop {
            match listener.accept().await {
                Err(err) => {
                    eprintln!("error accepting tcp connection: {}", err);
                }
                Ok((sock, _addr)) => {
                    sock.set_nodelay(true).unwrap();
                    let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
                    let acceptor = Arc::clone(&acceptor);

                    let ssl = match openssl::ssl::Ssl::new(acceptor.context()) {
                        Ok(ssl) => ssl,
                        Err(err) => {
                            eprintln!("failed to create Ssl object from Acceptor context - {}", err);
                            continue;
                        },
                    };
                    let stream = match tokio_openssl::SslStream::new(ssl, sock) {
                        Ok(stream) => stream,
                        Err(err) => {
                            eprintln!("failed to create SslStream using ssl and connection socket - {}", err);
                            continue;
                        },
                    };

                    let mut stream = Box::pin(stream);
                    let sender = sender.clone();

                    if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
                        eprintln!("connection rejected - too many open connections");
                        continue;
                    }

                    let accept_counter = accept_counter.clone();
                    tokio::spawn(async move {
                        let accept_future = tokio::time::timeout(
                            Duration::new(10, 0), stream.as_mut().accept());

                        let result = accept_future.await;

                        match result {
                            Ok(Ok(())) => {
                                if sender.send(Ok(stream)).await.is_err() && debug {
                                    eprintln!("detected closed connection channel");
                                }
                            }
                            Ok(Err(err)) => {
                                if debug {
                                    eprintln!("https handshake failed - {}", err);
                                }
                            }
                            Err(_) => {
                                if debug {
                                    eprintln!("https handshake timeout");
                                }
                            }
                        }

                        drop(accept_counter); // decrease reference count
                    });
                }
            }
        }
    });

    receiver
}

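/// Spawn the statistics generator loop; it is aborted on server shutdown.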
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

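/// Spawn the task scheduler loop; it is aborted on server shutdown.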
fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};

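/// Compute the `Instant` at which the next full minute starts.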
fn next_minute() -> Result<Instant, Error> {
    let now = SystemTime::now();
    let epoch_now = now.duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

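/// Run `schedule_tasks()` once per minute, catching panics so a single
/// failing iteration cannot take down the scheduler loop.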
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::sleep_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

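/// Check all schedulable job types and start those that are due.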
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;
    schedule_datastore_verify_jobs().await;
    schedule_tape_backup_jobs().await;
    schedule_task_log_rotate().await;

    Ok(())
}

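/// Start a garbage collection worker for every datastore whose configured
/// GC schedule is due and that is not already collecting.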
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::config::{
        datastore::{
            self,
            DataStoreConfig,
        },
    };

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let auth_id = Authid::root_auth_id();

        if let Err(err) = crate::server::do_garbage_collection_job(job, datastore, auth_id, Some(event_str), false) {
            eprintln!("unable to start garbage collection job on datastore {} - {}", store, err);
        }
    }
}

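/// Start a prune worker for every datastore with a due prune schedule and
/// at least one configured keep option.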
async fn schedule_datastore_prune() {

    use proxmox_backup::{
        backup::{
            PruneOptions,
        },
        config::datastore::{
            self,
            DataStoreConfig,
        },
    };

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let worker_type = "prune";
        if check_schedule(worker_type, &event_str, &store) {
            let job = match Job::new(worker_type, &store) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };

            let auth_id = Authid::root_auth_id().clone();
            if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore prune job {} - {}", &store, err);
            }
        }
    }
}

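/// Start a sync worker for every configured sync job whose schedule is due.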
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::config::sync::{
        self,
        SyncJobConfig,
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "syncjob";
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };

            let auth_id = Authid::root_auth_id().clone();
            if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
            }
        }
    }
}

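/// Start a verification worker for every configured verification job whose
/// schedule is due.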
async fn schedule_datastore_verify_jobs() {

    use proxmox_backup::config::verify::{
        self,
        VerificationJobConfig,
    };

    let config = match verify::config() {
        Err(err) => {
            eprintln!("unable to read verification job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("verification job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "verificationjob";
        let auth_id = Authid::root_auth_id().clone();
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };
            if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
                eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
            }
        }
    }
}

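/// Start a tape backup worker for every configured tape backup job whose
/// schedule is due.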
async fn schedule_tape_backup_jobs() {

    use proxmox_backup::config::tape_job::{
        self,
        TapeBackupJobConfig,
    };

    let config = match tape_job::config() {
        Err(err) => {
            eprintln!("unable to read tape job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };
    for (job_id, (_, job_config)) in config.sections {
        let job_config: TapeBackupJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("tape backup job config from_value failed - {}", err);
                continue;
            }
        };
        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let worker_type = "tape-backup-job";
        let auth_id = Authid::root_auth_id().clone();
        if check_schedule(worker_type, &event_str, &job_id) {
            let job = match Job::new(worker_type, &job_id) {
                Ok(job) => job,
                Err(_) => continue, // could not get lock
            };
            if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
                eprintln!("unable to start tape backup job {} - {}", &job_id, err);
            }
        }
    }
}

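/// Rotate the task log archive and the API access/authentication logs once
/// a day (at 00:00), or immediately if the rotation job has never run yet.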
async fn schedule_task_log_rotate() {

    let worker_type = "logrotate";
    let job_id = "access-log_and_task-archive";

    // schedule daily at 00:00 like normal logrotate
    let schedule = "00:00";

    if !check_schedule(worker_type, schedule, job_id) {
        // if we never ran the rotation, schedule instantly
        match jobstate::JobState::load(worker_type, job_id) {
            Ok(state) => match state {
                jobstate::JobState::Created { .. } => {},
                _ => return,
            },
            _ => return,
        }
    }

    let mut job = match Job::new(worker_type, job_id) {
        Ok(job) => job,
        Err(_) => return, // could not get lock
    };

    if let Err(err) = WorkerTask::new_thread(
        worker_type,
        None,
        Authid::root_auth_id().clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            worker.log("starting task log rotation".to_string());

            let result = try_block!({
                let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
                let max_files = 20; // times twenty files gives > 100000 task entries
                let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                if has_rotated {
                    worker.log("task log archive was rotated".to_string());
                } else {
                    worker.log("task log archive was not rotated".to_string());
                }

                let max_size = 32 * 1024 * 1024 - 1;
                let max_files = 14;
                let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
                    .ok_or_else(|| format_err!("could not get API access log file names"))?;

                if logrotate.rotate(max_size, None, Some(max_files))? {
                    println!("rotated access log, telling daemons to re-open log file");
                    proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
                    worker.log("API access log was rotated".to_string());
                } else {
                    worker.log("API access log was not rotated".to_string());
                }

                let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
                    .ok_or_else(|| format_err!("could not get API auth log file names"))?;

                if logrotate.rotate(max_size, None, Some(max_files))? {
                    worker.log("API authentication log was rotated".to_string());
                } else {
                    worker.log("API authentication log was not rotated".to_string());
                }

                Ok(())
            });

            let status = worker.create_state(&result);

            if let Err(err) = job.finish(status) {
                eprintln!("could not finish job state for {}: {}", worker_type, err);
            }

            result
        },
    ) {
        eprintln!("unable to start task log rotation: {}", err);
    }
}

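/// Ask both daemons (proxy and API) via their control sockets to re-open
/// their access log files after a rotation.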
async fn command_reopen_logfiles() -> Result<(), Error> {
    // We only care about the most recent daemon instance of the proxy and the
    // API: older ones should not respond to new requests anyway; they only
    // finish their current requests and then exit.
    let sock = server::our_ctrl_sock();
    let f1 = server::send_command(sock, serde_json::json!({
        "command": "api-access-log-reopen",
    }));

    let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
    let sock = server::ctrl_sock_from_pid(pid);
    let f2 = server::send_command(sock, serde_json::json!({
        "command": "api-access-log-reopen",
    }));

    match futures::join!(f1, f2) {
        (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
        (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
        (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
        _ => Ok(()),
    }
}

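/// Collect host statistics every 10 seconds; every sixth iteration the
/// collected RRD values are also persisted.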
async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

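// Small wrappers around rrd::update_value that log errors instead of
// propagating them (statistics updates are best-effort).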
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

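/// Gather CPU, memory, network, load and disk statistics for the host and
/// all configured datastores, and feed them into the RRD database.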
async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or_default();

                for config in datastore_list {

                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }

    });
}

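/// Return true if the calendar event in `event_str` is due for the given
/// job, based on its last recorded run time.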
fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
    let event = match parse_calendar_event(event_str) {
        Ok(event) => event,
        Err(err) => {
            eprintln!("unable to parse schedule '{}' - {}", event_str, err);
            return false;
        }
    };

    let last = match jobstate::last_run_time(worker_type, id) {
        Ok(time) => time,
        Err(err) => {
            eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
            return false;
        }
    };

    let next = match compute_next_event(&event, last, false) {
        Ok(Some(next)) => next,
        Ok(None) => return false,
        Err(err) => {
            eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
            return false;
        }
    };

    let now = proxmox::tools::time::epoch_i64();
    next <= now
}

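/// Record usage and, where available, I/O statistics (from ZFS pool stats
/// or the underlying block device) for the filesystem at `path` under
/// `rrd_prefix`.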
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(source) = source {
                        let pool = get_pool_from_dataset(&source).unwrap_or(&source);
                        match zfs_pool_stats(pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}