// src/bin/proxmox-backup-proxy.rs
use std::sync::Arc;
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
    daemon,
    disks::{
        DiskManage,
        zfs_pool_stats,
    },
    socket::{
        set_tcp_keepalive,
        PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
    },
};

use proxmox_backup::api2::pull::do_sync_job;

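/// Entry point: sanitizes the environment, refuses to run as any user/group
/// other than the backup user, then drives the async `run()` future on the
/// tokio runtime.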
fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    proxmox_backup::tools::runtime::main(run())
}

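/// Daemon setup: initializes syslog, preloads the auth keys, builds the
/// `ApiConfig` (static file aliases, templates, access log), wraps the REST
/// server in a TLS acceptor listening on port 8007, and starts the task
/// scheduler and statistics generator before awaiting shutdown.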
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("locale", "/usr/share/pbs-i18n");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();

                        let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);

                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter them out
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

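/// Spawns `run_stat_generator()`, aborting it once the server shuts down.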
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

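/// Spawns `run_task_scheduler()`, aborting it once the server shuts down.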
fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};

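/// Computes the `Instant` of the next full minute, the granularity at which
/// scheduled jobs are (re)evaluated.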
fn next_minute() -> Result<Instant, Error> {
    let now = SystemTime::now();
    let epoch_now = now.duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

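/// Main scheduling loop: wakes up once per minute and runs `schedule_tasks()`,
/// catching panics so a misbehaving job cannot take down the scheduler.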
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

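/// Runs one round of all schedulers: garbage collection, prune, verification,
/// sync jobs and task log rotation.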
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_verification().await;
    schedule_datastore_sync_jobs().await;
    schedule_task_log_rotate().await;

    Ok(())
}

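/// Iterates over all datastores and, where a `gc_schedule` calendar event is
/// due, starts a garbage collection worker task. The last run time is taken
/// from the stored GC status (or the job state file), so schedules survive
/// daemon restarts.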
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match jobstate::last_run_time(worker_type, &store) {
                Ok(time) => time,
                Err(err) => {
                    eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                job.start(&worker.upid().to_string())?;

                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));

                let result = datastore.garbage_collection(&*worker, worker.upid());

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}

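/// Iterates over all datastores and, where a `prune_schedule` is due and the
/// keep options actually select something, starts a prune worker that removes
/// the snapshots not covered by the retention options.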
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "prune";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            Userid::backup_userid().clone(),
            false,
            move |worker| {

                job.start(&worker.upid().to_string())?;

                let result = try_block!({

                    worker.log(format!("Starting datastore prune on store \"{}\"", store));
                    worker.log(format!("task triggered by schedule '{}'", event_str));
                    worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                    let base_path = datastore.base_path();

                    let groups = BackupGroup::list_groups(&base_path)?;
                    for group in groups {
                        let list = group.list_backups(&base_path)?;
                        let mut prune_info = compute_prune_info(list, &prune_options)?;
                        prune_info.reverse(); // delete older snapshots first

                        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                                store, group.backup_type(), group.backup_id()));

                        for (info, keep) in prune_info {
                            worker.log(format!(
                                    "{} {}/{}/{}",
                                    if keep { "keep" } else { "remove" },
                                    group.backup_type(), group.backup_id(),
                                    info.backup_dir.backup_time_string()));
                            if !keep {
                                datastore.remove_backup_dir(&info.backup_dir, true)?;
                            }
                        }
                    }
                    Ok(())
                });

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}

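/// Iterates over all datastores and, where a `verify_schedule` is due, starts
/// a worker that verifies all backups on the store, logging and reporting any
/// snapshots that fail verification.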
async fn schedule_datastore_verification() {
    use proxmox_backup::backup::{DataStore, verify_all_backups};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.verify_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "verify";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let worker_id = store.clone();
        let store2 = store.clone();
        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(worker_id),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                job.start(&worker.upid().to_string())?;
                worker.log(format!("starting verification on store {}", store2));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                let result = try_block!({
                    let failed_dirs =
                        verify_all_backups(datastore, worker.clone(), worker.upid())?;
                    if failed_dirs.len() > 0 {
                        worker.log("Failed to verify the following snapshots:");
                        for dir in failed_dirs {
                            worker.log(format!("\t{}", dir));
                        }
                        Err(format_err!("verification failed - please check the log for details"))
                    } else {
                        Ok(())
                    }
                });

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            },
        ) {
            eprintln!("unable to start verification on store {} - {}", store, err);
        }
    }
}

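/// Iterates over all configured sync jobs and hands due ones to
/// `do_sync_job()`, which runs the actual pull from the remote.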
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "syncjob";

        let last = match jobstate::last_run_time(worker_type, &job_id) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let job = match Job::new(worker_type, &job_id) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let userid = Userid::backup_userid().clone();

        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
        }
    }
}

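/// Rotates the task log archive once per day (at 00:00, like a normal
/// logrotate run), or immediately if the rotation job has never run yet.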
async fn schedule_task_log_rotate() {
    use proxmox_backup::{
        config::jobstate::{self, Job},
        server::rotate_task_log_archive,
    };
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let worker_type = "logrotate";
    let job_id = "task-archive";

    let last = match jobstate::last_run_time(worker_type, job_id) {
        Ok(time) => time,
        Err(err) => {
            eprintln!("could not get last run time of task log archive rotation: {}", err);
            return;
        }
    };

    // schedule daily at 00:00 like normal logrotate
    let schedule = "00:00";

    let event = match parse_calendar_event(schedule) {
        Ok(event) => event,
        Err(err) => {
            // should not happen?
            eprintln!("unable to parse schedule '{}' - {}", schedule, err);
            return;
        }
    };

    let next = match compute_next_event(&event, last, false) {
        Ok(Some(next)) => next,
        Ok(None) => return,
        Err(err) => {
            eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
            return;
        }
    };

    let now = proxmox::tools::time::epoch_i64();

    if next > now {
        // not due yet - but if we never ran the rotation, schedule instantly
        match jobstate::JobState::load(worker_type, job_id) {
            Ok(state) => match state {
                jobstate::JobState::Created { .. } => {},
                _ => return,
            },
            _ => return,
        }
    }

    let mut job = match Job::new(worker_type, job_id) {
        Ok(job) => job,
        Err(_) => return, // could not get lock
    };

    if let Err(err) = WorkerTask::new_thread(
        worker_type,
        Some(job_id.to_string()),
        Userid::backup_userid().clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            worker.log(format!("starting task log rotation"));

            let result = try_block!({
                // rotate task log archive
                let max_size = 500000; // a normal entry has about 100 bytes, so ~ 5000 entries/file
                let max_files = 20; // times twenty files gives at least 100000 task entries
                let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
                if has_rotated {
                    worker.log(format!("task log archive was rotated"));
                } else {
                    worker.log(format!("task log archive was not rotated"));
                }

                Ok(())
            });

            let status = worker.create_state(&result);

            if let Err(err) = job.finish(status) {
                eprintln!("could not finish job state for {}: {}", worker_type, err);
            }

            result
        },
    ) {
        eprintln!("unable to start task log rotation: {}", err);
    }
}

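/// Collects host statistics every 10 seconds; on every sixth iteration (once
/// a minute) the RRD values are also persisted to disk.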
async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

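// Thin wrappers around rrd::update_value() for gauge and derive data sources;
// errors are only logged, so a failing RRD update never interrupts the
// statistics collection loop.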
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

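/// Gathers CPU, memory, network, load average and disk statistics for the
/// host and each configured datastore, feeding them into the RRD database.
/// Runs inside `block_in_place` because the procfs and disk reads block.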
async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or(Vec::new());

                for config in datastore_list {

                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }

    });
}

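/// Records total/used space for `path` and, if the path sits on a mounted
/// device, per-device I/O statistics: ZFS pools are queried via
/// `zfs_pool_stats`, everything else through the block device's `read_stat()`.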
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(pool) = source {
                        match zfs_pool_stats(&pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}