use std::sync::Arc;
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};

use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{DiskManage, zfs_pool_stats};

use proxmox_backup::api2::pull::do_sync_job;

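/// Entry point: refuses to run unless started as the backup user and
/// group, then hands the async `run()` future to the project's runtime
/// helper.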
fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    proxmox_backup::tools::runtime::main(run())
}

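/// Daemon main loop: sets up syslog, the REST API configuration and the
/// TLS acceptor, then serves HTTPS on port 8007 until shutdown is
/// requested, waiting for active workers before exiting.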
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("locale", "/usr/share/pbs-i18n");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());

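    // `create_daemon` (see tools::daemon) wraps the listening socket so
    // the daemon can re-exec itself on reload without dropping it; the
    // closure below builds the TLS accept loop and the hyper server.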
    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter them out
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

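/// Spawn the statistics generator loop; it runs until the global
/// shutdown future resolves.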
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

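/// Spawn the minute-based task scheduler loop; like the stat generator,
/// it is aborted by the global shutdown future.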
fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};

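/// Compute the `Instant` corresponding to the next full minute, based
/// on the current wall-clock time.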
fn next_minute() -> Result<Instant, Error> {
    let now = SystemTime::now();
    let epoch_now = now.duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

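/// Scheduler loop: wakes up at every full minute and runs
/// `schedule_tasks()`, catching panics so a misbehaving job cannot take
/// the scheduler down. The first couple of wakeups after startup are
/// skipped to let the daemon settle.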
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() {  // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

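/// Run one scheduling pass over all job types. Each helper scans its
/// config, decides per entry whether the configured calendar event is
/// due, and spawns a worker task if so.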
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_verification().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}

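/// Schedule garbage collection for every datastore with a configured
/// `gc_schedule`. The last run time is taken from the UPID of the
/// previous GC if available, otherwise from the job state file.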
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match jobstate::last_run_time(worker_type, &store) {
                Ok(time) => time,
                Err(err) => {
                    eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                job.start(&worker.upid().to_string())?;

                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));

                let result = datastore.garbage_collection(&worker);

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}

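/// Schedule pruning for every datastore with a `prune_schedule` and at
/// least one keep option set; snapshots not covered by the retention
/// options are removed, oldest first.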
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "prune";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            Userid::backup_userid().clone(),
            false,
            move |worker| {

                job.start(&worker.upid().to_string())?;

                let result = try_block!({

                    worker.log(format!("Starting datastore prune on store \"{}\"", store));
                    worker.log(format!("task triggered by schedule '{}'", event_str));
                    worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                    let base_path = datastore.base_path();

                    let groups = BackupGroup::list_groups(&base_path)?;
                    for group in groups {
                        let list = group.list_backups(&base_path)?;
                        let mut prune_info = compute_prune_info(list, &prune_options)?;
                        prune_info.reverse(); // delete older snapshots first

                        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                                store, group.backup_type(), group.backup_id()));

                        for (info, keep) in prune_info {
                            worker.log(format!(
                                    "{} {}/{}/{}",
                                    if keep { "keep" } else { "remove" },
                                    group.backup_type(), group.backup_id(),
                                    info.backup_dir.backup_time_string()));
                            if !keep {
                                datastore.remove_backup_dir(&info.backup_dir, true)?;
                            }
                        }
                    }
                    Ok(())
                });

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}

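/// Schedule verification for every datastore with a `verify_schedule`;
/// the resulting worker verifies all backups on the store and fails if
/// any snapshot does not verify cleanly.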
async fn schedule_datastore_verification() {
    use proxmox_backup::backup::{DataStore, verify_all_backups};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.verify_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "verify";

        let last = match jobstate::last_run_time(worker_type, &store) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let mut job = match Job::new(worker_type, &store) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let worker_id = store.clone();
        let store2 = store.clone();
        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(worker_id),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                job.start(&worker.upid().to_string())?;
                worker.log(format!("starting verification on store {}", store2));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                let result = try_block!({
                    let failed_dirs = verify_all_backups(datastore, worker.clone())?;
                    if !failed_dirs.is_empty() {
                        worker.log("Failed to verify the following snapshots:");
                        for dir in failed_dirs {
                            worker.log(format!("\t{}", dir));
                        }
                        Err(format_err!("verification failed - please check the log for details"))
                    } else {
                        Ok(())
                    }
                });

                let status = worker.create_state(&result);

                if let Err(err) = job.finish(status) {
                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                }

                result
            },
        ) {
            eprintln!("unable to start verification on store {} - {}", store, err);
        }
    }
}

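/// Schedule all configured sync jobs whose calendar event is due. The
/// actual work is delegated to `do_sync_job`, which runs the pull from
/// the remote under its own worker task.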
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "syncjob";

        let last = match jobstate::last_run_time(worker_type, &job_id) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = proxmox::tools::time::epoch_i64();

        if next > now { continue; }

        let job = match Job::new(worker_type, &job_id) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let userid = Userid::backup_userid().clone();

        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
        }
    }
}

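/// Collect host statistics every ten seconds; every sixth iteration
/// (roughly once a minute) the values are also flushed to disk via the
/// `save` flag.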
async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

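/// Thin wrappers around `rrd::update_value` for gauge and derive data
/// sources; errors are logged but never propagated, since a failed RRD
/// update must not interrupt stat collection.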
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

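/// Gather one round of host statistics (CPU, memory, network, load
/// average, plus disk usage for the root filesystem and each datastore)
/// and feed them into the RRD database. The blocking procfs reads run
/// via `block_in_place` so the async executor is not stalled.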
async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or(Vec::new());

                for config in datastore_list {

                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }

    });
}

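/// Record total/used bytes for the filesystem at `path` and, where the
/// backing device can be resolved, per-device I/O counters (read/write
/// IOs, bytes, io_ticks); ZFS-backed paths are queried through the pool
/// statistics instead.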
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(pool) = source {
                        match zfs_pool_stats(&pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}