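//! `proxmox-backup-proxy` - the public-facing daemon of Proxmox Backup
//! Server. It serves the REST API and web UI over TLS on port 8007,
//! drives the scheduled maintenance jobs (garbage collection, prune,
//! sync) and periodically records host statistics in the RRD database.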

use std::sync::Arc;
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{DiskManage, zfs_pool_stats};

use proxmox_backup::api2::pull::do_sync_job;
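
// Entry point: refuse to run as anything but the dedicated backup
// user/group, then hand the async body over to the tokio runtime.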
fn main() -> Result<(), Error> {
    proxmox_backup::tools::setup_safe_path_env();

    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
    let running_uid = nix::unistd::Uid::effective();
    let running_gid = nix::unistd::Gid::effective();

    if running_uid != backup_uid || running_gid != backup_gid {
        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
    }

    proxmox_backup::tools::runtime::main(run())
}
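
// Async main body: initialize syslog, build the API and TLS configuration,
// then serve HTTPS until shutdown is requested and all workers are done.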
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
    indexpath.push("index.hbs");
    config.register_template("index", &indexpath)?;
    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());
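
    // `daemon::create_daemon` (from the proxmox `daemon` helpers) manages the
    // listening socket so the service can be reloaded in place without
    // dropping it. Each accepted TCP connection is TLS-wrapped here; failed
    // handshakes yield `None` and are filtered out, so they never terminate
    // the accept loop.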
    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter them out
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}
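
// Each helper below spawns its loop raced against the global shutdown future
// via `futures::future::select`, so the loop is dropped as soon as a
// shutdown is triggered.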
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}
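
// The task scheduler ticks on wall-clock minute boundaries: `next_minute`
// translates "start of the next minute" from epoch time into an `Instant`
// usable with the tokio timer.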
use std::time::{Instant, Duration};

fn next_minute() -> Result<Instant, Error> {
    let epoch_now = epoch_now()?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}
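
// One scheduler tick: give every job type a chance to run.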
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}
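
// Find the most recent task in the task list matching the given worker type
// and id - used to determine when a schedule last fired.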
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id == worker_id {
                    match last {
                        Some(ref upid) => {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                        }
                        None => {
                            last = Some(&entry.upid)
                        }
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}
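
// Schedule garbage collection for every datastore with a `gc_schedule`. The
// reference time for the calendar event is the last GC run, taken from the
// saved GC status if present, else from the task list.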
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_worker failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match epoch_now_u64() {
            Ok(epoch_now) => epoch_now as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}
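
// Schedule prune for every datastore with a `prune_schedule` and at least
// one keep option set. The worker walks all backup groups, computes the
// keep/remove list and deletes snapshots that fall out of retention,
// oldest first.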
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => {
                if proxmox_backup::server::worker_is_active_local(&upid) {
                    continue;
                }
                upid.starttime
            }
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_worker failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match epoch_now_u64() {
            Ok(epoch_now) => epoch_now as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            Userid::backup_userid().clone(),
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!(
                            "{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir, true)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}
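
// Schedule the configured sync jobs. `Job::new` takes the per-job lock and
// fails if the job is still running, in which case the job is skipped and
// retried on a later tick.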
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "syncjob";

        let last = match jobstate::last_run_time(worker_type, &job_id) {
            Ok(time) => time,
            Err(err) => {
                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(Some(next)) => next,
            Ok(None) => continue,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match epoch_now_u64() {
            Ok(epoch_now) => epoch_now as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let job = match Job::new(worker_type, &job_id) {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };

        let userid = Userid::backup_userid().clone();

        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
        }
    }
}
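
// Collect host stats every 10 seconds. The `save` flag is true on every 6th
// iteration (once a minute) and is passed through to the RRD update -
// presumably this is what persists the RRD data to disk.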
async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}
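
// Thin wrappers around `rrd::update_value`. The DST variants apparently
// follow rrdtool's data-source types: `Gauge` records the value as-is,
// `Derive` records its rate of change (used for counters such as bytes and
// IOs below).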
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}
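
// Gather CPU, memory, network, load average and disk statistics. The
// procfs/disk reads are synchronous, hence `block_in_place` so they don't
// stall the async executor.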
async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or(Vec::new());

                for config in datastore_list {
                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }
    });
}
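
// Record filesystem usage (total/used) for `path` and, if the backing block
// device can be resolved, per-device I/O counters. ZFS is special-cased
// since pool statistics come from the zpool itself rather than a single
// block device.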
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(pool) = source {
                        match zfs_pool_stats(&pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}