use std::sync::Arc;
use std::path::Path;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{DiskManage, zfs_pool_stats};

fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    }
}
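
// NOTE: `proxmox_backup::tools::runtime::main` runs the given future on the
// crate's shared tokio runtime. A minimal stand-in sketch (assuming plain
// tokio; illustrative only, not the crate's actual implementation):
//
//     fn main() {
//         let rt = tokio::runtime::Runtime::new().unwrap();
//         if let Err(err) = rt.block_on(run()) {
//             eprintln!("Error: {}", err);
//             std::process::exit(-1);
//         }
//     }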

async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );
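
    // The `try_filter_map` above maps failed TLS handshakes to `None`, so a
    // single misbehaving client cannot terminate the accept loop; only
    // listener-level errors propagate as fatal.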

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}
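
// Both starters use the same pattern: `futures::future::select` resolves as
// soon as either the long-running loop or the shared shutdown future
// completes, so resolving `server::shutdown_future()` aborts the spawned
// loop without any explicit cancellation handling inside it.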

use std::time::{Instant, Duration};

fn next_minute() -> Result<Instant, Error> {
    let epoch_now = epoch_now()?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
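
// A minimal sketch of the rounding rule used by `next_minute`: for an epoch
// time of `t` seconds, the next full-minute boundary is `(t / 60 + 1) * 60`.
// (Illustrative test only; not part of the original file.)
#[cfg(test)]
mod next_minute_rounding {
    #[test]
    fn rounds_up_to_next_boundary() {
        let next = |t: u64| (t / 60 + 1) * 60;
        assert_eq!(next(0), 60);   // exactly on a boundary still waits a full minute
        assert_eq!(next(59), 60);
        assert_eq!(next(60), 120);
        assert_eq!(next(125), 180);
    }
}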

async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}
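
// Note: `catch_unwind` yields the panic payload as `Box<dyn Any>`. The
// `downcast::<&str>` above only catches string-literal panics; a
// `panic!("{}", x)` carries a `String` payload and lands in the
// "unknown type" branch. A fuller handler would also try
// `panic.downcast::<String>()`.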

async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}

fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id == worker_id {
                    match last {
                        Some(ref upid) => {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                        }
                        None => {
                            last = Some(&entry.upid)
                        }
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}
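
// Callers below treat `Ok(None)` as "this job never ran" and substitute a
// last start time of 0 (the UNIX epoch); the next computed event then lies
// in the past, so the job is started on the next scheduler pass.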

async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_worker failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match epoch_now_u64() {
            Ok(epoch_now) => epoch_now as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}
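
// The `gc_schedule` (like the prune and sync schedules below) holds a
// systemd-style calendar event, parsed by `parse_calendar_event` (which
// implements a subset of that syntax). Plausible values, assuming systemd
// calendar-event conventions, would be e.g. "daily", "sat 03:00" or
// "*:0/30"; `compute_next_event` then turns the last start time into the
// next epoch second at which the job is due.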

async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => {
                if proxmox_backup::server::worker_is_active_local(&upid) {
                    continue; // prune worker for this store is still running
                }
                upid.starttime
            }
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_worker failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match epoch_now_u64() {
            Ok(epoch_now) => epoch_now as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!(
                            "{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}
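
// Prune flow per datastore: list backup groups, list snapshots per group,
// let `compute_prune_info` mark each snapshot keep/remove against the
// retention options, then delete the unmarked ones. As an illustrative
// example of the retention semantics (matching the usual Proxmox keep-*
// behavior, not spelled out in this file): `keep_last: Some(3)` keeps the
// three most recent snapshots of each group, while `keep_daily: Some(7)`
// keeps the newest snapshot of each of the last seven days that have one.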

async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::WorkerTask,
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "syncjob";

        let last = match lookup_last_worker(worker_type, &job_id) {
            Ok(Some(upid)) => {
                if proxmox_backup::server::worker_is_active_local(&upid) {
                    continue; // sync job is still running
                }
                upid.starttime
            },
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_worker failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match epoch_now_u64() {
            Ok(epoch_now) => epoch_now as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_id.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        }
    }
}
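
// Two worker-task variants appear above: `WorkerTask::new_thread` (GC and
// prune) runs a blocking closure on its own thread, while `WorkerTask::spawn`
// (sync jobs) takes an async closure and runs it on the runtime, since
// pulling from a remote is network-I/O-bound and already async.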

async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        // request a save every 6th iteration (roughly once a minute)
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}
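
// DST (data source type) follows RRDtool-style semantics: `Gauge` stores
// the sampled value as-is (memory usage, load average), while `Derive`
// stores the per-second rate of change of a monotonic counter (network or
// disk byte/op counters), which is why the callers below feed raw counter
// values into `rrd_update_derive`.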

async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or(Vec::new());

                for config in datastore_list {

                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }

    });
}
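
// `block_in_place` is used because the collectors above do synchronous
// /proc and disk reads; wrapping them keeps that blocking work from
// stalling other tasks on the async executor (semantics analogous to
// tokio's `block_in_place`).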

fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {},
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(pool) = source {
                        match zfs_pool_stats(&pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                // kernel diskstats sectors are fixed 512-byte units
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save); // ms -> s
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}