use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
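
/// Binary entry point: run the async `run()` future on the
/// proxmox-backup tokio runtime and exit non-zero on error.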
fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    }
}
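
/// Daemon setup and main loop: initialize syslog, load the auth keys,
/// build the REST API configuration and TLS acceptor, serve HTTPS on
/// port 8007, and wait for active workers once shutdown is requested.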
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which include jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter them out
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}
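
/// Spawn the statistics generator as a background task; the `select`
/// against the shutdown future cancels it when the daemon stops.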
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}
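
/// Spawn the minute-based task scheduler, likewise cancelled via the
/// shutdown future.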
fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};
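
/// Compute the `Instant` at which the next full wall-clock minute starts.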
fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
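
/// Scheduler main loop: wake up at every full minute and run
/// `schedule_tasks()`, catching panics so a single failing pass cannot
/// kill the loop.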
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}
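
/// One scheduling pass over all job types.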
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}
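
/// Find the most recent task of the given type and worker ID in the task
/// list, so a job's schedule can be evaluated relative to its last run.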
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id == worker_id {
                    match last {
                        Some(ref upid) => {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                        }
                        None => {
                            last = Some(&entry.upid)
                        }
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}
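
/// Start garbage collection on every datastore whose `gc_schedule` has
/// elapsed, unless a collection is already running on that store.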
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_job_start failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };
        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}
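
/// Start a prune worker for every datastore whose `prune_schedule` has
/// elapsed and whose retention options actually keep something.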
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_prune_job_still_running { continue; }

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!(
                            "{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}
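
/// Start a sync worker for every sync job whose schedule has elapsed,
/// pulling the configured remote store into the local target datastore.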
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store},
        server::WorkerTask,
        config::{sync::{self, SyncJobConfig}, remote::{self, Remote}},
        tools::systemd::time::{parse_calendar_event, compute_next_event},
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_sync_job_still_running { continue; }

        let worker_type = "sync";

        let last = match lookup_last_worker(worker_type, &job_config.store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_config.store.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        }
    }
}
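
/// Statistics loop: collect host stats every 10 seconds and write them
/// into the RRD database.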
async fn run_stat_generator() {

    loop {
        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats().await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}
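
/// Gather CPU, memory, network and root-filesystem usage from procfs and
/// statfs and update the corresponding RRD series; the blocking reads run
/// inside `block_in_place`.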
async fn generate_host_stats() {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev};
    use proxmox_backup::rrd;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                if let Err(err) = rrd::update_value("host/cpu", stat.cpu, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/cpu' failed - {}", err);
                }
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }
        match read_meminfo() {
            Ok(meminfo) => {
                if let Err(err) = rrd::update_value("host/memtotal", meminfo.memtotal as f64, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/memtotal' failed - {}", err);
                }
                if let Err(err) = rrd::update_value("host/memused", meminfo.memused as f64, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/memused' failed - {}", err);
                }
                if let Err(err) = rrd::update_value("host/swaptotal", meminfo.swaptotal as f64, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/swaptotal' failed - {}", err);
                }
                if let Err(err) = rrd::update_value("host/swapused", meminfo.swapused as f64, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/swapused' failed - {}", err);
                }
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                if let Err(err) = rrd::update_value("host/netin", netin as f64, rrd::DST::Derive) {
                    eprintln!("rrd::update_value 'host/netin' failed - {}", err);
                }
                if let Err(err) = rrd::update_value("host/netout", netout as f64, rrd::DST::Derive) {
                    eprintln!("rrd::update_value 'host/netout' failed - {}", err);
                }
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match disk_usage(std::path::Path::new("/")) {
            Ok((total, used, _avail)) => {
                if let Err(err) = rrd::update_value("host/roottotal", total as f64, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/roottotal' failed - {}", err);
                }
                if let Err(err) = rrd::update_value("host/rootused", used as f64, rrd::DST::Gauge) {
                    eprintln!("rrd::update_value 'host/rootused' failed - {}", err);
                }
            }
            Err(err) => {
                eprintln!("read root disk_usage failed - {}", err);
            }
        }

    });
}

/// Returns `(total, used, avail)` in bytes for the filesystem containing `path`.
fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {

    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;

    Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
}