// src/bin/proxmox-backup-proxy.rs
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;

fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    }
}

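/// Sets up logging, the REST API configuration and the TLS acceptor, then
/// runs the HTTPS daemon on port 8007 together with the periodic task
/// scheduler until shutdown is requested.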
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());

    // accept TLS connections on port 8007 and hand them to the REST server
    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter them out
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

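/// Spawns the task scheduler loop onto the tokio runtime; the loop is
/// aborted as soon as the server shutdown future resolves.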
fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};

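/// Computes the `Instant` of the next full wall-clock minute, used to align
/// scheduler iterations to minute boundaries.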
fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}

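/// Scheduler main loop: wakes up once per minute and runs `schedule_tasks()`,
/// catching panics so a single misbehaving check cannot take down the loop.
/// The first one to two minutes after startup are skipped.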
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

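/// Runs all schedule checks (garbage collection, prune, sync jobs) once.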
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}

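/// Returns the most recently started task (by `starttime`) from the task list
/// that matches the given worker type and worker ID, if any.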
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id == worker_id {
                    match last {
                        Some(ref upid) => {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                        }
                        None => {
                            last = Some(&entry.upid)
                        }
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}

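/// Checks every datastore's `gc_schedule` calendar event and starts a garbage
/// collection worker for each store whose next scheduled run is due, unless a
/// collection is already running on that store.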
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_job_start failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}

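/// Checks every datastore's `prune_schedule` and, when due, starts a prune
/// worker that applies the configured keep options to each backup group and
/// removes the snapshots that are not kept.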
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_prune_job_still_running { continue; }

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                            store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!(
                                "{} {}/{}/{}",
                                if keep { "keep" } else { "remove" },
                                group.backup_type(), group.backup_id(),
                                BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}

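/// Checks the configured sync jobs and, for every job whose schedule is due,
/// spawns a worker that pulls the contents of the remote datastore into the
/// local target store.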
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::{ WorkerTask },
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_sync_job_still_running { continue; }

        let worker_type = "sync";

        let last = match lookup_last_worker(worker_type, &job_config.store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_config.store.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                        job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        }
    }
}