use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
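
// proxmox-backup-proxy is the public-facing HTTPS daemon: it serves the REST
// API on port 8007 and drives the periodic schedulers defined below (garbage
// collection, prune, sync jobs and host statistics).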
fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    }
}
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }
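
    // Touch the lazy_static auth values once at startup, presumably so that a
    // missing or unreadable key surfaces immediately rather than on the first
    // request that needs it.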
    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static
    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
    let rest_server = RestServer::new(config);
    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");
    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());
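
    // The built acceptor is wrapped in an Arc because the accept closure below
    // clones a handle for every incoming connection.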
    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .ok() // handshake errors aren't fatal, so return None to filter

            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

                .and_then(|_| hyper::Server::builder(connections)
                    .with_graceful_shutdown(server::shutdown_future())
                .map_err(|err| eprintln!("server error: {}", err))
    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
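    // The listening socket is set up at this point, so report readiness to systemd.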
    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });
    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }
    start_task_scheduler();
    start_stat_generator();
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}
fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}
fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}
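
// Both helpers above race their worker future against the global shutdown
// future: futures::future::select completes as soon as either side finishes,
// so the spawned task ends when the daemon shuts down.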
use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};
fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
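
// next_minute() example: with epoch_now = 90s the next full minute is at
// (90/60 + 1)*60 = 120s, so the returned Instant lies 30s in the future.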
async fn run_task_scheduler() {

    let mut count: usize = 0;

        let delay_target = match next_minute() { // try to run every minute
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                    match panic.downcast::<&str>() {
                            eprintln!("task scheduler panic: {}", msg);
                        eprintln!("task scheduler panic - unknown type");
                eprintln!("task scheduler failed - {:?}", err);

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
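
// The scheduler wakes on every minute boundary; the `count > 2` guard above
// skips the first iterations so scheduled tasks only start one to two minutes
// after daemon start-up.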
async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}
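
// The schedulers called above report their own errors via eprintln! and return
// (), so one failing datastore or job does not stop the others.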
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                            last = Some(&entry.upid)
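
// lookup_last_worker (above) scans the recorded task list for the most recent
// UPID matching the given worker type and id; the schedulers use its start
// time as the "last run" when computing the next calendar event.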
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
            eprintln!("unable to read datastore config - {}", err);
        Ok((config, _digest)) => config,

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
                eprintln!("lookup_datastore failed - {}", err);

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
                eprintln!("datastore config from_value failed - {}", err);

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,

        let event = match parse_calendar_event(&event_str) {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);

            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                    eprintln!("lookup_last_job_start failed: {}", err);

        let next = match compute_next_event(&event, last, false) {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
                eprintln!("query system time failed - {}", err);

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::{WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
            eprintln!("unable to read datastore config - {}", err);
        Ok((config, _digest)) => config,

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
                eprintln!("lookup_datastore '{}' failed - {}", store, err);

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,

        if !prune_options.keeps_something() { // no prune settings - keep all

        let event = match parse_calendar_event(&event_str) {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
        //fixme: if last_prune_job_still_running { continue; }
        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => upid.starttime,
                eprintln!("lookup_last_job_start failed: {}", err);

        let next = match compute_next_event(&event, last, false) {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
                eprintln!("query system time failed - {}", err);

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        datastore.remove_backup_dir(&info.backup_dir)?;

            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::{ WorkerTask },
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },

    let config = match sync::config() {
            eprintln!("unable to read sync job config - {}", err);
        Ok((config, _digest)) => config,

    let remote_config = match remote::config() {
            eprintln!("unable to read remote config - {}", err);
        Ok((config, _digest)) => config,

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
                eprintln!("sync job config from_value failed - {}", err);

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),

        let event = match parse_calendar_event(&event_str) {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);

        //fixme: if last_sync_job_still_running { continue; }

        let worker_type = "sync";

        let last = match lookup_last_worker(worker_type, &job_config.store) {
            Ok(Some(upid)) => upid.starttime,
                eprintln!("lookup_last_job_start failed: {}", err);

        let next = match compute_next_event(&event, last, false) {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
                eprintln!("query system time failed - {}", err);

        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
                eprintln!("remote_config lookup failed: {}", err);

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);
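        // remove_vanished defaults to true, so snapshots that no longer exist
        // on the remote are removed from the local target store unless the job
        // explicitly disables it.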
        if let Err(err) = WorkerTask::spawn(
            Some(job_config.store.clone()),
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
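
    // The sync worker above builds an HttpClient from the remote's host, user
    // and (optional) fingerprint, performs a login first so authentication
    // problems surface as a clear error, and then pulls the remote store into
    // the local target datastore via pull_store.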
async fn run_stat_generator() {

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats().await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
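
        // Host statistics are collected roughly every 10 seconds; the delay
        // target is taken before generate_host_stats() runs, so the time spent
        // collecting does not stretch the interval.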
async fn generate_host_stats() {
    use proxmox::sys::linux::procfs::read_proc_stat;
    use proxmox_backup::rrd;

    match read_proc_stat() {
            if let Err(err) = rrd::update_value("host/cpu", stat.cpu) {
                eprintln!("rrd::update_value 'host/cpu' failed - {}", err);
            eprintln!("read_proc_stat failed - {}", err);
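
    // Failures to read /proc/stat or to update the RRD series are only logged;
    // the statistics loop keeps running.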