use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::*;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;

fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    }
}
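
// Top-level proxy setup: syslog, API configuration, TLS acceptor, HTTP
// server, and the minute-based task scheduler.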
async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }
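
    // Touching these once up front forces the lazy_static initializers to
    // run before the server starts accepting requests.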
    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());
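
    // The built acceptor is shared via Arc with the per-connection closure
    // below, which performs the TLS handshake for each accepted socket.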
    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    )?;
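
    // Signal readiness only after the listening socket has been set up.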
    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();

    server.await?;

    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};
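
// Compute the Instant of the next full minute. Example: at epoch second 90,
// epoch_next is 120s, so the result is Instant::now() + 30s.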
fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
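
// Scheduler loop: wakes on minute boundaries and, after a short warm-up,
// runs all scheduled task types. Panics are caught via catch_unwind so a
// misbehaving job cannot kill the loop.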
async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}
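
// Find the most recent task of the given worker type and id in the task
// list; the schedulers use its start time to decide whether the next
// calendar event is already due.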
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id == worker_id {
                    match last {
                        Some(ref upid) => {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                        }
                        None => {
                            last = Some(&entry.upid)
                        }
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}
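
// For each configured datastore, check the gc-schedule calendar event and
// spawn a garbage collection worker when the next event is due.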
async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {

        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(store_config) => store_config,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_job_start failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}
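
// For each configured datastore with a prune-schedule, apply the keep-*
// retention options to every backup group and remove unselected snapshots.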
async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::WorkerTask;
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {

        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(store_config) => store_config,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_prune_job_still_running { continue; }

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!("{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}
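
// For each configured sync job, pull the contents of a remote datastore into
// the local target store on the configured schedule.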
async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::{ WorkerTask },
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {

        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(job_config) => job_config,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_sync_job_still_running { continue; }

        let worker_type = "sync";

        let last = match lookup_last_worker(worker_type, &job_config.store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_config.store.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        }
    }
}
);