use std::sync::Arc;
use std::path::Path;

use anyhow::{bail, format_err, Error};
use futures::*;

use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;

use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };

fn main() {
    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
    }
}

async fn run() -> Result<(), Error> {
    if let Err(err) = syslog::init(
        syslog::Facility::LOG_DAEMON,
        log::LevelFilter::Info,
        Some("proxmox-backup-proxy")) {
        bail!("unable to initialize syslog - {}", err);
    }

    let _ = public_auth_key(); // load with lazy_static
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
    config.add_alias("novnc", "/usr/share/novnc-pve");
    config.add_alias("extjs", "/usr/share/javascript/extjs");
    config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
    config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
    config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
    config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
    config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

    let rest_server = RestServer::new(config);

    //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
    let key_path = configdir!("/proxy.key");
    let cert_path = configdir!("/proxy.pem");

    let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
        .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
    acceptor.set_certificate_chain_file(cert_path)
        .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
    acceptor.check_private_key().unwrap();

    let acceptor = Arc::new(acceptor.build());
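    // check_private_key() above verifies that proxy.key actually belongs to proxy.pem,
    // so a mismatched key/certificate pair aborts startup here instead of failing on
    // the first TLS handshake.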

    let server = daemon::create_daemon(
        ([0,0,0,0,0,0,0,0], 8007).into(),
        |listener, ready| {
            let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
                .map_err(Error::from)
                .try_filter_map(move |(sock, _addr)| {
                    let acceptor = Arc::clone(&acceptor);
                    async move {
                        sock.set_nodelay(true).unwrap();
                        sock.set_send_buffer_size(1024*1024).unwrap();
                        sock.set_recv_buffer_size(1024*1024).unwrap();
                        Ok(tokio_openssl::accept(&acceptor, sock)
                            .await
                            .ok() // handshake errors aren't fatal, so return None to filter
                        )
                    }
                });
            let connections = proxmox_backup::tools::async_io::HyperAccept(connections);

            Ok(ready
                .and_then(|_| hyper::Server::builder(connections)
                    .serve(rest_server)
                    .with_graceful_shutdown(server::shutdown_future())
                    .map_err(Error::from)
                )
                .map_err(|err| eprintln!("server error: {}", err))
                .map(|_| ())
            )
        },
    );
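    // Note (assumption about the daemon helper, not stated in this file): create_daemon
    // keeps the bound listening socket alive across daemon reloads and only re-runs the
    // service closure above, so clients should not see "connection refused" while the
    // proxy binary is being upgraded.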

    daemon::systemd_notify(daemon::SystemdNotify::Ready)?;

    let init_result: Result<(), Error> = try_block!({
        server::create_task_control_socket()?;
        server::server_state_init()?;
        Ok(())
    });

    if let Err(err) = init_result {
        bail!("unable to start daemon - {}", err);
    }

    start_task_scheduler();
    start_stat_generator();

    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
    log::info!("done - exit server");

    Ok(())
}

fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};

fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
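
// Worked example for next_minute() (illustration only): if epoch_now is 1000s, then
// epoch_now.as_secs()/60 + 1 == 17 and epoch_next == 1020s, so the returned Instant
// lies 20s in the future - exactly on the next full-minute boundary.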

async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => match panic.downcast::<&str>() {
                    Ok(msg) => eprintln!("task scheduler panic: {}", msg),
                    Err(_) => eprintln!("task scheduler panic - unknown type"),
                },
                Ok(Err(err)) => eprintln!("task scheduler failed - {:?}", err),
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}

fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id != worker_id { continue; }
                match last {
                    Some(ref upid) => {
                        if upid.starttime < entry.upid.starttime {
                            last = Some(&entry.upid)
                        }
                    }
                    None => {
                        last = Some(&entry.upid)
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}

async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(store_config) => store_config,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_job_start failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}
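
// Illustration for the *_schedule options used above and below (values not taken from
// this file): gc_schedule, prune_schedule and the sync job schedule hold systemd-like
// calendar events such as "daily", "sat 18:15" or "*:0/30"; parse_calendar_event()
// turns the string into an event and compute_next_event(&event, last, false) yields
// the next epoch after `last` at which the job is due.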

async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::{WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(store_config) => store_config,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => {
                if proxmox_backup::server::worker_is_active_local(&upid) {
                    continue;
                }
                upid.starttime
            }
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!("{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}

async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::{ WorkerTask },
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(job_config) => job_config,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        let worker_type = "syncjob";

        let last = match lookup_last_worker(worker_type, &job_id) {
            Ok(Some(upid)) => {
                if proxmox_backup::server::worker_is_active_local(&upid) {
                    continue;
                }
                upid.starttime
            }
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };

        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_id.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        }
    }
}

async fn run_stat_generator() {

    let mut count = 0;
    loop {
        count += 1;
        let save = if count >= 6 { count = 0; true } else { false };

        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats(save).await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

fn rrd_update_gauge(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64, save: bool) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}
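
// Usage note (illustrative, not from the original source): Gauge series store absolute
// sample values (CPU load, memory bytes), while Derive series store the rate derived
// from a monotonically increasing counter, which is why the network and disk counters
// below are fed through rrd_update_derive() rather than rrd_update_gauge().
//
//     rrd_update_gauge("host/memused", mem_used_bytes, save);   // absolute value
//     rrd_update_derive("host/netin", total_rx_bytes, save);    // ever-growing counter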

async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu, save);
                rrd_update_gauge("host/iowait", stat.iowait_percent, save);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
                rrd_update_gauge("host/memused", meminfo.memused as f64, save);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64, save);
                rrd_update_derive("host/netout", netout as f64, save);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        let disk_manager = DiskManage::new();

        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or(Vec::new());

                for config in datastore_list {
                    let rrd_prefix = format!("datastore/{}", config.name);
                    let path = std::path::Path::new(&config.path);
                    gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }
    });
}

fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
        }
    }

    match disk_manager.find_mounted_device(path) {
        Ok(None) => {}
        Ok(Some((fs_type, device, source))) => {
            let mut device_stat = None;
            match fs_type.as_str() {
                "zfs" => {
                    if let Some(pool) = source {
                        match zfs_pool_stats(&pool) {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
                        }
                    }
                }
                _ => {
                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
                        match disk.read_stat() {
                            Ok(stat) => device_stat = stat,
                            Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
                        }
                    }
                }
            }
            if let Some(stat) = device_stat {
                let rrd_key = format!("{}/read_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
                let rrd_key = format!("{}/read_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

                let rrd_key = format!("{}/write_ios", rrd_prefix);
                rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
                let rrd_key = format!("{}/write_bytes", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

                let rrd_key = format!("{}/io_ticks", rrd_prefix);
                rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
            }
        }
        Err(err) => {
            eprintln!("find_mounted_device failed - {}", err);
        }
    }
}