use std::sync::Arc;
use std::path::{Path, PathBuf};
+use std::os::unix::io::AsRawFd;
use anyhow::{bail, format_err, Error};
use futures::*;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
-use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
-use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
+use proxmox_backup::tools::{
+ daemon,
+ disks::{
+ DiskManage,
+ zfs_pool_stats,
+ },
+ socket::{
+ set_tcp_keepalive,
+ PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
+ },
+};
use proxmox_backup::api2::pull::do_sync_job;
+use proxmox_backup::backup::do_verification_job;
fn main() -> Result<(), Error> {
proxmox_backup::tools::setup_safe_path_env();
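+
+ // the proxy must run with the backup user's uid/gid; refuse to start otherwise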
+ let backup_uid = proxmox_backup::backup::backup_user()?.uid;
+ let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+ let running_uid = nix::unistd::Uid::effective();
+ let running_gid = nix::unistd::Gid::effective();
+
+ if running_uid != backup_uid || running_gid != backup_gid {
+ bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
+ }
+
proxmox_backup::tools::runtime::main(run())
}
let mut config = ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
- // add default dirs which includes jquery and bootstrap
- // my $base = '/usr/share/libpve-http-server-perl';
- // add_dirs($self->{dirs}, '/css/' => "$base/css/");
- // add_dirs($self->{dirs}, '/js/' => "$base/js/");
- // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
config.add_alias("novnc", "/usr/share/novnc-pve");
config.add_alias("extjs", "/usr/share/javascript/extjs");
config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
+ config.add_alias("locale", "/usr/share/pbs-i18n");
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
config.register_template("index", &indexpath)?;
config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
+ config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
+
let rest_server = RestServer::new(config);
// openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
let key_path = configdir!("/proxy.key");
let cert_path = configdir!("/proxy.pem");
- let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
+ let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
acceptor.set_private_key_file(key_path, SslFiletype::PEM)
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
acceptor.set_certificate_chain_file(cert_path)
.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
let acceptor = Arc::clone(&acceptor);
async move {
sock.set_nodelay(true).unwrap();
- sock.set_send_buffer_size(1024*1024).unwrap();
- sock.set_recv_buffer_size(1024*1024).unwrap();
+
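+ // enable TCP keepalive so connections to dead peers are detected and cleaned up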
+ let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+
Ok(tokio_openssl::accept(&acceptor, sock)
.await
.ok() // handshake errors aren't fatal, so return None to filter them out
tokio::spawn(task.map(|_| ()));
}
-use std::time:: {Instant, Duration};
+use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};
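+
+// compute the Instant at which the next full minute starts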
fn next_minute() -> Result<Instant, Error> {
- let epoch_now = epoch_now()?;
+ let now = SystemTime::now();
+ let epoch_now = now.duration_since(UNIX_EPOCH)?;
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
Ok(Instant::now() + epoch_next - epoch_now)
}
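+
+// A minimal sketch of how next_minute() can pace the scheduler loop
+// (illustrative only; `schedule_tasks` is a stand-in name, and a tokio
+// runtime with timer support is assumed):
+//
+//     loop {
+//         let delay_target = next_minute()?;  // Instant of the next full minute
+//         schedule_tasks().await?;            // start whatever jobs are due
+//         tokio::time::sleep_until(delay_target.into()).await;
+//     }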
schedule_datastore_garbage_collection().await;
schedule_datastore_prune().await;
schedule_datastore_sync_jobs().await;
+ schedule_datastore_verify_jobs().await;
+ schedule_task_log_rotate().await;
Ok(())
}
-fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
-
- let list = proxmox_backup::server::read_task_list()?;
-
- let mut last: Option<&server::UPID> = None;
-
- for entry in list.iter() {
- if entry.upid.worker_type == worker_type {
- if let Some(ref id) = entry.upid.worker_id {
- if id == worker_id {
- match last {
- Some(ref upid) => {
- if upid.starttime < entry.upid.starttime {
- last = Some(&entry.upid)
- }
- }
- None => {
- last = Some(&entry.upid)
- }
- }
- }
- }
- }
- }
-
- Ok(last.cloned())
-}
-
-
async fn schedule_datastore_garbage_collection() {
use proxmox_backup::backup::DataStore;
use proxmox_backup::server::{UPID, WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
}
}
} else {
- match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => upid.starttime,
- Ok(None) => 0,
+ match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
}
};
let next = match compute_next_event(&event, last, false) {
- Ok(next) => next,
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
- let now = match epoch_now_u64() {
- Ok(epoch_now) => epoch_now as i64,
- Err(err) => {
- eprintln!("query system time failed - {}", err);
- continue;
- }
- };
+ let now = proxmox::tools::time::epoch_i64();
+
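+ // nothing to do if the next scheduled run is still in the future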
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
Userid::backup_userid().clone(),
false,
move |worker| {
+ job.start(&worker.upid().to_string())?;
+
worker.log(format!("starting garbage collection on store {}", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
- datastore.garbage_collection(&worker)
+
+ let result = datastore.garbage_collection(&*worker, worker.upid());
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
+ }
+
+ result
}
) {
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
async fn schedule_datastore_prune() {
use proxmox_backup::backup::{
- PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
+ PruneOptions, DataStore, BackupGroup, compute_prune_info};
use proxmox_backup::server::{WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
let worker_type = "prune";
- let last = match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => {
- if proxmox_backup::server::worker_is_active_local(&upid) {
- continue;
- }
- upid.starttime
- }
- Ok(None) => 0,
+ let last = match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
};
let next = match compute_next_event(&event, last, false) {
- Ok(next) => next,
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
- let now = match epoch_now_u64() {
- Ok(epoch_now) => epoch_now as i64,
- Err(err) => {
- eprintln!("query system time failed - {}", err);
- continue;
- }
- };
+ let now = proxmox::tools::time::epoch_i64();
+
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
Userid::backup_userid().clone(),
false,
move |worker| {
- worker.log(format!("Starting datastore prune on store \"{}\"", store));
- worker.log(format!("task triggered by schedule '{}'", event_str));
- worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- let base_path = datastore.base_path();
+ job.start(&worker.upid().to_string())?;
+
+ let result = try_block!({
- let groups = BackupGroup::list_groups(&base_path)?;
- for group in groups {
- let list = group.list_backups(&base_path)?;
- let mut prune_info = compute_prune_info(list, &prune_options)?;
- prune_info.reverse(); // delete older snapshots first
+ worker.log(format!("Starting datastore prune on store \"{}\"", store));
+ worker.log(format!("task triggered by schedule '{}'", event_str));
+ worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
- store, group.backup_type(), group.backup_id()));
+ let base_path = datastore.base_path();
- for (info, keep) in prune_info {
- worker.log(format!(
- "{} {}/{}/{}",
- if keep { "keep" } else { "remove" },
- group.backup_type(), group.backup_id(),
- BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
+ let groups = BackupGroup::list_groups(&base_path)?;
+ for group in groups {
+ let list = group.list_backups(&base_path)?;
+ let mut prune_info = compute_prune_info(list, &prune_options)?;
+ prune_info.reverse(); // delete older snapshots first
- if !keep {
- datastore.remove_backup_dir(&info.backup_dir, true)?;
+ worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+ store, group.backup_type(), group.backup_id()));
+
+ for (info, keep) in prune_info {
+ worker.log(format!(
+ "{} {}/{}/{}",
+ if keep { "keep" } else { "remove" },
+ group.backup_type(), group.backup_id(),
+ info.backup_dir.backup_time_string()));
+ if !keep {
+ datastore.remove_backup_dir(&info.backup_dir, true)?;
+ }
}
}
+ Ok(())
+ });
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
}
- Ok(())
+ result
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
};
let next = match compute_next_event(&event, last, false) {
- Ok(next) => next,
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
- let now = match epoch_now_u64() {
- Ok(epoch_now) => epoch_now as i64,
- Err(err) => {
- eprintln!("query system time failed - {}", err);
- continue;
- }
- };
+ let now = proxmox::tools::time::epoch_i64();
+
if next > now { continue; }
let job = match Job::new(worker_type, &job_id) {
}
}
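+// iterate over all configured verification jobs and start those that are due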
+async fn schedule_datastore_verify_jobs() {
+ use proxmox_backup::{
+ config::{verify::{self, VerificationJobConfig}, jobstate::{self, Job}},
+ tools::systemd::time::{parse_calendar_event, compute_next_event},
+ };
+ let config = match verify::config() {
+ Err(err) => {
+ eprintln!("unable to read verification job config - {}", err);
+ return;
+ }
+ Ok((config, _digest)) => config,
+ };
+ for (job_id, (_, job_config)) in config.sections {
+ let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
+ Ok(c) => c,
+ Err(err) => {
+ eprintln!("verification job config from_value failed - {}", err);
+ continue;
+ }
+ };
+ let event_str = match job_config.schedule {
+ Some(ref event_str) => event_str.clone(),
+ None => continue,
+ };
+ let event = match parse_calendar_event(&event_str) {
+ Ok(event) => event,
+ Err(err) => {
+ eprintln!("unable to parse schedule '{}' - {}", event_str, err);
+ continue;
+ }
+ };
+ let worker_type = "verificationjob";
+ let last = match jobstate::last_run_time(worker_type, &job_id) {
+ Ok(time) => time,
+ Err(err) => {
+ eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
+ continue;
+ }
+ };
+ let next = match compute_next_event(&event, last, false) {
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
+ Err(err) => {
+ eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
+ continue;
+ }
+ };
+ let now = proxmox::tools::time::epoch_i64();
+ if next > now { continue; }
+ let job = match Job::new(worker_type, &job_id) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+ let userid = Userid::backup_userid().clone();
+ if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
+ eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
+ }
+ }
+}
+
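+// rotate the task log archive once a day, keeping a bounded number of old files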
+async fn schedule_task_log_rotate() {
+ use proxmox_backup::{
+ config::jobstate::{self, Job},
+ server::rotate_task_log_archive,
+ };
+ use proxmox_backup::server::WorkerTask;
+ use proxmox_backup::tools::systemd::time::{
+ parse_calendar_event, compute_next_event};
+
+ let worker_type = "logrotate";
+ let job_id = "task-archive";
+
+ let last = match jobstate::last_run_time(worker_type, job_id) {
+ Ok(time) => time,
+ Err(err) => {
+ eprintln!("could not get last run time of task log archive rotation: {}", err);
+ return;
+ }
+ };
+
+ // schedule daily at 00:00 like normal logrotate
+ let schedule = "00:00";
+
+ let event = match parse_calendar_event(schedule) {
+ Ok(event) => event,
+ Err(err) => {
+ // should never happen: the schedule above is a fixed, valid string
+ eprintln!("unable to parse schedule '{}' - {}", schedule, err);
+ return;
+ }
+ };
+
+ let next = match compute_next_event(&event, last, false) {
+ Ok(Some(next)) => next,
+ Ok(None) => return,
+ Err(err) => {
+ eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
+ return;
+ }
+ };
+
+ let now = proxmox::tools::time::epoch_i64();
+
+ if next > now {
+ // not due yet: still run instantly if the rotation has never run at all
+ match jobstate::JobState::load(worker_type, job_id) {
+ Ok(state) => match state {
+ jobstate::JobState::Created { .. } => {},
+ _ => return,
+ },
+ _ => return,
+ }
+ }
+
+ let mut job = match Job::new(worker_type, job_id) {
+ Ok(job) => job,
+ Err(_) => return, // could not get lock
+ };
+
+ if let Err(err) = WorkerTask::new_thread(
+ worker_type,
+ Some(job_id.to_string()),
+ Userid::backup_userid().clone(),
+ false,
+ move |worker| {
+ job.start(&worker.upid().to_string())?;
+ worker.log(format!("starting task log rotation"));
+
+ let result = try_block!({
+ // rotate task log archive
+ let max_size = 500000; // a normal entry is about 100 bytes, so ~5000 entries per file
+ let max_files = 20; // 20 files keep at least 100000 task entries
+ let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
+ if has_rotated {
+ worker.log(format!("task log archive was rotated"));
+ } else {
+ worker.log(format!("task log archive was not rotated"));
+ }
+
+ Ok(())
+ });
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
+ }
+
+ result
+ },
+ ) {
+ eprintln!("unable to start task log rotation: {}", err);
+ }
+}
+
async fn run_stat_generator() {
let mut count = 0;