};
-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::api2::types::Authid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
DiskManage,
zfs_pool_stats,
},
+ logrotate::LogRotate,
socket::{
set_tcp_keepalive,
PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::server::do_verification_job;
+use proxmox_backup::server::do_prune_job;
fn main() -> Result<(), Error> {
proxmox_backup::tools::setup_safe_path_env();
config.register_template("index", &indexpath)?;
config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
- config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
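+    // create the control-command socket first, so the access log can be
+    // re-opened on request after rotation (see command_reopen_logfiles() below)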
+ let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
+
+ config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
let rest_server = RestServer::new(config);
},
);
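+    // persist the proxy PID; a daemon's control-socket path is derived from its
+    // PID (see server::ctrl_sock_from_pid() in command_reopen_logfiles() below)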
+ server::write_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
let init_result: Result<(), Error> = try_block!({
- server::create_task_control_socket()?;
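+        // task-control commands share the central command socket; spawn()
+        // starts the single handler loop for all registered commands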
+ server::register_task_control_commands(&mut commando_sock)?;
+ commando_sock.spawn()?;
server::server_state_init()?;
Ok(())
});
async fn schedule_datastore_garbage_collection() {
- use proxmox_backup::config::datastore::{
- self,
- DataStoreConfig,
+ use proxmox_backup::config::{
+ datastore::{
+ self,
+ DataStoreConfig,
+ },
};
let config = match datastore::config() {
if next > now { continue; }
- let mut job = match Job::new(worker_type, &store) {
+ let job = match Job::new(worker_type, &store) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
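+        // run garbage collection as a tracked job under the backup auth id;
+        // the job runner takes care of task logging and job-state bookkeeping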
-        let store2 = store.clone();
-
-        if let Err(err) = WorkerTask::new_thread(
-            worker_type,
-            Some(store.clone()),
-            Userid::backup_userid().clone(),
-            false,
-            move |worker| {
-                job.start(&worker.upid().to_string())?;
-
-                worker.log(format!("starting garbage collection on store {}", store));
-                worker.log(format!("task triggered by schedule '{}'", event_str));
-
-                let result = datastore.garbage_collection(&*worker, worker.upid());
-
-                let status = worker.create_state(&result);
-
-                if let Err(err) = job.finish(status) {
-                    eprintln!("could not finish job state for {}: {}", worker_type, err);
-                }
-
-                result
-            }
-        ) {
-            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
+        let auth_id = Authid::backup_auth_id();
+        if let Err(err) = crate::server::do_garbage_collection_job(job, datastore, auth_id, Some(event_str)) {
+            eprintln!("unable to start garbage collection job on datastore {} - {}", store, err);
}
}
}
use proxmox_backup::{
backup::{
PruneOptions,
- BackupGroup,
- compute_prune_info,
},
config::datastore::{
self,
};
for (store, (_, store_config)) in config.sections {
- let datastore = match DataStore::lookup_datastore(&store) {
- Ok(datastore) => datastore,
- Err(err) => {
- eprintln!("lookup_datastore '{}' failed - {}", store, err);
- continue;
- }
- };
        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };
- let event = match parse_calendar_event(&event_str) {
- Ok(event) => event,
- Err(err) => {
- eprintln!("unable to parse schedule '{}' - {}", event_str, err);
- continue;
- }
- };
-
let worker_type = "prune";
-        let last = match jobstate::last_run_time(worker_type, &store) {
-            Ok(time) => time,
-            Err(err) => {
-                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
-                continue;
-            }
-        };
-
-        let next = match compute_next_event(&event, last, false) {
-            Ok(Some(next)) => next,
-            Ok(None) => continue,
-            Err(err) => {
-                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
-                continue;
-            }
-        };
-
-        let now = proxmox::tools::time::epoch_i64();
-
-        if next > now { continue; }
-
-        let mut job = match Job::new(worker_type, &store) {
-            Ok(job) => job,
-            Err(_) => continue, // could not get lock
-        };
-
-        let store2 = store.clone();
-
-        if let Err(err) = WorkerTask::new_thread(
-            worker_type,
-            Some(store.clone()),
-            Userid::backup_userid().clone(),
-            false,
-            move |worker| {
-
-                job.start(&worker.upid().to_string())?;
-
-                let result = try_block!({
-
-                    worker.log(format!("Starting datastore prune on store \"{}\"", store));
-                    worker.log(format!("task triggered by schedule '{}'", event_str));
-                    worker.log(format!("retention options: {}", prune_options.cli_options_string()));
-
-                    let base_path = datastore.base_path();
-
-                    let groups = BackupGroup::list_groups(&base_path)?;
-                    for group in groups {
-                        let list = group.list_backups(&base_path)?;
-                        let mut prune_info = compute_prune_info(list, &prune_options)?;
-                        prune_info.reverse(); // delete older snapshots first
-
-                        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
-                                store, group.backup_type(), group.backup_id()));
-
-                        for (info, keep) in prune_info {
-                            worker.log(format!(
-                                "{} {}/{}/{}",
-                                if keep { "keep" } else { "remove" },
-                                group.backup_type(), group.backup_id(),
-                                info.backup_dir.backup_time_string()));
-                            if !keep {
-                                datastore.remove_backup_dir(&info.backup_dir, true)?;
-                            }
-                        }
-                    }
-                    Ok(())
-                });
-
-                let status = worker.create_state(&result);
-
-                if let Err(err) = job.finish(status) {
-                    eprintln!("could not finish job state for {}: {}", worker_type, err);
-                }
-
-                result
-            }
-        ) {
-            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
-        }
+        if check_schedule(worker_type, &event_str, &store) {
+            let job = match Job::new(worker_type, &store) {
+                Ok(job) => job,
+                Err(_) => continue, // could not get lock
+            };
+            let auth_id = Authid::backup_auth_id().clone();
+            if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
+                eprintln!("unable to start datastore prune job {} - {}", &store, err);
+            }
+        };
}
}
None => continue,
};
- let event = match parse_calendar_event(&event_str) {
- Ok(event) => event,
- Err(err) => {
- eprintln!("unable to parse schedule '{}' - {}", event_str, err);
- continue;
- }
- };
-
let worker_type = "syncjob";
-        let last = match jobstate::last_run_time(worker_type, &job_id) {
-            Ok(time) => time,
-            Err(err) => {
-                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
-                continue;
-            }
-        };
-
-        let next = match compute_next_event(&event, last, false) {
-            Ok(Some(next)) => next,
-            Ok(None) => continue,
-            Err(err) => {
-                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
-                continue;
-            }
-        };
-
-        let now = proxmox::tools::time::epoch_i64();
-
-        if next > now { continue; }
-
-        let job = match Job::new(worker_type, &job_id) {
-            Ok(job) => job,
-            Err(_) => continue, // could not get lock
-        };
-
-        let userid = Userid::backup_userid().clone();
-
-        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
-            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
-        }
+        if check_schedule(worker_type, &event_str, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
+                Ok(job) => job,
+                Err(_) => continue, // could not get lock
+            };
+            let auth_id = Authid::backup_auth_id().clone();
+            if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
+                eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
+            }
+        };
}
}
Some(ref event_str) => event_str.clone(),
None => continue,
};
- let event = match parse_calendar_event(&event_str) {
- Ok(event) => event,
- Err(err) => {
- eprintln!("unable to parse schedule '{}' - {}", event_str, err);
- continue;
- }
- };
+
let worker_type = "verificationjob";
-        let last = match jobstate::last_run_time(worker_type, &job_id) {
-            Ok(time) => time,
-            Err(err) => {
-                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
-                continue;
-            }
-        };
-        let next = match compute_next_event(&event, last, false) {
-            Ok(Some(next)) => next,
-            Ok(None) => continue,
-            Err(err) => {
-                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
-                continue;
-            }
-        };
-        let now = proxmox::tools::time::epoch_i64();
-        if next > now { continue; }
-        let job = match Job::new(worker_type, &job_id) {
-            Ok(job) => job,
-            Err(_) => continue, // could not get lock
-        };
-        let userid = Userid::backup_userid().clone();
-        if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
-            eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
-        }
+        let auth_id = Authid::backup_auth_id().clone();
+        if check_schedule(worker_type, &event_str, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
+                Ok(job) => job,
+                Err(_) => continue, // could not get lock
+            };
+            if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
+                eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
+            }
+        };
}
}
let worker_type = "logrotate";
let job_id = "task_archive";
- let last = match jobstate::last_run_time(worker_type, job_id) {
- Ok(time) => time,
- Err(err) => {
- eprintln!("could not get last run time of task log archive rotation: {}", err);
- return;
- }
- };
-
// schedule daily at 00:00 like normal logrotate
let schedule = "00:00";
- let event = match parse_calendar_event(schedule) {
- Ok(event) => event,
- Err(err) => {
- // should not happen?
- eprintln!("unable to parse schedule '{}' - {}", schedule, err);
- return;
- }
- };
-
- let next = match compute_next_event(&event, last, false) {
- Ok(Some(next)) => next,
- Ok(None) => return,
- Err(err) => {
- eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
- return;
- }
- };
-
- let now = proxmox::tools::time::epoch_i64();
-
- if next > now {
+ if !check_schedule(worker_type, schedule, job_id) {
// if we never ran the rotation, schedule instantly
match jobstate::JobState::load(worker_type, job_id) {
Ok(state) => match state {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(job_id.to_string()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
worker.log(format!("task log archive was not rotated"));
}
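+                // also rotate the API access log: trigger at just under 32 MiB
+                // and keep at most 14 rotated files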
+ let max_size = 32 * 1024 * 1024 - 1;
+ let max_files = 14;
+ let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
+ .ok_or_else(|| format_err!("could not get API access log file names"))?;
+
+ let has_rotated = logrotate.rotate(max_size, None, Some(max_files))?;
+ if has_rotated {
+ println!("rotated access log, telling daemons to re-open log file");
+ proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
+
+ worker.log(format!("API access log was rotated"));
+ } else {
+ worker.log(format!("API access log was not rotated"));
+ }
+
Ok(())
});
}
+async fn command_reopen_logfiles() -> Result<(), Error> {
+    // Only care about the most recent daemon instance of each of proxy and api:
+    // older instances should not accept new requests anyway; they just finish
+    // their current requests and then exit.
+ let sock = server::our_ctrl_sock();
+ server::send_command(sock, serde_json::json!({
+ "command": "api-access-log-reopen",
+ })).await?;
+
+ let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
+ let sock = server::ctrl_sock_from_pid(pid);
+ server::send_command(sock, serde_json::json!({
+ "command": "api-access-log-reopen",
+ })).await?;
+ Ok(())
+}
+
async fn run_stat_generator() {
let mut count = 0;
});
}
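+// Check whether a job identified by (worker_type, id) is due: parse the
+// calendar event, look up the job's last run time, compute the next event,
+// and compare against now. Errors are logged to stderr and count as not due.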
+fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
+ let event = match parse_calendar_event(event_str) {
+ Ok(event) => event,
+ Err(err) => {
+ eprintln!("unable to parse schedule '{}' - {}", event_str, err);
+ return false;
+ }
+ };
+
+ let last = match jobstate::last_run_time(worker_type, &id) {
+ Ok(time) => time,
+ Err(err) => {
+ eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
+ return false;
+ }
+ };
+
+ let next = match compute_next_event(&event, last, false) {
+ Ok(Some(next)) => next,
+ Ok(None) => return false,
+ Err(err) => {
+ eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
+ return false;
+ }
+ };
+
+ let now = proxmox::tools::time::epoch_i64();
+ next <= now
+}
+
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
match proxmox_backup::tools::disks::disk_usage(path) {