-use std::sync::Arc;
+use std::sync::{Mutex, Arc};
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;
+use std::future::Future;
+use std::pin::Pin;
use anyhow::{bail, format_err, Error};
use futures::*;
-use hyper;
-use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
+use http::request::Parts;
+use http::Response;
+use hyper::{Body, StatusCode};
+use hyper::header;
+use url::form_urlencoded;
-use proxmox::try_block;
-use proxmox::api::RpcEnvironmentType;
+use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
+use tokio_stream::wrappers::ReceiverStream;
+use serde_json::{json, Value};
+use http::{Method, HeaderMap};
+
+use proxmox_sys::linux::socket::set_tcp_keepalive;
+use proxmox_sys::fs::CreateOptions;
+use proxmox_lang::try_block;
+use proxmox_router::{RpcEnvironment, RpcEnvironmentType, UserInformation};
+use proxmox_http::client::{RateLimitedStream, ShareableRateLimit};
+use proxmox_sys::{task_log, task_warn};
+use proxmox_sys::logrotate::LogRotate;
+
+use pbs_datastore::DataStore;
+
+use proxmox_rest_server::{
+ rotate_task_log_archive, extract_cookie, AuthError, ApiConfig, RestServer, RestEnvironment,
+ ServerAdapter, WorkerTask, cleanup_old_tasks,
+};
+use proxmox_backup::rrd_cache::{
+ initialize_rrd_cache, rrd_update_gauge, rrd_update_derive, rrd_sync_journal,
+};
use proxmox_backup::{
- backup::DataStore,
+ TRAFFIC_CONTROL_CACHE,
server::{
- WorkerTask,
- ApiConfig,
- rest::*,
+ auth::check_pbs_auth,
jobstate::{
self,
Job,
},
- rotate_task_log_archive,
- },
- tools::systemd::time::{
- parse_calendar_event,
- compute_next_event,
},
};
+use pbs_buildcfg::configdir;
+use proxmox_time::CalendarEvent;
+
+use pbs_api_types::{
+ Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig, DataStoreConfig,
+ PruneOptions,
+};
+
+use proxmox_rest_server::daemon;
-use proxmox_backup::api2::types::Authid;
-use proxmox_backup::configdir;
-use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
- daemon,
+ PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
disks::{
DiskManage,
- zfs_pool_stats,
- },
- logrotate::LogRotate,
- socket::{
- set_tcp_keepalive,
- PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
+ zfs_dataset_stats,
},
};
+
use proxmox_backup::api2::pull::do_sync_job;
+use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;
fn main() -> Result<(), Error> {
+ pbs_tools::setup_libc_malloc_opts();
+
proxmox_backup::tools::setup_safe_path_env();
- let backup_uid = proxmox_backup::backup::backup_user()?.uid;
- let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+ let backup_uid = pbs_config::backup_user()?.uid;
+ let backup_gid = pbs_config::backup_group()?.gid;
let running_uid = nix::unistd::Uid::effective();
let running_gid = nix::unistd::Gid::effective();
bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
}
- proxmox_backup::tools::runtime::main(run())
+ proxmox_async::runtime::main(run())
+}
+
+
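+/// Glue between this daemon and the generic REST server: renders the GUI
+/// index page and delegates request authentication to the PBS auth helpers.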
+struct ProxmoxBackupProxyAdapter;
+
+impl ServerAdapter for ProxmoxBackupProxyAdapter {
+
+ fn get_index(
+ &self,
+ env: RestEnvironment,
+ parts: Parts,
+ ) -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
+ Box::pin(get_index_future(env, parts))
+ }
+
+ fn check_auth<'a>(
+ &'a self,
+ headers: &'a HeaderMap,
+ method: &'a Method,
+ ) -> Pin<Box<dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>> + Send + 'a>> {
+ Box::pin(async move {
+ check_pbs_auth(headers, method).await
+ })
+ }
+}
+
+fn extract_lang_header(headers: &http::HeaderMap) -> Option<String> {
+ if let Some(Ok(cookie)) = headers.get("COOKIE").map(|v| v.to_str()) {
+ return extract_cookie(cookie, "PBSLangCookie");
+ }
+ None
+}
+
+async fn get_index_future(
+ env: RestEnvironment,
+ parts: Parts,
+) -> Response<Body> {
+
+ let auth_id = env.get_auth_id();
+ let api = env.api_config();
+ let language = extract_lang_header(&parts.headers);
+
+ // fixme: make all IO async
+
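+    // only plain user ids get a fresh CSRF prevention token; API token auth
+    // ids are treated like unauthenticated requests for the index page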
+ let (userid, csrf_token) = match auth_id {
+ Some(auth_id) => {
+ let auth_id = auth_id.parse::<Authid>();
+ match auth_id {
+ Ok(auth_id) if !auth_id.is_token() => {
+ let userid = auth_id.user().clone();
+ let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
+ (Some(userid), Some(new_csrf_token))
+ }
+ _ => (None, None)
+ }
+ }
+ None => (None, None),
+ };
+
+ let nodename = proxmox_sys::nodename();
+ let user = userid.as_ref().map(|u| u.as_str()).unwrap_or("");
+
+ let csrf_token = csrf_token.unwrap_or_else(|| String::from(""));
+
+ let mut debug = false;
+ let mut template_file = "index";
+
+ if let Some(query_str) = parts.uri.query() {
+ for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
+ if k == "debug" && v != "0" && v != "false" {
+ debug = true;
+ } else if k == "console" {
+ template_file = "console";
+ }
+ }
+ }
+
+ let mut lang = String::from("");
+ if let Some(language) = language {
+ if Path::new(&format!("/usr/share/pbs-i18n/pbs-lang-{}.js", language)).exists() {
+ lang = language;
+ }
+ }
+
+ let data = json!({
+ "NodeName": nodename,
+ "UserName": user,
+ "CSRFPreventionToken": csrf_token,
+ "language": lang,
+ "debug": debug,
+ });
+
+ let (ct, index) = match api.render_template(template_file, &data) {
+ Ok(index) => ("text/html", index),
+ Err(err) => ("text/plain", format!("Error rendering template: {}", err)),
+ };
+
+ let mut resp = Response::builder()
+ .status(StatusCode::OK)
+ .header(header::CONTENT_TYPE, ct)
+ .body(index.into())
+ .unwrap();
+
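+    // attach the authenticated user id to the response extensions so the REST
+    // layer can pick it up (e.g. for its access log)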
+ if let Some(userid) = userid {
+ resp.extensions_mut().insert(Authid::from((userid, None)));
+ }
+
+ resp
}
async fn run() -> Result<(), Error> {
let _ = public_auth_key(); // load with lazy_static
let _ = csrf_secret(); // load with lazy_static
+ let rrd_cache = initialize_rrd_cache()?;
+ rrd_cache.apply_journal()?;
+
let mut config = ApiConfig::new(
- buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
+ pbs_buildcfg::JS_DIR,
+ &proxmox_backup::api2::ROUTER,
+ RpcEnvironmentType::PUBLIC,
+ ProxmoxBackupProxyAdapter,
+ )?;
config.add_alias("novnc", "/usr/share/novnc-pve");
config.add_alias("extjs", "/usr/share/javascript/extjs");
+ config.add_alias("qrcodejs", "/usr/share/javascript/qrcodejs");
config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
config.add_alias("locale", "/usr/share/pbs-i18n");
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
- let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
+ let mut indexpath = PathBuf::from(pbs_buildcfg::JS_DIR);
indexpath.push("index.hbs");
config.register_template("index", &indexpath)?;
config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
- let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
+ let backup_user = pbs_config::backup_user()?;
+ let mut commando_sock = proxmox_rest_server::CommandSocket::new(proxmox_rest_server::our_ctrl_sock(), backup_user.gid);
- config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
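+    // files and directories created below (access/auth logs, task logs) are
+    // owned by the backup user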
+ let dir_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+ let file_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+
+ config.enable_access_log(
+ pbs_buildcfg::API_ACCESS_LOG_FN,
+ Some(dir_opts.clone()),
+ Some(file_opts.clone()),
+ &mut commando_sock,
+ )?;
+
+ config.enable_auth_log(
+ pbs_buildcfg::API_AUTH_LOG_FN,
+ Some(dir_opts.clone()),
+ Some(file_opts.clone()),
+ &mut commando_sock,
+ )?;
let rest_server = RestServer::new(config);
+ proxmox_rest_server::init_worker_tasks(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(), file_opts.clone())?;
//openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
- let key_path = configdir!("/proxy.key");
- let cert_path = configdir!("/proxy.pem");
- let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
- acceptor.set_private_key_file(key_path, SslFiletype::PEM)
- .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
- acceptor.set_certificate_chain_file(cert_path)
- .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
- acceptor.check_private_key().unwrap();
-
- let acceptor = Arc::new(acceptor.build());
+ // we build the initial acceptor here as we cannot start if this fails
+ let acceptor = make_tls_acceptor()?;
+ let acceptor = Arc::new(Mutex::new(acceptor));
+
+ // to renew the acceptor we just add a command-socket handler
+ commando_sock.register_command(
+ "reload-certificate".to_string(),
+ {
+ let acceptor = Arc::clone(&acceptor);
+ move |_value| -> Result<_, Error> {
+ log::info!("reloading certificate");
+ match make_tls_acceptor() {
+ Err(err) => log::error!("error reloading certificate: {}", err),
+ Ok(new_acceptor) => {
+ let mut guard = acceptor.lock().unwrap();
+ *guard = new_acceptor;
+ }
+ }
+ Ok(Value::Null)
+ }
+ },
+ )?;
+
+    // drop cached references to datastores that are no longer configured
+ commando_sock.register_command(
+ "datastore-removed".to_string(),
+ |_value| {
+ if let Err(err) = DataStore::remove_unused_datastores() {
+ log::error!("could not refresh datastores: {}", err);
+ }
+ Ok(Value::Null)
+ }
+ )?;
let server = daemon::create_daemon(
([0,0,0,0,0,0,0,0], 8007).into(),
- |listener, ready| {
+ move |listener| {
let connections = accept_connections(listener, acceptor, debug);
- let connections = hyper::server::accept::from_stream(connections);
+ let connections = hyper::server::accept::from_stream(ReceiverStream::new(connections));
- Ok(ready
- .and_then(|_| hyper::Server::builder(connections)
+ Ok(async {
+ daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
+
+ hyper::Server::builder(connections)
.serve(rest_server)
- .with_graceful_shutdown(server::shutdown_future())
+ .with_graceful_shutdown(proxmox_rest_server::shutdown_future())
.map_err(Error::from)
- )
- .map_err(|err| eprintln!("server error: {}", err))
- .map(|_| ())
- )
+ .await
+ })
},
);
- server::write_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
- daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
+ proxmox_rest_server::write_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
let init_result: Result<(), Error> = try_block!({
- server::register_task_control_commands(&mut commando_sock)?;
+ proxmox_rest_server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
- server::server_state_init()?;
+ proxmox_rest_server::catch_shutdown_signal()?;
+ proxmox_rest_server::catch_reload_signal()?;
Ok(())
});
start_task_scheduler();
start_stat_generator();
+ start_traffic_control_updater();
server.await?;
log::info!("server shutting down, waiting for active workers to complete");
- proxmox_backup::server::last_worker_future().await?;
+ proxmox_rest_server::last_worker_future().await?;
log::info!("done - exit server");
Ok(())
}
+fn make_tls_acceptor() -> Result<SslAcceptor, Error> {
+ let key_path = configdir!("/proxy.key");
+ let cert_path = configdir!("/proxy.pem");
+
+ let (config, _) = proxmox_backup::config::node::config()?;
+ let ciphers_tls_1_3 = config.ciphers_tls_1_3;
+ let ciphers_tls_1_2 = config.ciphers_tls_1_2;
+
+ let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
+ if let Some(ciphers) = ciphers_tls_1_3.as_deref() {
+ acceptor.set_ciphersuites(ciphers)?;
+ }
+ if let Some(ciphers) = ciphers_tls_1_2.as_deref() {
+ acceptor.set_cipher_list(ciphers)?;
+ }
+ acceptor.set_private_key_file(key_path, SslFiletype::PEM)
+ .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
+ acceptor.set_certificate_chain_file(cert_path)
+ .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
+ acceptor.set_options(openssl::ssl::SslOptions::NO_RENEGOTIATION);
+ acceptor.check_private_key().unwrap();
+
+ Ok(acceptor.build())
+}
+
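+// Accepted TCP connections are handed to per-connection tasks that run the TLS
+// handshake and push the finished stream into a bounded channel; the strong
+// count of `accept_counter` caps how many handshakes may be in flight at once.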
+type ClientStreamResult =
+ Result<std::pin::Pin<Box<tokio_openssl::SslStream<RateLimitedStream<tokio::net::TcpStream>>>>, Error>;
+const MAX_PENDING_ACCEPTS: usize = 1024;
+
fn accept_connections(
- mut listener: tokio::net::TcpListener,
- acceptor: Arc<openssl::ssl::SslAcceptor>,
+ listener: tokio::net::TcpListener,
+ acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
debug: bool,
-) -> tokio::sync::mpsc::Receiver<Result<tokio_openssl::SslStream<tokio::net::TcpStream>, Error>> {
-
- const MAX_PENDING_ACCEPTS: usize = 1024;
+) -> tokio::sync::mpsc::Receiver<ClientStreamResult> {
let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);
+ tokio::spawn(accept_connection(listener, acceptor, debug, sender));
+
+ receiver
+}
+
+async fn accept_connection(
+ listener: tokio::net::TcpListener,
+ acceptor: Arc<Mutex<openssl::ssl::SslAcceptor>>,
+ debug: bool,
+ sender: tokio::sync::mpsc::Sender<ClientStreamResult>,
+) {
let accept_counter = Arc::new(());
- tokio::spawn(async move {
- loop {
- match listener.accept().await {
+ loop {
+ let (sock, _addr) = match listener.accept().await {
+ Ok(conn) => conn,
+ Err(err) => {
+ eprintln!("error accepting tcp connection: {}", err);
+ continue;
+ }
+ };
+
+ sock.set_nodelay(true).unwrap();
+ let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+
+ let peer = sock.peer_addr().ok();
+ let sock = RateLimitedStream::with_limiter_update_cb(sock, move || lookup_rate_limiter(peer));
+
+ let ssl = { // limit acceptor_guard scope
+ // Acceptor can be reloaded using the command socket "reload-certificate" command
+ let acceptor_guard = acceptor.lock().unwrap();
+
+ match openssl::ssl::Ssl::new(acceptor_guard.context()) {
+ Ok(ssl) => ssl,
Err(err) => {
- eprintln!("error accepting tcp connection: {}", err);
- }
- Ok((sock, _addr)) => {
- sock.set_nodelay(true).unwrap();
- let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
- let acceptor = Arc::clone(&acceptor);
- let mut sender = sender.clone();
-
- if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
- eprintln!("connection rejected - to many open connections");
- continue;
- }
+ eprintln!("failed to create Ssl object from Acceptor context - {}", err);
+ continue;
+ },
+ }
+ };
- let accept_counter = accept_counter.clone();
- tokio::spawn(async move {
- let accept_future = tokio::time::timeout(
- Duration::new(10, 0), tokio_openssl::accept(&acceptor, sock));
-
- let result = accept_future.await;
-
- match result {
- Ok(Ok(connection)) => {
- if let Err(_) = sender.send(Ok(connection)).await {
- if debug {
- eprintln!("detect closed connection channel");
- }
- }
- }
- Ok(Err(err)) => {
- if debug {
- eprintln!("https handshake failed - {}", err);
- }
- }
- Err(_) => {
- if debug {
- eprintln!("https handshake timeout");
- }
- }
- }
+ let stream = match tokio_openssl::SslStream::new(ssl, sock) {
+ Ok(stream) => stream,
+ Err(err) => {
+ eprintln!("failed to create SslStream using ssl and connection socket - {}", err);
+ continue;
+ },
+ };
+
+ let mut stream = Box::pin(stream);
+ let sender = sender.clone();
+
+ if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
+ eprintln!("connection rejected - to many open connections");
+ continue;
+ }
+
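+        // run the TLS handshake in its own task, bounded by a 10 second timeout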
+ let accept_counter = Arc::clone(&accept_counter);
+ tokio::spawn(async move {
+ let accept_future = tokio::time::timeout(
+ Duration::new(10, 0), stream.as_mut().accept());
- drop(accept_counter); // decrease reference count
- });
+ let result = accept_future.await;
+
+ match result {
+ Ok(Ok(())) => {
+ if sender.send(Ok(stream)).await.is_err() && debug {
+ eprintln!("detect closed connection channel");
+ }
+ }
+ Ok(Err(err)) => {
+ if debug {
+ eprintln!("https handshake failed - {}", err);
+ }
+ }
+ Err(_) => {
+ if debug {
+ eprintln!("https handshake timeout");
+ }
}
}
- }
- });
- receiver
+ drop(accept_counter); // decrease reference count
+ });
+ }
}
fn start_stat_generator() {
- let abort_future = server::shutdown_future();
+ let abort_future = proxmox_rest_server::shutdown_future();
let future = Box::pin(run_stat_generator());
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
}
fn start_task_scheduler() {
- let abort_future = server::shutdown_future();
+ let abort_future = proxmox_rest_server::shutdown_future();
let future = Box::pin(run_task_scheduler());
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
}
+fn start_traffic_control_updater() {
+ let abort_future = proxmox_rest_server::shutdown_future();
+ let future = Box::pin(run_traffic_control_updater());
+ let task = futures::future::select(future, abort_future);
+ tokio::spawn(task.map(|_| ()));
+}
+
use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};
fn next_minute() -> Result<Instant, Error> {
Ok(d) => d,
Err(err) => {
eprintln!("task scheduler: compute next minute failed - {}", err);
- tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
+ tokio::time::sleep_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
continue;
}
};
}
}
- tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
+ tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
}
}
schedule_datastore_prune().await;
schedule_datastore_sync_jobs().await;
schedule_datastore_verify_jobs().await;
+ schedule_tape_backup_jobs().await;
schedule_task_log_rotate().await;
Ok(())
async fn schedule_datastore_garbage_collection() {
- use proxmox_backup::config::{
- datastore::{
- self,
- DataStoreConfig,
- },
- };
-
- let config = match datastore::config() {
+ let config = match pbs_config::datastore::config() {
Err(err) => {
eprintln!("unable to read datastore config - {}", err);
return;
None => continue,
};
- let event = match parse_calendar_event(&event_str) {
+ let event: CalendarEvent = match event_str.parse() {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
}
};
- let next = match compute_next_event(&event, last, false) {
+ let next = match event.compute_next_event(last) {
Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => {
}
};
- let now = proxmox::tools::time::epoch_i64();
+ let now = proxmox_time::epoch_i64();
if next > now { continue; }
async fn schedule_datastore_prune() {
- use proxmox_backup::{
- backup::{
- PruneOptions,
- },
- config::datastore::{
- self,
- DataStoreConfig,
- },
- };
-
- let config = match datastore::config() {
+ let config = match pbs_config::datastore::config() {
Err(err) => {
eprintln!("unable to read datastore config - {}", err);
return;
keep_yearly: store_config.keep_yearly,
};
- if !prune_options.keeps_something() { // no prune settings - keep all
+ if !pbs_datastore::prune::keeps_something(&prune_options) { // no prune settings - keep all
continue;
}
async fn schedule_datastore_sync_jobs() {
- use proxmox_backup::config::sync::{
- self,
- SyncJobConfig,
- };
- let config = match sync::config() {
+ let config = match pbs_config::sync::config() {
Err(err) => {
eprintln!("unable to read sync job config - {}", err);
return;
};
let auth_id = Authid::root_auth_id().clone();
- if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
+ if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str), false) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
};
async fn schedule_datastore_verify_jobs() {
- use proxmox_backup::config::verify::{
- self,
- VerificationJobConfig,
- };
-
- let config = match verify::config() {
+ let config = match pbs_config::verify::config() {
Err(err) => {
eprintln!("unable to read verification job config - {}", err);
return;
let worker_type = "verificationjob";
let auth_id = Authid::root_auth_id().clone();
if check_schedule(worker_type, &event_str, &job_id) {
- let job = match Job::new(&worker_type, &job_id) {
+ let job = match Job::new(worker_type, &job_id) {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
- if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
+ if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str), false) {
eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
}
};
}
}
+async fn schedule_tape_backup_jobs() {
+
+ let config = match pbs_config::tape_job::config() {
+ Err(err) => {
+ eprintln!("unable to read tape job config - {}", err);
+ return;
+ }
+ Ok((config, _digest)) => config,
+ };
+ for (job_id, (_, job_config)) in config.sections {
+ let job_config: TapeBackupJobConfig = match serde_json::from_value(job_config) {
+ Ok(c) => c,
+ Err(err) => {
+ eprintln!("tape backup job config from_value failed - {}", err);
+ continue;
+ }
+ };
+ let event_str = match job_config.schedule {
+ Some(ref event_str) => event_str.clone(),
+ None => continue,
+ };
+
+ let worker_type = "tape-backup-job";
+ let auth_id = Authid::root_auth_id().clone();
+ if check_schedule(worker_type, &event_str, &job_id) {
+ let job = match Job::new(worker_type, &job_id) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+ if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str), false) {
+ eprintln!("unable to start tape backup job {} - {}", &job_id, err);
+ }
+ };
+ }
+}
+
+
async fn schedule_task_log_rotate() {
let worker_type = "logrotate";
if let Err(err) = WorkerTask::new_thread(
worker_type,
None,
- Authid::root_auth_id().clone(),
+ Authid::root_auth_id().to_string(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
- worker.log(format!("starting task log rotation"));
+ task_log!(worker, "starting task log rotation");
let result = try_block!({
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
let max_files = 20; // times twenty files gives > 100000 task entries
- let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
+
+ let user = pbs_config::backup_user()?;
+ let options = proxmox_sys::fs::CreateOptions::new()
+ .owner(user.uid)
+ .group(user.gid);
+
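+            // pass ownership options so newly created archive files stay owned
+            // by the backup user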
+ let has_rotated = rotate_task_log_archive(
+ max_size,
+ true,
+ Some(max_files),
+ Some(options.clone()),
+ )?;
+
if has_rotated {
- worker.log(format!("task log archive was rotated"));
+ task_log!(worker, "task log archive was rotated");
} else {
- worker.log(format!("task log archive was not rotated"));
+ task_log!(worker, "task log archive was not rotated");
}
let max_size = 32 * 1024 * 1024 - 1;
let max_files = 14;
- let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
- .ok_or_else(|| format_err!("could not get API access log file names"))?;
- if logrotate.rotate(max_size, None, Some(max_files))? {
+
+ let mut logrotate = LogRotate::new(
+ pbs_buildcfg::API_ACCESS_LOG_FN,
+ true,
+ Some(max_files),
+ Some(options.clone()),
+ )?;
+
+ if logrotate.rotate(max_size)? {
println!("rotated access log, telling daemons to re-open log file");
- proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
- worker.log(format!("API access log was rotated"));
+ proxmox_async::runtime::block_on(command_reopen_access_logfiles())?;
+ task_log!(worker, "API access log was rotated");
} else {
- worker.log(format!("API access log was not rotated"));
+ task_log!(worker, "API access log was not rotated");
}
- let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
- .ok_or_else(|| format_err!("could not get API auth log file names"))?;
-
- if logrotate.rotate(max_size, None, Some(max_files))? {
- worker.log(format!("API access log was rotated"));
+ let mut logrotate = LogRotate::new(
+ pbs_buildcfg::API_AUTH_LOG_FN,
+ true,
+ Some(max_files),
+ Some(options),
+ )?;
+
+ if logrotate.rotate(max_size)? {
+ println!("rotated auth log, telling daemons to re-open log file");
+ proxmox_async::runtime::block_on(command_reopen_auth_logfiles())?;
+ task_log!(worker, "API authentication log was rotated");
} else {
- worker.log(format!("API access log was not rotated"));
+ task_log!(worker, "API authentication log was not rotated");
+ }
+
+ if has_rotated {
+ task_log!(worker, "cleaning up old task logs");
+ if let Err(err) = cleanup_old_tasks(true) {
+ task_warn!(worker, "could not completely cleanup old tasks: {}", err);
+ }
}
Ok(())
}
-async fn command_reopen_logfiles() -> Result<(), Error> {
+async fn command_reopen_access_logfiles() -> Result<(), Error> {
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
- let sock = server::our_ctrl_sock();
- let f1 = server::send_command(sock, serde_json::json!({
- "command": "api-access-log-reopen",
- }));
+ let sock = proxmox_rest_server::our_ctrl_sock();
+ let f1 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");
- let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
- let sock = server::ctrl_sock_from_pid(pid);
- let f2 = server::send_command(sock, serde_json::json!({
- "command": "api-access-log-reopen",
- }));
+ let pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
+ let sock = proxmox_rest_server::ctrl_sock_from_pid(pid);
+ let f2 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");
+
+ match futures::join!(f1, f2) {
+ (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
+ (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
+ (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
+ _ => Ok(()),
+ }
+}
+
+async fn command_reopen_auth_logfiles() -> Result<(), Error> {
+ // only care about the most recent daemon instance for each, proxy & api, as other older ones
+ // should not respond to new requests anyway, but only finish their current one and then exit.
+ let sock = proxmox_rest_server::our_ctrl_sock();
+ let f1 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-auth-log-reopen\"}\n");
+
+ let pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
+ let sock = proxmox_rest_server::ctrl_sock_from_pid(pid);
+ let f2 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-auth-log-reopen\"}\n");
match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
async fn run_stat_generator() {
- let mut count = 0;
loop {
- count += 1;
- let save = if count >= 6 { count = 0; true } else { false };
-
let delay_target = Instant::now() + Duration::from_secs(10);
- generate_host_stats(save).await;
+ generate_host_stats().await;
- tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
+ rrd_sync_journal();
- }
+ tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
-}
+ }
-fn rrd_update_gauge(name: &str, value: f64, save: bool) {
- use proxmox_backup::rrd;
- if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
- eprintln!("rrd::update_value '{}' failed - {}", name, err);
- }
}
-fn rrd_update_derive(name: &str, value: f64, save: bool) {
- use proxmox_backup::rrd;
- if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
- eprintln!("rrd::update_value '{}' failed - {}", name, err);
+async fn generate_host_stats() {
+ match tokio::task::spawn_blocking(generate_host_stats_sync).await {
+ Ok(()) => (),
+        Err(err) => log::error!("generate_host_stats panicked: {}", err),
}
}
-async fn generate_host_stats(save: bool) {
- use proxmox::sys::linux::procfs::{
+fn generate_host_stats_sync() {
+ use proxmox_sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
- use proxmox_backup::config::datastore;
-
- proxmox_backup::tools::runtime::block_in_place(move || {
-
- match read_proc_stat() {
- Ok(stat) => {
- rrd_update_gauge("host/cpu", stat.cpu, save);
- rrd_update_gauge("host/iowait", stat.iowait_percent, save);
- }
- Err(err) => {
- eprintln!("read_proc_stat failed - {}", err);
- }
+ match read_proc_stat() {
+ Ok(stat) => {
+ rrd_update_gauge("host/cpu", stat.cpu);
+ rrd_update_gauge("host/iowait", stat.iowait_percent);
+ }
+ Err(err) => {
+ eprintln!("read_proc_stat failed - {}", err);
}
+ }
- match read_meminfo() {
- Ok(meminfo) => {
- rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
- rrd_update_gauge("host/memused", meminfo.memused as f64, save);
- rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
- rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
- }
- Err(err) => {
- eprintln!("read_meminfo failed - {}", err);
- }
+ match read_meminfo() {
+ Ok(meminfo) => {
+ rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
+ rrd_update_gauge("host/memused", meminfo.memused as f64);
+ rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
+ rrd_update_gauge("host/swapused", meminfo.swapused as f64);
+ }
+ Err(err) => {
+ eprintln!("read_meminfo failed - {}", err);
}
+ }
- match read_proc_net_dev() {
- Ok(netdev) => {
- use proxmox_backup::config::network::is_physical_nic;
- let mut netin = 0;
- let mut netout = 0;
- for item in netdev {
- if !is_physical_nic(&item.device) { continue; }
- netin += item.receive;
- netout += item.send;
- }
- rrd_update_derive("host/netin", netin as f64, save);
- rrd_update_derive("host/netout", netout as f64, save);
- }
- Err(err) => {
- eprintln!("read_prox_net_dev failed - {}", err);
+ match read_proc_net_dev() {
+ Ok(netdev) => {
+ use pbs_config::network::is_physical_nic;
+ let mut netin = 0;
+ let mut netout = 0;
+ for item in netdev {
+ if !is_physical_nic(&item.device) { continue; }
+ netin += item.receive;
+ netout += item.send;
}
+ rrd_update_derive("host/netin", netin as f64);
+ rrd_update_derive("host/netout", netout as f64);
+ }
+ Err(err) => {
+ eprintln!("read_prox_net_dev failed - {}", err);
}
+ }
- match read_loadavg() {
- Ok(loadavg) => {
- rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
- }
- Err(err) => {
- eprintln!("read_loadavg failed - {}", err);
- }
+ match read_loadavg() {
+ Ok(loadavg) => {
+ rrd_update_gauge("host/loadavg", loadavg.0 as f64);
}
+ Err(err) => {
+ eprintln!("read_loadavg failed - {}", err);
+ }
+ }
- let disk_manager = DiskManage::new();
+ let disk_manager = DiskManage::new();
- gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);
+ gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
- match datastore::config() {
- Ok((config, _)) => {
- let datastore_list: Vec<datastore::DataStoreConfig> =
- config.convert_to_typed_array("datastore").unwrap_or(Vec::new());
+ match pbs_config::datastore::config() {
+ Ok((config, _)) => {
+ let datastore_list: Vec<DataStoreConfig> =
+ config.convert_to_typed_array("datastore").unwrap_or_default();
- for config in datastore_list {
+ for config in datastore_list {
- let rrd_prefix = format!("datastore/{}", config.name);
- let path = std::path::Path::new(&config.path);
- gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
- }
- }
- Err(err) => {
- eprintln!("read datastore config failed - {}", err);
+ let rrd_prefix = format!("datastore/{}", config.name);
+ let path = std::path::Path::new(&config.path);
+ gather_disk_stats(disk_manager.clone(), path, &rrd_prefix);
}
}
-
- });
+ Err(err) => {
+ eprintln!("read datastore config failed - {}", err);
+ }
+ }
}
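+// true if the given calendar event has a trigger time at or before now,
+// measured against the job's last recorded run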
fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
- let event = match parse_calendar_event(event_str) {
+ let event: CalendarEvent = match event_str.parse() {
Ok(event) => event,
Err(err) => {
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
}
};
- let last = match jobstate::last_run_time(worker_type, &id) {
+ let last = match jobstate::last_run_time(worker_type, id) {
Ok(time) => time,
Err(err) => {
eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
}
};
- let next = match compute_next_event(&event, last, false) {
+ let next = match event.compute_next_event(last) {
Ok(Some(next)) => next,
Ok(None) => return false,
Err(err) => {
}
};
- let now = proxmox::tools::time::epoch_i64();
+ let now = proxmox_time::epoch_i64();
next <= now
}
-fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
+fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {
match proxmox_backup::tools::disks::disk_usage(path) {
Ok(status) => {
let rrd_key = format!("{}/total", rrd_prefix);
- rrd_update_gauge(&rrd_key, status.total as f64, save);
+ rrd_update_gauge(&rrd_key, status.total as f64);
let rrd_key = format!("{}/used", rrd_prefix);
- rrd_update_gauge(&rrd_key, status.used as f64, save);
+ rrd_update_gauge(&rrd_key, status.used as f64);
}
Err(err) => {
eprintln!("read disk_usage on {:?} failed - {}", path, err);
Ok(None) => {},
Ok(Some((fs_type, device, source))) => {
let mut device_stat = None;
- match fs_type.as_str() {
- "zfs" => {
- if let Some(pool) = source {
- match zfs_pool_stats(&pool) {
- Ok(stat) => device_stat = stat,
- Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
- }
+ match (fs_type.as_str(), source) {
+ ("zfs", Some(source)) => match source.into_string() {
+ Ok(dataset) => match zfs_dataset_stats(&dataset) {
+ Ok(stat) => device_stat = Some(stat),
+ Err(err) => eprintln!("zfs_dataset_stats({:?}) failed - {}", dataset, err),
+ },
+ Err(source) => {
+ eprintln!("zfs_pool_stats({:?}) failed - invalid characters", source)
}
- }
+ },
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
}
if let Some(stat) = device_stat {
let rrd_key = format!("{}/read_ios", rrd_prefix);
- rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
+ rrd_update_derive(&rrd_key, stat.read_ios as f64);
let rrd_key = format!("{}/read_bytes", rrd_prefix);
- rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);
+ rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);
let rrd_key = format!("{}/write_ios", rrd_prefix);
- rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
+ rrd_update_derive(&rrd_key, stat.write_ios as f64);
let rrd_key = format!("{}/write_bytes", rrd_prefix);
- rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);
+ rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);
let rrd_key = format!("{}/io_ticks", rrd_prefix);
- rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
+ rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0);
}
}
Err(err) => {
}
}
}
+
+// Rate Limiter lookup
+
+// Test WITH
+// proxmox-backup-client restore vm/201/2021-10-22T09:55:56Z drive-scsi0.img img1.img --repository localhost:store2
+
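+// recompute the configured traffic-control rates once per second; the
+// per-connection `lookup_rate_limiter` callback below hands the matching
+// limiters to `RateLimitedStream` (see `accept_connection`)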
+async fn run_traffic_control_updater() {
+
+ loop {
+ let delay_target = Instant::now() + Duration::from_secs(1);
+
+ {
+ let mut cache = TRAFFIC_CONTROL_CACHE.lock().unwrap();
+ cache.compute_current_rates();
+ }
+
+ tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
+ }
+
+}
+
+fn lookup_rate_limiter(
+ peer: Option<std::net::SocketAddr>,
+) -> (Option<Arc<dyn ShareableRateLimit>>, Option<Arc<dyn ShareableRateLimit>>) {
+ let mut cache = TRAFFIC_CONTROL_CACHE.lock().unwrap();
+
+ let now = proxmox_time::epoch_i64();
+
+ cache.reload(now);
+
+ let (_rule_name, read_limiter, write_limiter) = cache.lookup_rate_limiter(peer, now);
+
+ (read_limiter, write_limiter)
+}