use anyhow::{bail, format_err, Error};
use futures::*;
-use hyper;
+
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
+use tokio_stream::wrappers::ReceiverStream;
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
use proxmox_backup::{
backup::DataStore,
server::{
+ auth::default_api_auth,
WorkerTask,
ApiConfig,
rest::*,
disks::{
DiskManage,
zfs_pool_stats,
+ get_pool_from_dataset,
},
logrotate::LogRotate,
socket::{
};
use proxmox_backup::api2::pull::do_sync_job;
+use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;
let _ = csrf_secret(); // load with lazy_static
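+ // ApiConfig::new() now takes the API auth handler as an explicit argument; pass the default one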
let mut config = ApiConfig::new(
- buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
+ buildcfg::JS_DIR,
+ &proxmox_backup::api2::ROUTER,
+ RpcEnvironmentType::PUBLIC,
+ default_api_auth(),
+ )?;
config.add_alias("novnc", "/usr/share/novnc-pve");
config.add_alias("extjs", "/usr/share/javascript/extjs");
+ config.add_alias("qrcodejs", "/usr/share/javascript/qrcodejs");
config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
config.add_alias("locale", "/usr/share/pbs-i18n");
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
- config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
|listener, ready| {
let connections = accept_connections(listener, acceptor, debug);
- let connections = hyper::server::accept::from_stream(connections);
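+ // tokio 1.0's mpsc::Receiver no longer implements Stream, so wrap it in a
+ // ReceiverStream before handing it to hyper's from_stream()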
+ let connections = hyper::server::accept::from_stream(ReceiverStream::new(connections));
Ok(ready
.and_then(|_| hyper::Server::builder(connections)
.map(|_| ())
)
},
+ "proxmox-backup-proxy.service",
);
server::write_pid(buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
Ok(())
}
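+// connections handed from the accept loop to hyper: pinned, because the
+// TLS handshake below is driven through Pin<&mut SslStream<_>>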
+type ClientStreamResult =
+ Result<std::pin::Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>, Error>;
+const MAX_PENDING_ACCEPTS: usize = 1024;
+
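+// set up the channel and spawn the accept loop; callers consume fully
+// established TLS connections from the returned receiver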
fn accept_connections(
- mut listener: tokio::net::TcpListener,
+ listener: tokio::net::TcpListener,
acceptor: Arc<openssl::ssl::SslAcceptor>,
debug: bool,
-) -> tokio::sync::mpsc::Receiver<Result<tokio_openssl::SslStream<tokio::net::TcpStream>, Error>> {
-
- const MAX_PENDING_ACCEPTS: usize = 1024;
+) -> tokio::sync::mpsc::Receiver<ClientStreamResult> {
let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);
+ tokio::spawn(accept_connection(listener, acceptor, debug, sender));
+
+ receiver
+}
+
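+// accept loop: take TCP connections from the listener, run the TLS handshake
+// in a per-connection task and push the resulting streams into the channel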
+async fn accept_connection(
+ listener: tokio::net::TcpListener,
+ acceptor: Arc<openssl::ssl::SslAcceptor>,
+ debug: bool,
+ sender: tokio::sync::mpsc::Sender<ClientStreamResult>,
+) {
let accept_counter = Arc::new(());
- tokio::spawn(async move {
- loop {
- match listener.accept().await {
- Err(err) => {
- eprintln!("error accepting tcp connection: {}", err);
- }
- Ok((sock, _addr)) => {
- sock.set_nodelay(true).unwrap();
- let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
- let acceptor = Arc::clone(&acceptor);
- let mut sender = sender.clone();
-
- if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
- eprintln!("connection rejected - to many open connections");
- continue;
- }
+ loop {
+ let (sock, _addr) = match listener.accept().await {
+ Ok(conn) => conn,
+ Err(err) => {
+ eprintln!("error accepting tcp connection: {}", err);
+ continue;
+ }
+ };
- let accept_counter = accept_counter.clone();
- tokio::spawn(async move {
- let accept_future = tokio::time::timeout(
- Duration::new(10, 0), tokio_openssl::accept(&acceptor, sock));
-
- let result = accept_future.await;
-
- match result {
- Ok(Ok(connection)) => {
- if let Err(_) = sender.send(Ok(connection)).await {
- if debug {
- eprintln!("detect closed connection channel");
- }
- }
- }
- Ok(Err(err)) => {
- if debug {
- eprintln!("https handshake failed - {}", err);
- }
- }
- Err(_) => {
- if debug {
- eprintln!("https handshake timeout");
- }
- }
- }
+ sock.set_nodelay(true).unwrap();
+ let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+ let acceptor = Arc::clone(&acceptor);
+
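+ // tokio-openssl 0.6 has no free-standing accept() helper any more: build an
+ // Ssl from the acceptor's context, wrap the socket in an SslStream and
+ // drive the handshake on the pinned stream below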
+ let ssl = match openssl::ssl::Ssl::new(acceptor.context()) {
+ Ok(ssl) => ssl,
+ Err(err) => {
+ eprintln!("failed to create Ssl object from Acceptor context - {}", err);
+ continue;
+ },
+ };
+ let stream = match tokio_openssl::SslStream::new(ssl, sock) {
+ Ok(stream) => stream,
+ Err(err) => {
+ eprintln!("failed to create SslStream using ssl and connection socket - {}", err);
+ continue;
+ },
+ };
+
+ let mut stream = Box::pin(stream);
+ let sender = sender.clone();
+
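+ // every handshake task holds a clone of accept_counter, so the Arc's strong
+ // count is the number of handshakes currently in flight - refuse new
+ // connections once it exceeds MAX_PENDING_ACCEPTS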
+ if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
+ eprintln!("connection rejected - to many open connections");
+ continue;
+ }
- drop(accept_counter); // decrease reference count
- });
+ let accept_counter = Arc::clone(&accept_counter);
+ tokio::spawn(async move {
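+ // allow at most 10 seconds for the TLS handshake so a stalled client
+ // cannot occupy the task indefinitely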
+ let accept_future = tokio::time::timeout(
+ Duration::new(10, 0), stream.as_mut().accept());
+
+ let result = accept_future.await;
+
+ match result {
+ Ok(Ok(())) => {
+ if sender.send(Ok(stream)).await.is_err() && debug {
+ eprintln!("detect closed connection channel");
+ }
+ }
+ Ok(Err(err)) => {
+ if debug {
+ eprintln!("https handshake failed - {}", err);
+ }
+ }
+ Err(_) => {
+ if debug {
+ eprintln!("https handshake timeout");
+ }
}
}
- }
- });
- receiver
+ drop(accept_counter); // decrease reference count
+ });
+ }
}
fn start_stat_generator() {
Ok(d) => d,
Err(err) => {
eprintln!("task scheduler: compute next minute failed - {}", err);
- tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
+ tokio::time::sleep_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
continue;
}
};
}
}
- tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
+ tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
}
}
schedule_datastore_prune().await;
schedule_datastore_sync_jobs().await;
schedule_datastore_verify_jobs().await;
+ schedule_tape_backup_jobs().await;
schedule_task_log_rotate().await;
Ok(())
Err(_) => continue, // could not get lock
};
- let auth_id = Authid::backup_auth_id();
+ let auth_id = Authid::root_auth_id();
if let Err(err) = crate::server::do_garbage_collection_job(job, datastore, auth_id, Some(event_str), false) {
eprintln!("unable to start garbage collection job on datastore {} - {}", store, err);
Err(_) => continue, // could not get lock
};
- let auth_id = Authid::backup_auth_id().clone();
+ let auth_id = Authid::root_auth_id().clone();
if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
eprintln!("unable to start datastore prune job {} - {}", &store, err);
}
Err(_) => continue, // could not get lock
};
- let auth_id = Authid::backup_auth_id().clone();
+ let auth_id = Authid::root_auth_id().clone();
if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
};
let worker_type = "verificationjob";
- let auth_id = Authid::backup_auth_id().clone();
+ let auth_id = Authid::root_auth_id().clone();
if check_schedule(worker_type, &event_str, &job_id) {
let job = match Job::new(&worker_type, &job_id) {
Ok(job) => job,
}
}
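+// iterate over the configured tape backup jobs and start every job whose
+// calendar-event schedule is due (mirrors the other schedule_* helpers)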
+async fn schedule_tape_backup_jobs() {
+
+ use proxmox_backup::config::tape_job::{
+ self,
+ TapeBackupJobConfig,
+ };
+
+ let config = match tape_job::config() {
+ Err(err) => {
+ eprintln!("unable to read tape job config - {}", err);
+ return;
+ }
+ Ok((config, _digest)) => config,
+ };
+ for (job_id, (_, job_config)) in config.sections {
+ let job_config: TapeBackupJobConfig = match serde_json::from_value(job_config) {
+ Ok(c) => c,
+ Err(err) => {
+ eprintln!("tape backup job config from_value failed - {}", err);
+ continue;
+ }
+ };
+ let event_str = match job_config.schedule {
+ Some(ref event_str) => event_str.clone(),
+ None => continue,
+ };
+
+ let worker_type = "tape-backup-job";
+ let auth_id = Authid::root_auth_id().clone();
+ if check_schedule(worker_type, &event_str, &job_id) {
+ let job = match Job::new(&worker_type, &job_id) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+ if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
+ eprintln!("unable to start tape backup job {} - {}", &job_id, err);
+ }
+ };
+ }
+}
+
+
async fn schedule_task_log_rotate() {
let worker_type = "logrotate";
if let Err(err) = WorkerTask::new_thread(
worker_type,
None,
- Authid::backup_auth_id().clone(),
+ Authid::root_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
- worker.log(format!("starting task log rotation"));
+ worker.log("starting task log rotation".to_string());
let result = try_block!({
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
let max_files = 20; // times twenty files gives > 100000 task entries
let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
if has_rotated {
- worker.log(format!("task log archive was rotated"));
+ worker.log("task log archive was rotated".to_string());
} else {
- worker.log(format!("task log archive was not rotated"));
+ worker.log("task log archive was not rotated".to_string());
}
let max_size = 32 * 1024 * 1024 - 1;
let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
.ok_or_else(|| format_err!("could not get API access log file names"))?;
- let has_rotated = logrotate.rotate(max_size, None, Some(max_files))?;
- if has_rotated {
+ if logrotate.rotate(max_size, None, Some(max_files))? {
println!("rotated access log, telling daemons to re-open log file");
proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
+ worker.log("API access log was rotated".to_string());
+ } else {
+ worker.log("API access log was not rotated".to_string());
+ }
+
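+ // also rotate the API authentication log, with the same size and file-count limits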
+ let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
+ .ok_or_else(|| format_err!("could not get API auth log file names"))?;
- worker.log(format!("API access log was rotated"));
+ if logrotate.rotate(max_size, None, Some(max_files))? {
+ worker.log("API authentication log was rotated".to_string());
} else {
- worker.log(format!("API access log was not rotated"));
+ worker.log("API authentication log was not rotated".to_string());
}
Ok(())
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
let sock = server::our_ctrl_sock();
- server::send_command(sock, serde_json::json!({
+ let f1 = server::send_command(sock, serde_json::json!({
"command": "api-access-log-reopen",
- })).await?;
+ }));
let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
let sock = server::ctrl_sock_from_pid(pid);
- server::send_command(sock, serde_json::json!({
+ let f2 = server::send_command(sock, serde_json::json!({
"command": "api-access-log-reopen",
- })).await?;
- Ok(())
+ }));
+
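+ // send both reopen commands concurrently and report any failures; one
+ // daemon failing must not prevent the other from being notified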
+ match futures::join!(f1, f2) {
+ (Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
+ (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
+ (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
+ _ => Ok(()),
+ }
}
async fn run_stat_generator() {
generate_host_stats(save).await;
- tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
+ tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
}
match datastore::config() {
Ok((config, _)) => {
let datastore_list: Vec<datastore::DataStoreConfig> =
- config.convert_to_typed_array("datastore").unwrap_or(Vec::new());
+ config.convert_to_typed_array("datastore").unwrap_or_default();
for config in datastore_list {
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
- if let Some(pool) = source {
- match zfs_pool_stats(&pool) {
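+ // the datastore may sit on a child dataset; derive the pool name from it
+ // (falling back to the raw source) before querying pool statistics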
+ if let Some(source) = source {
+ let pool = get_pool_from_dataset(&source).unwrap_or(&source);
+ match zfs_pool_stats(pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}