let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
- config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
+ config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
let rest_server = RestServer::new(config);
DiskManage,
zfs_pool_stats,
},
+ logrotate::LogRotate,
socket::{
set_tcp_keepalive,
PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
- config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
+ config.enable_file_log(buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
let rest_server = RestServer::new(config);
worker.log(format!("task log archive was not rotated"));
}
+        // Rotate the API access log once it grows past ~32 MiB, keeping at
+        // most 14 rotated files (the `true` flag presumably enables
+        // compression of rotated files — TODO confirm against LogRotate::new).
+        let max_size = 32 * 1024 * 1024 - 1;
+        let max_files = 14;
+        let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
+            .ok_or_else(|| format_err!("could not get API access log file names"))?;
+
+        let has_rotated = logrotate.rotate(max_size, None, Some(max_files))?;
+        if has_rotated {
+            // The running daemons still hold a handle to the now-renamed
+            // file; ask them (synchronously, via block_on) to re-open it so
+            // new requests land in the fresh log.
+            println!("rotated access log, telling daemons to re-open log file");
+            proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
+
+            worker.log(format!("API access log was rotated"));
+        } else {
+            worker.log(format!("API access log was not rotated"));
+        }
+
Ok(())
});
}
+/// Tell both running daemons (the proxy — via our own control socket — and
+/// the separate API daemon — addressed through its PID file) to re-open the
+/// API access log file after a rotation.
+///
+/// Returns an error if a control socket cannot be reached or either daemon
+/// rejects the command.
+async fn command_reopen_logfiles() -> Result<(), Error> {
+    // only care about the most recent daemon instance for each, proxy & api, as other older ones
+    // should not respond to new requests anyway, but only finish their current one and then exit.
+    let sock = server::our_ctrl_sock();
+    server::send_command(sock, serde_json::json!({
+        "command": "api-access-log-reopen",
+    })).await?;
+
+    // NOTE(review): PROXMOX_BACKUP_API_PID_FN suggests this is the api
+    // daemon's PID file — resolve its control socket from that PID.
+    let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
+    let sock = server::ctrl_sock_from_pid(pid);
+    server::send_command(sock, serde_json::json!({
+        "command": "api-access-log-reopen",
+    })).await?;
+    Ok(())
+}
+
async fn run_stat_generator() {
let mut count = 0;
use std::path::PathBuf;
use std::time::SystemTime;
use std::fs::metadata;
-use std::sync::{Mutex, RwLock};
+use std::sync::{Arc, Mutex, RwLock};
use anyhow::{bail, Error, format_err};
use hyper::Method;
env_type: RpcEnvironmentType,
templates: RwLock<Handlebars<'static>>,
template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
- request_log: Option<Mutex<FileLogger>>,
+ request_log: Option<Arc<Mutex<FileLogger>>>,
}
impl ApiConfig {
}
}
- pub fn enable_file_log<P>(&mut self, path: P) -> Result<(), Error>
+    /// Enable writing the API access log to `path`, and register an
+    /// "api-access-log-reopen" command on the given control socket so the
+    /// daemon can re-open the file after external log rotation.
+ pub fn enable_file_log<P>(
+ &mut self,
+ path: P,
+ commando_sock: &mut super::CommandoSocket,
+ ) -> Result<(), Error>
where
P: Into<PathBuf>
{
owned_by_backup: true,
..Default::default()
};
- self.request_log = Some(Mutex::new(FileLogger::new(&path, logger_options)?));
+        // Share the logger via Arc: one clone is stored on self for the REST
+        // server, the other moves into the reopen-command closure below.
+ let request_log = Arc::new(Mutex::new(FileLogger::new(&path, logger_options)?));
+ self.request_log = Some(Arc::clone(&request_log));
+
+ commando_sock.register_command("api-access-log-reopen".into(), move |_args| {
+ println!("re-opening log file");
+ request_log.lock().unwrap().reopen()?;
+ Ok(serde_json::Value::Null)
+ })?;
Ok(())
}
- pub fn get_file_log(&self) -> Option<&Mutex<FileLogger>> {
+
+    /// Access the shared access-log handle, if `enable_file_log` was called.
+ pub fn get_file_log(&self) -> Option<&Arc<Mutex<FileLogger>>> {
self.request_log.as_ref()
}
}
}
fn log_response(
- logfile: Option<&Mutex<FileLogger>>,
+ logfile: Option<&Arc<Mutex<FileLogger>>>,
peer: &std::net::SocketAddr,
method: hyper::Method,
path_query: &str,