use std::sync::Arc;
+use std::path::{Path, PathBuf};
+use std::os::unix::io::AsRawFd;
use anyhow::{bail, format_err, Error};
use futures::*;
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
+use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
-use proxmox_backup::tools::daemon;
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
-
-fn main() {
- if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
- eprintln!("Error: {}", err);
- std::process::exit(-1);
+use proxmox_backup::tools::{
+ daemon,
+ disks::{
+ DiskManage,
+ zfs_pool_stats,
+ },
+ socket::{
+ set_tcp_keepalive,
+ PROXMOX_BACKUP_TCP_KEEPALIVE_TIME,
+ },
+};
+
+use proxmox_backup::api2::pull::do_sync_job;
+use proxmox_backup::backup::do_verification_job;
+
+fn main() -> Result<(), Error> {
+ proxmox_backup::tools::setup_safe_path_env();
+
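+ // the proxy is meant to run unprivileged as the backup user/group, so refuse to start otherwise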
+ let backup_uid = proxmox_backup::backup::backup_user()?.uid;
+ let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+ let running_uid = nix::unistd::Uid::effective();
+ let running_gid = nix::unistd::Gid::effective();
+
+ if running_uid != backup_uid || running_gid != backup_gid {
+ bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
}
+
+ proxmox_backup::tools::runtime::main(run())
}
async fn run() -> Result<(), Error> {
let mut config = ApiConfig::new(
buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
- // add default dirs which includes jquery and bootstrap
- // my $base = '/usr/share/libpve-http-server-perl';
- // add_dirs($self->{dirs}, '/css/' => "$base/css/");
- // add_dirs($self->{dirs}, '/js/' => "$base/js/");
- // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
config.add_alias("novnc", "/usr/share/novnc-pve");
config.add_alias("extjs", "/usr/share/javascript/extjs");
config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
+ config.add_alias("locale", "/usr/share/pbs-i18n");
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
+ let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
+ indexpath.push("index.hbs");
+ config.register_template("index", &indexpath)?;
+ config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
+
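+ // record all API requests in the access log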
+ config.enable_file_log(buildcfg::API_ACCESS_LOG_FN)?;
+
let rest_server = RestServer::new(config);
//openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
let key_path = configdir!("/proxy.key");
let cert_path = configdir!("/proxy.pem");
- let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
+ let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
acceptor.set_private_key_file(key_path, SslFiletype::PEM)
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
acceptor.set_certificate_chain_file(cert_path)
 .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
let acceptor = Arc::clone(&acceptor);
async move {
sock.set_nodelay(true).unwrap();
- sock.set_send_buffer_size(1024*1024).unwrap();
- sock.set_recv_buffer_size(1024*1024).unwrap();
+
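+ // set a TCP keepalive so dead peers are detected and their connections cleaned up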
+ let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
+
Ok(tokio_openssl::accept(&acceptor, sock)
.await
.ok() // handshake errors aren't fatal, so return None to filter
tokio::spawn(task.map(|_| ()));
}
-use std::time:: {Instant, Duration, SystemTime, UNIX_EPOCH};
+use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};
fn next_minute() -> Result<Instant, Error> {
- let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
- let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
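+ // wake up at the start of the next full minute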
+ let now = SystemTime::now();
+ let epoch_now = now.duration_since(UNIX_EPOCH)?;
+ let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
Ok(Instant::now() + epoch_next - epoch_now)
}
schedule_datastore_garbage_collection().await;
schedule_datastore_prune().await;
schedule_datastore_sync_jobs().await;
+ schedule_datastore_verify_jobs().await;
+ schedule_task_log_rotate().await;
Ok(())
}
-fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
-
- let list = proxmox_backup::server::read_task_list()?;
-
- let mut last: Option<&server::UPID> = None;
-
- for entry in list.iter() {
- if entry.upid.worker_type == worker_type {
- if let Some(ref id) = entry.upid.worker_id {
- if id == worker_id {
- match last {
- Some(ref upid) => {
- if upid.starttime < entry.upid.starttime {
- last = Some(&entry.upid)
- }
- }
- None => {
- last = Some(&entry.upid)
- }
- }
- }
- }
- }
- }
-
- Ok(last.cloned())
-}
-
-
async fn schedule_datastore_garbage_collection() {
use proxmox_backup::backup::DataStore;
use proxmox_backup::server::{UPID, WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
}
}
} else {
- match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => upid.starttime,
- Ok(None) => 0,
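+ // the job state file persists the last run time, so we no longer scan the whole task list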
+ match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
}
};
let next = match compute_next_event(&event, last, false) {
- Ok(next) => next,
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
- let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
- Ok(epoch_now) => epoch_now.as_secs() as i64,
- Err(err) => {
- eprintln!("query system time failed - {}", err);
- continue;
- }
- };
+
+ let now = proxmox::tools::time::epoch_i64();
+
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
- "backup@pam",
+ Userid::backup_userid().clone(),
false,
move |worker| {
+ job.start(&worker.upid().to_string())?;
+
worker.log(format!("starting garbage collection on store {}", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
- datastore.garbage_collection(&worker)
+
+ let result = datastore.garbage_collection(&*worker, worker.upid());
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
+ }
+
+ result
}
) {
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
async fn schedule_datastore_prune() {
use proxmox_backup::backup::{
- PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
+ PruneOptions, DataStore, BackupGroup, compute_prune_info};
use proxmox_backup::server::{WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
}
};
- //fixme: if last_prune_job_stzill_running { continue; }
-
let worker_type = "prune";
- let last = match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => upid.starttime,
- Ok(None) => 0,
+ let last = match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
};
let next = match compute_next_event(&event, last, false) {
- Ok(next) => next,
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
- let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
- Ok(epoch_now) => epoch_now.as_secs() as i64,
- Err(err) => {
- eprintln!("query system time failed - {}", err);
- continue;
- }
- };
+ let now = proxmox::tools::time::epoch_i64();
+
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
- "backup@pam",
+ Userid::backup_userid().clone(),
false,
move |worker| {
- worker.log(format!("Starting datastore prune on store \"{}\"", store));
- worker.log(format!("task triggered by schedule '{}'", event_str));
- worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- let base_path = datastore.base_path();
+ job.start(&worker.upid().to_string())?;
- let groups = BackupGroup::list_groups(&base_path)?;
- for group in groups {
- let list = group.list_backups(&base_path)?;
- let mut prune_info = compute_prune_info(list, &prune_options)?;
- prune_info.reverse(); // delete older snapshots first
+ let result = try_block!({
- worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
- store, group.backup_type(), group.backup_id()));
+ worker.log(format!("Starting datastore prune on store \"{}\"", store));
+ worker.log(format!("task triggered by schedule '{}'", event_str));
+ worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- for (info, keep) in prune_info {
- worker.log(format!(
- "{} {}/{}/{}",
- if keep { "keep" } else { "remove" },
- group.backup_type(), group.backup_id(),
- BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
+ let base_path = datastore.base_path();
- if !keep {
- datastore.remove_backup_dir(&info.backup_dir)?;
+ let groups = BackupGroup::list_groups(&base_path)?;
+ for group in groups {
+ let list = group.list_backups(&base_path)?;
+ let mut prune_info = compute_prune_info(list, &prune_options)?;
+ prune_info.reverse(); // delete older snapshots first
+
+ worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+ store, group.backup_type(), group.backup_id()));
+
+ for (info, keep) in prune_info {
+ worker.log(format!(
+ "{} {}/{}/{}",
+ if keep { "keep" } else { "remove" },
+ group.backup_type(), group.backup_id(),
+ info.backup_dir.backup_time_string()));
+ if !keep {
+ datastore.remove_backup_dir(&info.backup_dir, true)?;
+ }
}
}
+ Ok(())
+ });
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
}
- Ok(())
+ result
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
async fn schedule_datastore_sync_jobs() {
use proxmox_backup::{
- backup::DataStore,
- client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
- server::{ WorkerTask },
- config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
+ config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
tools::systemd::time::{ parse_calendar_event, compute_next_event },
};
Ok((config, _digest)) => config,
};
- let remote_config = match remote::config() {
- Err(err) => {
- eprintln!("unable to read remote config - {}", err);
- return;
- }
- Ok((config, _digest)) => config,
- };
-
for (job_id, (_, job_config)) in config.sections {
let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
}
};
- //fixme: if last_sync_job_still_running { continue; }
-
- let worker_type = "sync";
+ let worker_type = "syncjob";
- let last = match lookup_last_worker(worker_type, &job_config.store) {
- Ok(Some(upid)) => upid.starttime,
- Ok(None) => 0,
+ let last = match jobstate::last_run_time(worker_type, &job_id) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
continue;
}
};
let next = match compute_next_event(&event, last, false) {
- Ok(next) => next,
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
- let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
- Ok(epoch_now) => epoch_now.as_secs() as i64,
- Err(err) => {
- eprintln!("query system time failed - {}", err);
- continue;
- }
- };
+ let now = proxmox::tools::time::epoch_i64();
+
if next > now { continue; }
+ let job = match Job::new(worker_type, &job_id) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
- let job_id2 = job_id.clone();
+ let userid = Userid::backup_userid().clone();
- let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
- Ok(datastore) => datastore,
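+ // hand the locked job over to do_sync_job, which runs the sync as a worker task and records the job state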
+ if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
+ eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
+ }
+ }
+}
+
+async fn schedule_datastore_verify_jobs() {
+ use proxmox_backup::{
+ config::{verify::{self, VerificationJobConfig}, jobstate::{self, Job}},
+ tools::systemd::time::{parse_calendar_event, compute_next_event},
+ };
+ let config = match verify::config() {
+ Err(err) => {
+ eprintln!("unable to read verification job config - {}", err);
+ return;
+ }
+ Ok((config, _digest)) => config,
+ };
+ for (job_id, (_, job_config)) in config.sections {
+ let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
+ Ok(c) => c,
Err(err) => {
- eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
+ eprintln!("verification job config from_value failed - {}", err);
continue;
}
};
-
- let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
- Ok(remote) => remote,
+ let event_str = match job_config.schedule {
+ Some(ref event_str) => event_str.clone(),
+ None => continue,
+ };
+ let event = match parse_calendar_event(&event_str) {
+ Ok(event) => event,
+ Err(err) => {
+ eprintln!("unable to parse schedule '{}' - {}", event_str, err);
+ continue;
+ }
+ };
+ let worker_type = "verificationjob";
+ let last = match jobstate::last_run_time(worker_type, &job_id) {
+ Ok(time) => time,
+ Err(err) => {
+ eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
+ continue;
+ }
+ };
+ let next = match compute_next_event(&event, last, false) {
+ Ok(Some(next)) => next,
+ Ok(None) => continue,
Err(err) => {
- eprintln!("remote_config lookup failed: {}", err);
+ eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
continue;
}
};
+ let now = proxmox::tools::time::epoch_i64();
+ if next > now { continue; }
+ let job = match Job::new(worker_type, &job_id) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+ let userid = Userid::backup_userid().clone();
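+ // as with sync jobs, do_verification_job consumes the locked job and runs it as a worker task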
+ if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
+ eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
+ }
+ }
+}
- let username = String::from("backup@pam");
+async fn schedule_task_log_rotate() {
+ use proxmox_backup::{
+ config::jobstate::{self, Job},
+ server::rotate_task_log_archive,
+ };
+ use proxmox_backup::server::WorkerTask;
+ use proxmox_backup::tools::systemd::time::{
+ parse_calendar_event, compute_next_event};
- let delete = job_config.remove_vanished.unwrap_or(true);
+ let worker_type = "logrotate";
+ let job_id = "task-archive";
- if let Err(err) = WorkerTask::spawn(
- worker_type,
- Some(job_config.store.clone()),
- &username.clone(),
- false,
- move |worker| async move {
- worker.log(format!("Starting datastore sync job '{}'", job_id));
- worker.log(format!("task triggered by schedule '{}'", event_str));
- worker.log(format!("Sync datastore '{}' from '{}/{}'",
- job_config.store, job_config.remote, job_config.remote_store));
+ let last = match jobstate::last_run_time(worker_type, job_id) {
+ Ok(time) => time,
+ Err(err) => {
+ eprintln!("could not get last run time of task log archive rotation: {}", err);
+ return;
+ }
+ };
+
+ // schedule daily at 00:00 like normal logrotate
+ let schedule = "00:00";
- let options = HttpClientOptions::new()
- .password(Some(remote.password.clone()))
- .fingerprint(remote.fingerprint.clone());
+ let event = match parse_calendar_event(schedule) {
+ Ok(event) => event,
+ Err(err) => {
+ // the hard-coded schedule is a constant and should always parse
+ eprintln!("unable to parse schedule '{}' - {}", schedule, err);
+ return;
+ }
+ };
+
+ let next = match compute_next_event(&event, last, false) {
+ Ok(Some(next)) => next,
+ Ok(None) => return,
+ Err(err) => {
+ eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
+ return;
+ }
+ };
- let client = HttpClient::new(&remote.host, &remote.userid, options)?;
- let _auth_info = client.login() // make sure we can auth
- .await
- .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
+ let now = proxmox::tools::time::epoch_i64();
- let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);
+ if next > now {
+ // not due yet; still rotate immediately if we never ran before (job state is still Created)
+ match jobstate::JobState::load(worker_type, job_id) {
+ Ok(state) => match state {
+ jobstate::JobState::Created { .. } => {},
+ _ => return,
+ },
+ _ => return,
+ }
+ }
- pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;
+ let mut job = match Job::new(worker_type, job_id) {
+ Ok(job) => job,
+ Err(_) => return, // could not get lock
+ };
+
+ if let Err(err) = WorkerTask::new_thread(
+ worker_type,
+ Some(job_id.to_string()),
+ Userid::backup_userid().clone(),
+ false,
+ move |worker| {
+ job.start(&worker.upid().to_string())?;
+ worker.log(format!("starting task log rotation"));
+
+ let result = try_block!({
+ // rotate task log archive
+ let max_size = 500000; // a normal entry has about 100b, so ~ 5000 entries/file
+ let max_files = 20; // times twenty files gives at least 100000 task entries
+ let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
+ if has_rotated {
+ worker.log(format!("task log archive was rotated"));
+ } else {
+ worker.log(format!("task log archive was not rotated"));
+ }
Ok(())
+ });
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
}
- ) {
- eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
- }
+
+ result
+ },
+ ) {
+ eprintln!("unable to start task log rotation: {}", err);
}
}
async fn run_stat_generator() {
+ let mut count = 0;
loop {
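+ // write RRD data to disk only on every 6th iteration, i.e. once a minute at the 10 second poll interval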
+ count += 1;
+ let save = if count >= 6 { count = 0; true } else { false };
+
let delay_target = Instant::now() + Duration::from_secs(10);
- generate_host_stats().await;
+ generate_host_stats(save).await;
tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
 }
}
+
+fn rrd_update_gauge(name: &str, value: f64, save: bool) {
+ use proxmox_backup::rrd;
+ if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
+ eprintln!("rrd::update_value '{}' failed - {}", name, err);
+ }
+}
+fn rrd_update_derive(name: &str, value: f64, save: bool) {
+ use proxmox_backup::rrd;
+ if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
+ eprintln!("rrd::update_value '{}' failed - {}", name, err);
+ }
+}
-async fn generate_host_stats() {
+async fn generate_host_stats(save: bool) {
use proxmox::sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
- use proxmox_backup::{ rrd, config::datastore };
+ use proxmox_backup::config::datastore;
+
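+ // procfs/sysfs reads are blocking, so leave the async executor for the duration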
proxmox_backup::tools::runtime::block_in_place(move || {
match read_proc_stat() {
Ok(stat) => {
- if let Err(err) = rrd::update_value("host/cpu", stat.cpu, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/cpu' failed - {}", err);
- }
- if let Err(err) = rrd::update_value("host/iowait", stat.iowait_percent, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/iowait' failed - {}", err);
- }
+ rrd_update_gauge("host/cpu", stat.cpu, save);
+ rrd_update_gauge("host/iowait", stat.iowait_percent, save);
}
Err(err) => {
eprintln!("read_proc_stat failed - {}", err);
match read_meminfo() {
Ok(meminfo) => {
- if let Err(err) = rrd::update_value("host/memtotal", meminfo.memtotal as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/memtotal' failed - {}", err);
- }
- if let Err(err) = rrd::update_value("host/memused", meminfo.memused as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/memused' failed - {}", err);
- }
- if let Err(err) = rrd::update_value("host/swaptotal", meminfo.swaptotal as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/swaptotal' failed - {}", err);
- }
- if let Err(err) = rrd::update_value("host/swapused", meminfo.swapused as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/swapused' failed - {}", err);
- }
+ rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
+ rrd_update_gauge("host/memused", meminfo.memused as f64, save);
+ rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
+ rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
}
Err(err) => {
eprintln!("read_meminfo failed - {}", err);
netin += item.receive;
netout += item.send;
}
- if let Err(err) = rrd::update_value("host/netin", netin as f64, rrd::DST::Derive) {
- eprintln!("rrd::update_value 'host/netin' failed - {}", err);
- }
- if let Err(err) = rrd::update_value("host/netout", netout as f64, rrd::DST::Derive) {
- eprintln!("rrd::update_value 'host/netout' failed - {}", err);
- }
+ rrd_update_derive("host/netin", netin as f64, save);
+ rrd_update_derive("host/netout", netout as f64, save);
}
Err(err) => {
eprintln!("read_prox_net_dev failed - {}", err);
match read_loadavg() {
Ok(loadavg) => {
- if let Err(err) = rrd::update_value("host/loadavg", loadavg.0 as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/roottotal' failed - {}", err);
- }
+ rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
}
Err(err) => {
eprintln!("read_loadavg failed - {}", err);
}
}
- match disk_usage(std::path::Path::new("/")) {
- Ok((total, used, _avail)) => {
- if let Err(err) = rrd::update_value("host/roottotal", total as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/roottotal' failed - {}", err);
- }
- if let Err(err) = rrd::update_value("host/rootused", used as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value 'host/rootused' failed - {}", err);
- }
- }
- Err(err) => {
- eprintln!("read root disk_usage failed - {}", err);
- }
- }
+ let disk_manager = DiskManage::new();
+
+ gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);
match datastore::config() {
Ok((config, _)) => {
let datastore_list: Vec<datastore::DataStoreConfig> =
 config.convert_to_typed_array("datastore").unwrap_or(Vec::new());
for config in datastore_list {
- match disk_usage(std::path::Path::new(&config.path)) {
- Ok((total, used, _avail)) => {
- let rrd_key = format!("datastore/{}", config.name);
- if let Err(err) = rrd::update_value(&rrd_key, total as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value '{}' failed - {}", rrd_key, err);
- }
- if let Err(err) = rrd::update_value(&rrd_key, used as f64, rrd::DST::Gauge) {
- eprintln!("rrd::update_value '{}' failed - {}", rrd_key, err);
- }
- }
- Err(err) => {
- eprintln!("read disk_usage on {:?} failed - {}", config.path, err);
- }
- }
+
+ let rrd_prefix = format!("datastore/{}", config.name);
+ let path = std::path::Path::new(&config.path);
+ gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
}
}
Err(err) => {
});
}
-// Returns (total, used, avail)
-fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
-
- let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
+fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
- use nix::NixPath;
+ match proxmox_backup::tools::disks::disk_usage(path) {
+ Ok(status) => {
+ let rrd_key = format!("{}/total", rrd_prefix);
+ rrd_update_gauge(&rrd_key, status.total as f64, save);
+ let rrd_key = format!("{}/used", rrd_prefix);
+ rrd_update_gauge(&rrd_key, status.used as f64, save);
+ }
+ Err(err) => {
+ eprintln!("read disk_usage on {:?} failed - {}", path, err);
+ }
+ }
- let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
- nix::errno::Errno::result(res)?;
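+ // find the block device the path is mounted on to also collect per-device IO stats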
+ match disk_manager.find_mounted_device(path) {
+ Ok(None) => {},
+ Ok(Some((fs_type, device, source))) => {
+ let mut device_stat = None;
+ match fs_type.as_str() {
+ "zfs" => {
+ if let Some(pool) = source {
+ match zfs_pool_stats(&pool) {
+ Ok(stat) => device_stat = stat,
+ Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
+ }
+ }
+ }
+ _ => {
+ if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
+ match disk.read_stat() {
+ Ok(stat) => device_stat = stat,
+ Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
+ }
+ }
+ }
+ }
+ if let Some(stat) = device_stat {
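+ // block device stats count 512-byte sectors; io_ticks are milliseconds of IO time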
+ let rrd_key = format!("{}/read_ios", rrd_prefix);
+ rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
+ let rrd_key = format!("{}/read_bytes", rrd_prefix);
+ rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);
- let bsize = stat.f_bsize as u64;
+ let rrd_key = format!("{}/write_ios", rrd_prefix);
+ rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
+ let rrd_key = format!("{}/write_bytes", rrd_prefix);
+ rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);
- Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
+ let rrd_key = format!("{}/io_ticks", rrd_prefix);
+ rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
+ }
+ }
+ Err(err) => {
+ eprintln!("find_mounted_device failed - {}", err);
+ }
+ }
}