use proxmox_sys::fs::CreateOptions;
-use proxmox_rrd::rrd::{CF, DST, RRA, RRD};
+use proxmox_rrd::rrd::{AggregationFn, Archive, DataSourceType, Database};
pub const RRA_INDEX_SCHEMA: Schema = IntegerSchema::new("Index of the RRA.").minimum(0).schema();
pub struct RRAConfig {
/// Time resolution
pub r: u64,
- pub cf: CF,
+ pub cf: AggregationFn,
/// Number of data points
pub n: u64,
}
)]
/// Dump the RRD file in JSON format
pub fn dump_rrd(path: String) -> Result<(), Error> {
- let rrd = RRD::load(&PathBuf::from(path), false)?;
+ let rrd = Database::load(&PathBuf::from(path), false)?;
serde_json::to_writer_pretty(std::io::stdout(), &rrd)?;
println!();
Ok(())
)]
/// RRD file information
pub fn rrd_info(path: String) -> Result<(), Error> {
- let rrd = RRD::load(&PathBuf::from(path), false)?;
+ let rrd = Database::load(&PathBuf::from(path), false)?;
println!("DST: {:?}", rrd.source.dst);
.map(|v| v as f64)
.unwrap_or_else(proxmox_time::epoch_f64);
- let mut rrd = RRD::load(&path, false)?;
+ let mut rrd = Database::load(&path, false)?;
rrd.update(time, value);
rrd.save(&path, CreateOptions::new(), false)?;
/// Fetch data from the RRD file
pub fn fetch_rrd(
path: String,
- cf: CF,
+ cf: AggregationFn,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
) -> Result<(), Error> {
- let rrd = RRD::load(&PathBuf::from(path), false)?;
+ let rrd = Database::load(&PathBuf::from(path), false)?;
let data = rrd.extract_data(cf, resolution, start, end)?;
/// Return the Unix timestamp of the first time slot inside the
/// specified RRA (slot start time)
pub fn first_update_time(path: String, rra_index: usize) -> Result<(), Error> {
- let rrd = RRD::load(&PathBuf::from(path), false)?;
+ let rrd = Database::load(&PathBuf::from(path), false)?;
if rra_index >= rrd.rra_list.len() {
bail!("rra-index is out of range");
)]
/// Return the Unix timestamp of the last update
pub fn last_update_time(path: String) -> Result<(), Error> {
- let rrd = RRD::load(&PathBuf::from(path), false)?;
+ let rrd = Database::load(&PathBuf::from(path), false)?;
println!("{}", rrd.source.last_update);
Ok(())
)]
/// Return the time and value from the last update
pub fn last_update(path: String) -> Result<(), Error> {
- let rrd = RRD::load(&PathBuf::from(path), false)?;
+ let rrd = Database::load(&PathBuf::from(path), false)?;
let result = json!({
"time": rrd.source.last_update,
},
)]
/// Create a new RRD file
-pub fn create_rrd(dst: DST, path: String, rra: Vec<String>) -> Result<(), Error> {
+pub fn create_rrd(dst: DataSourceType, path: String, rra: Vec<String>) -> Result<(), Error> {
let mut rra_list = Vec::new();
for item in rra.iter() {
let rra: RRAConfig =
serde_json::from_value(RRAConfig::API_SCHEMA.parse_property_string(item)?)?;
println!("GOT {:?}", rra);
- rra_list.push(RRA::new(rra.cf, rra.r, rra.n as usize));
+ rra_list.push(Archive::new(rra.cf, rra.r, rra.n as usize));
}
let path = PathBuf::from(path);
- let rrd = RRD::new(dst, rra_list);
+ let rrd = Database::new(dst, rra_list);
rrd.save(&path, CreateOptions::new(), false)?;
pub fn resize_rrd(path: String, rra_index: usize, slots: i64) -> Result<(), Error> {
let path = PathBuf::from(&path);
- let mut rrd = RRD::load(&path, false)?;
+ let mut rrd = Database::load(&path, false)?;
if rra_index >= rrd.rra_list.len() {
bail!("rra-index is out of range");
.extract_data(rra_start, rra_end, rrd.source.last_update)
.into();
- let mut new_rra = RRA::new(rra.cf, rra.resolution, new_slots as usize);
+ let mut new_rra = Archive::new(rra.cf, rra.resolution, new_slots as usize);
new_rra.last_count = rra.last_count;
new_rra.insert_data(start, reso, data)?;
use proxmox_sys::fs::{create_path, CreateOptions};
-use crate::rrd::{CF, DST, RRA, RRD};
+use crate::rrd::{AggregationFn, Archive, DataSourceType, Database};
use crate::Entry;
mod journal;
///
/// This cache is designed to run as single instance (no concurrent
/// access from other processes).
-pub struct RRDCache {
+pub struct Cache {
config: Arc<CacheConfig>,
state: Arc<RwLock<JournalState>>,
rrd_map: Arc<RwLock<RRDMap>>,
dir_options: CreateOptions,
}
-impl RRDCache {
+impl Cache {
/// Creates a new instance
///
/// `basedir`: All files are stored relative to this path.
file_options: Option<CreateOptions>,
dir_options: Option<CreateOptions>,
apply_interval: f64,
- load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DST) -> RRD,
+ load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DataSourceType) -> Database,
) -> Result<Self, Error> {
let basedir = basedir.as_ref().to_owned();
/// * cf=maximum,r=7*86400,n=570 => 10year
///
/// The resulting data file size is about 80KB.
- pub fn create_proxmox_backup_default_rrd(dst: DST) -> RRD {
+ pub fn create_proxmox_backup_default_rrd(dst: DataSourceType) -> Database {
let rra_list = vec![
// 1 min * 1440 => 1 day
- RRA::new(CF::Average, 60, 1440),
- RRA::new(CF::Maximum, 60, 1440),
+ Archive::new(AggregationFn::Average, 60, 1440),
+ Archive::new(AggregationFn::Maximum, 60, 1440),
// 30 min * 1440 => 30 days ~ 1 month
- RRA::new(CF::Average, 30 * 60, 1440),
- RRA::new(CF::Maximum, 30 * 60, 1440),
+ Archive::new(AggregationFn::Average, 30 * 60, 1440),
+ Archive::new(AggregationFn::Maximum, 30 * 60, 1440),
// 6 h * 1440 => 360 days ~ 1 year
- RRA::new(CF::Average, 6 * 3600, 1440),
- RRA::new(CF::Maximum, 6 * 3600, 1440),
+ Archive::new(AggregationFn::Average, 6 * 3600, 1440),
+ Archive::new(AggregationFn::Maximum, 6 * 3600, 1440),
// 1 week * 570 => 10 years
- RRA::new(CF::Average, 7 * 86400, 570),
- RRA::new(CF::Maximum, 7 * 86400, 570),
+ Archive::new(AggregationFn::Average, 7 * 86400, 570),
+ Archive::new(AggregationFn::Maximum, 7 * 86400, 570),
];
- RRD::new(dst, rra_list)
+ Database::new(dst, rra_list)
}
/// Sync the journal data to disk (using `fdatasync` syscall)
rel_path: &str,
time: f64,
value: f64,
- dst: DST,
+ dst: DataSourceType,
) -> Result<(), Error> {
let journal_applied = self.apply_journal()?;
&self,
base: &str,
name: &str,
- cf: CF,
+ cf: AggregationFn,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
const RRD_JOURNAL_NAME: &str = "rrd.journal";
use crate::cache::CacheConfig;
-use crate::rrd::DST;
+use crate::rrd::DataSourceType;
// shared state behind RwLock
pub struct JournalState {
pub struct JournalEntry {
pub time: f64,
pub value: f64,
- pub dst: DST,
+ pub dst: DataSourceType,
pub rel_path: String,
}
.map_err(|_| format_err!("unable to parse data source type"))?;
let dst = match dst {
- 0 => DST::Gauge,
- 1 => DST::Derive,
+ 0 => DataSourceType::Gauge,
+ 1 => DataSourceType::Derive,
_ => bail!("got strange value for data source type '{}'", dst),
};
&mut self,
time: f64,
value: f64,
- dst: DST,
+ dst: DataSourceType,
rel_path: &str,
) -> Result<(), Error> {
let journal_entry = format!("{}:{}:{}:{}\n", time, value, dst as u8, rel_path);
use proxmox_sys::fs::create_path;
-use crate::rrd::{CF, DST, RRD};
+use crate::rrd::{AggregationFn, DataSourceType, Database};
use super::CacheConfig;
use crate::Entry;
pub struct RRDMap {
config: Arc<CacheConfig>,
- map: HashMap<String, RRD>,
- load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DST) -> RRD,
+ map: HashMap<String, Database>,
+ load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DataSourceType) -> Database,
}
impl RRDMap {
pub(crate) fn new(
config: Arc<CacheConfig>,
- load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DST) -> RRD,
+ load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DataSourceType) -> Database,
) -> Self {
Self {
config,
rel_path: &str,
time: f64,
value: f64,
- dst: DST,
+ dst: DataSourceType,
new_only: bool,
) -> Result<(), Error> {
if let Some(rrd) = self.map.get_mut(rel_path) {
&self,
base: &str,
name: &str,
- cf: CF,
+ cf: AggregationFn,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// RRD data source type
-pub enum DST {
+pub enum DataSourceType {
/// Gauge values are stored unmodified.
Gauge,
/// Stores the difference to the previous value.
#[api()]
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
-/// Consolidation function
-pub enum CF {
+/// Aggregation function
+pub enum AggregationFn {
/// Average
Average,
/// Maximum
/// Data source specification
pub struct DataSource {
/// Data source type
- pub dst: DST,
+ pub dst: DataSourceType,
/// Last update time (epoch)
pub last_update: f64,
/// Stores the last value, used to compute differential value for
impl DataSource {
/// Create a new Instance
- pub fn new(dst: DST) -> Self {
+ pub fn new(dst: DataSourceType) -> Self {
Self {
dst,
last_update: 0.0,
}
// derive counter value
- let is_counter = self.dst == DST::Counter;
+ let is_counter = self.dst == DataSourceType::Counter;
- if is_counter || self.dst == DST::Derive {
+ if is_counter || self.dst == DataSourceType::Derive {
let time_diff = time - self.last_update;
let diff = if self.last_value.is_nan() {
#[derive(Serialize, Deserialize)]
/// Round Robin Archive
-pub struct RRA {
+pub struct Archive {
/// Number of seconds spanned by a single data entry.
pub resolution: u64,
    /// Aggregation function.
- pub cf: CF,
+ pub cf: AggregationFn,
/// Count values computed inside this update interval.
pub last_count: u64,
/// The actual data entries.
pub data: Vec<f64>,
}
-impl RRA {
+impl Archive {
/// Creates a new instance
- pub fn new(cf: CF, resolution: u64, points: usize) -> Self {
+ pub fn new(cf: AggregationFn, resolution: u64, points: usize) -> Self {
Self {
cf,
resolution,
self.last_count = 1;
} else {
let new_value = match self.cf {
- CF::Maximum => {
+ AggregationFn::Maximum => {
if last_value > value {
last_value
} else {
value
}
}
- CF::Minimum => {
+ AggregationFn::Minimum => {
if last_value < value {
last_value
} else {
value
}
}
- CF::Last => value,
- CF::Average => {
+ AggregationFn::Last => value,
+ AggregationFn::Average => {
(last_value * (self.last_count as f64)) / (new_count as f64)
+ value / (new_count as f64)
}
#[derive(Serialize, Deserialize)]
/// Round Robin Database
-pub struct RRD {
+pub struct Database {
/// The data source definition
pub source: DataSource,
/// List of round robin archives
- pub rra_list: Vec<RRA>,
+ pub rra_list: Vec<Archive>,
}
-impl RRD {
+impl Database {
/// Creates a new Instance
- pub fn new(dst: DST, rra_list: Vec<RRA>) -> RRD {
+ pub fn new(dst: DataSourceType, rra_list: Vec<Archive>) -> Database {
let source = DataSource::new(dst);
- RRD { source, rra_list }
+ Database { source, rra_list }
}
fn from_raw(raw: &[u8]) -> Result<Self, Error> {
bail!("not an rrd file - file is too small ({})", raw.len());
}
- let rrd: RRD = match &raw[0..8] {
+ let rrd: Database = match &raw[0..8] {
#[cfg(feature = "rrd_v1")]
magic if magic == crate::rrd_v1::PROXMOX_RRD_MAGIC_1_0 => {
let v1 = crate::rrd_v1::RRDv1::from_raw(raw)?;
v1.to_rrd_v2()
.map_err(|err| format_err!("unable to convert from old V1 format - {err}"))?
}
- magic if magic == PROXMOX_RRD_MAGIC_2_0 => {
- serde_cbor::from_slice(&raw[8..])
- .map_err(|err| format_err!("unable to decode RRD file - {err}"))?
- }
- _ => bail!("not an rrd file - unknown magic number")
+ magic if magic == PROXMOX_RRD_MAGIC_2_0 => serde_cbor::from_slice(&raw[8..])
+ .map_err(|err| format_err!("unable to decode RRD file - {err}"))?,
+ _ => bail!("not an rrd file - unknown magic number"),
};
if rrd.source.last_update < 0.0 {
/// Extract data from the archive
///
- /// This selects the RRA with specified [CF] and (minimum)
+ /// This selects the RRA with specified [AggregationFn] and (minimum)
/// resolution, and extract data from `start` to `end`.
///
/// `start`: Start time. If not specified, we simply extract 10 data points.
/// `end`: End time. Default is to use the current time.
pub fn extract_data(
&self,
- cf: CF,
+ cf: AggregationFn,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
) -> Result<Entry, Error> {
- let mut rra: Option<&RRA> = None;
+ let mut rra: Option<&Archive> = None;
for item in self.rra_list.iter() {
if item.cf != cf {
continue;
#[test]
fn basic_rra_maximum_gauge_test() -> Result<(), Error> {
- let rra = RRA::new(CF::Maximum, 60, 5);
- let mut rrd = RRD::new(DST::Gauge, vec![rra]);
+ let rra = Archive::new(AggregationFn::Maximum, 60, 5);
+ let mut rrd = Database::new(DataSourceType::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
start,
resolution,
data,
- } = rrd.extract_data(CF::Maximum, 60, Some(0), Some(5 * 60))?;
+ } = rrd.extract_data(AggregationFn::Maximum, 60, Some(0), Some(5 * 60))?;
assert_eq!(start, 0);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(3.0), Some(5.0), Some(7.0), Some(9.0)]);
#[test]
fn basic_rra_minimum_gauge_test() -> Result<(), Error> {
- let rra = RRA::new(CF::Minimum, 60, 5);
- let mut rrd = RRD::new(DST::Gauge, vec![rra]);
+ let rra = Archive::new(AggregationFn::Minimum, 60, 5);
+ let mut rrd = Database::new(DataSourceType::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
start,
resolution,
data,
- } = rrd.extract_data(CF::Minimum, 60, Some(0), Some(5 * 60))?;
+ } = rrd.extract_data(AggregationFn::Minimum, 60, Some(0), Some(5 * 60))?;
assert_eq!(start, 0);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(2.0), Some(4.0), Some(6.0), Some(8.0)]);
#[test]
fn basic_rra_last_gauge_test() -> Result<(), Error> {
- let rra = RRA::new(CF::Last, 60, 5);
- let mut rrd = RRD::new(DST::Gauge, vec![rra]);
+ let rra = Archive::new(AggregationFn::Last, 60, 5);
+ let mut rrd = Database::new(DataSourceType::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
}
assert!(
- rrd.extract_data(CF::Average, 60, Some(0), Some(5 * 60))
+ rrd.extract_data(AggregationFn::Average, 60, Some(0), Some(5 * 60))
.is_err(),
"CF::Average should not exist"
);
start,
resolution,
data,
- } = rrd.extract_data(CF::Last, 60, Some(0), Some(20 * 60))?;
+ } = rrd.extract_data(AggregationFn::Last, 60, Some(0), Some(20 * 60))?;
assert_eq!(start, 0);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(3.0), Some(5.0), Some(7.0), Some(9.0)]);
#[test]
fn basic_rra_average_derive_test() -> Result<(), Error> {
- let rra = RRA::new(CF::Average, 60, 5);
- let mut rrd = RRD::new(DST::Derive, vec![rra]);
+ let rra = Archive::new(AggregationFn::Average, 60, 5);
+ let mut rrd = Database::new(DataSourceType::Derive, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, (i * 60) as f64);
start,
resolution,
data,
- } = rrd.extract_data(CF::Average, 60, Some(60), Some(5 * 60))?;
+ } = rrd.extract_data(AggregationFn::Average, 60, Some(60), Some(5 * 60))?;
assert_eq!(start, 60);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(1.0), Some(2.0), Some(2.0), Some(2.0), None]);
#[test]
fn basic_rra_average_gauge_test() -> Result<(), Error> {
- let rra = RRA::new(CF::Average, 60, 5);
- let mut rrd = RRD::new(DST::Gauge, vec![rra]);
+ let rra = Archive::new(AggregationFn::Average, 60, 5);
+ let mut rrd = Database::new(DataSourceType::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
start,
resolution,
data,
- } = rrd.extract_data(CF::Average, 60, Some(60), Some(5 * 60))?;
+ } = rrd.extract_data(AggregationFn::Average, 60, Some(60), Some(5 * 60))?;
assert_eq!(start, 60);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(2.5), Some(4.5), Some(6.5), Some(8.5), None]);
start,
resolution,
data,
- } = rrd.extract_data(CF::Average, 60, Some(60), Some(5 * 60))?;
+ } = rrd.extract_data(AggregationFn::Average, 60, Some(60), Some(5 * 60))?;
assert_eq!(start, 60);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(4.5), Some(6.5), Some(8.5), Some(10.5)]);
start,
resolution,
data,
- } = rrd.extract_data(CF::Average, 60, Some(3 * 60), Some(8 * 60))?;
+ } = rrd.extract_data(AggregationFn::Average, 60, Some(3 * 60), Some(8 * 60))?;
assert_eq!(start, 3 * 60);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(6.5), Some(8.5), Some(10.5), Some(12.5), None]);
start,
resolution,
data,
- } = rrd.extract_data(CF::Average, 60, Some(100 * 30), Some(100 * 30 + 5 * 60))?;
+ } = rrd.extract_data(
+ AggregationFn::Average,
+ 60,
+ Some(100 * 30),
+ Some(100 * 30 + 5 * 60),
+ )?;
assert_eq!(start, 100 * 30);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(100.0), None, None, None, None]);
start,
resolution,
data,
- } = rrd.extract_data(CF::Average, 60, Some(100 * 30), Some(60))?;
+ } = rrd.extract_data(AggregationFn::Average, 60, Some(100 * 30), Some(60))?;
assert_eq!(start, 100 * 30);
assert_eq!(resolution, 60);
assert_eq!(data, []);
// openssl::sha::sha256(b"Proxmox Round Robin Database file v1.0")[0..8];
pub const PROXMOX_RRD_MAGIC_1_0: [u8; 8] = [206, 46, 26, 212, 172, 158, 5, 186];
-use crate::rrd::{DataSource, CF, DST, RRA, RRD};
+use crate::rrd::{AggregationFn, Archive, DataSource, DataSourceType, Database};
bitflags! {
/// Flags to specify the data source type and consolidation function
/// Round Robin Archive with [RRD_DATA_ENTRIES] data slots.
///
-/// This data structure is used inside [RRD] and directly written to the
+/// This data structure is used inside [Database] and directly written to the
/// RRD files.
#[repr(C)]
pub struct RRAv1 {
}
}
-/// Round Robin Database file format with fixed number of [RRA]s
+/// Round Robin Database file format with fixed number of [Archive]s
#[repr(C)]
// Note: Avoid alignment problems by using 8byte types only
pub struct RRDv1 {
Ok(rrd)
}
- pub fn to_rrd_v2(&self) -> Result<RRD, Error> {
+ pub fn to_rrd_v2(&self) -> Result<Database, Error> {
let mut rra_list = Vec::new();
// old format v1:
// Try to convert to new, higher capacity format
// compute daily average (merge old self.day_avg and self.hour_avg
- let mut day_avg = RRA::new(CF::Average, 60, 1440);
+ let mut day_avg = Archive::new(AggregationFn::Average, 60, 1440);
let (start, reso, data) = self.day_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 30, data);
day_avg.insert_data(start, reso, data)?;
// compute daily maximum (merge old self.day_max and self.hour_max
- let mut day_max = RRA::new(CF::Maximum, 60, 1440);
+ let mut day_max = Archive::new(AggregationFn::Maximum, 60, 1440);
let (start, reso, data) = self.day_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 30, data);
// compute monthly average (merge old self.month_avg,
// self.week_avg and self.day_avg)
- let mut month_avg = RRA::new(CF::Average, 30 * 60, 1440);
+ let mut month_avg = Archive::new(AggregationFn::Average, 30 * 60, 1440);
let (start, reso, data) = self.month_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 24, data);
// compute monthly maximum (merge old self.month_max,
// self.week_max and self.day_max)
- let mut month_max = RRA::new(CF::Maximum, 30 * 60, 1440);
+ let mut month_max = Archive::new(AggregationFn::Maximum, 30 * 60, 1440);
let (start, reso, data) = self.month_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 24, data);
month_max.insert_data(start, reso, data)?;
// compute yearly average (merge old self.year_avg)
- let mut year_avg = RRA::new(CF::Average, 6 * 3600, 1440);
+ let mut year_avg = Archive::new(AggregationFn::Average, 6 * 3600, 1440);
let (start, reso, data) = self.year_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 28, data);
year_avg.insert_data(start, reso, data)?;
// compute yearly maximum (merge old self.year_avg)
- let mut year_max = RRA::new(CF::Maximum, 6 * 3600, 1440);
+ let mut year_max = Archive::new(AggregationFn::Maximum, 6 * 3600, 1440);
let (start, reso, data) = self.year_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 28, data);
year_max.insert_data(start, reso, data)?;
// compute decade average (merge old self.year_avg)
- let mut decade_avg = RRA::new(CF::Average, 7 * 86400, 570);
+ let mut decade_avg = Archive::new(AggregationFn::Average, 7 * 86400, 570);
let (start, reso, data) = self.year_avg.extract_data();
decade_avg.insert_data(start, reso, data)?;
// compute decade maximum (merge old self.year_max)
- let mut decade_max = RRA::new(CF::Maximum, 7 * 86400, 570);
+ let mut decade_max = Archive::new(AggregationFn::Maximum, 7 * 86400, 570);
let (start, reso, data) = self.year_max.extract_data();
decade_max.insert_data(start, reso, data)?;
// use values from hour_avg for source (all RRAv1 must have the same config)
let dst = if self.hour_avg.flags.contains(RRAFlags::DST_COUNTER) {
- DST::Counter
+ DataSourceType::Counter
} else if self.hour_avg.flags.contains(RRAFlags::DST_DERIVE) {
- DST::Derive
+ DataSourceType::Derive
} else {
- DST::Gauge
+ DataSourceType::Gauge
};
let source = DataSource {
last_value: f64::NAN,
last_update: self.hour_avg.last_update, // IMPORTANT!
};
- Ok(RRD { source, rra_list })
+ Ok(Database { source, rra_list })
}
}
use anyhow::{bail, Error};
-use proxmox_rrd::rrd::RRD;
+use proxmox_rrd::rrd::Database;
use proxmox_sys::fs::CreateOptions;
fn compare_file(fn1: &str, fn2: &str) -> Result<(), Error> {
#[cfg(feature = "rrd_v1")]
fn upgrade_from_rrd_v1() -> Result<(), Error> {
const RRD_V1_FN: &str = "./tests/testdata/cpu.rrd_v1";
- let rrd = RRD::load(Path::new(RRD_V1_FN), true)?;
+ let rrd = Database::load(Path::new(RRD_V1_FN), true)?;
const RRD_V2_NEW_FN: &str = "./tests/testdata/cpu.rrd_v2.upgraded";
let new_path = Path::new(RRD_V2_NEW_FN);
// make sure we can load and save RRD v2
#[test]
fn load_and_save_rrd_v2() -> Result<(), Error> {
- let rrd = RRD::load(Path::new(RRD_V2_FN), true)?;
+ let rrd = Database::load(Path::new(RRD_V2_FN), true)?;
const RRD_V2_NEW_FN: &str = "./tests/testdata/cpu.rrd_v2.saved";
let new_path = Path::new(RRD_V2_NEW_FN);