use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{
- Authid, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig, TapeBackupJobSetup,
- TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT,
- PRIV_TAPE_WRITE, UPID_SCHEMA,
+ Authid, BackupNamespace, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig,
+ TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ,
+ PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
task_log!(worker, "update media online status");
let changer_name = update_media_online_status(&setup.drive)?;
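+ // ns_magic: ask the PoolWriter for the namespace-aware archive formats unless
+ // the job only covers the root namespace at depth 0.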
+ let root_namespace = setup.ns.clone().unwrap_or_default();
+ let ns_magic = !root_namespace.is_root() || setup.recursion_depth != Some(0);
let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;
- let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?;
- // FIXME: Namespaces! Probably just recurse for now? Not sure about the usage here...
+ let mut pool_writer =
+ PoolWriter::new(pool, &setup.drive, worker, email, force_media_set, ns_magic)?;
let mut group_list = Vec::new();
- let root_namespace = if let Some(ns) = &setup.ns {
- ns.clone()
- } else {
- Default::default()
- };
let namespaces =
datastore.recursive_iter_backup_ns_ok(root_namespace, setup.recursion_depth)?;
for ns in namespaces {
drive::{lock_tape_device, request_and_load_media, set_tape_device_state, TapeDriver},
file_formats::{
CatalogArchiveHeader, ChunkArchiveDecoder, ChunkArchiveHeader, SnapshotArchiveHeader,
- PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
- PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
- PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
- PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+ PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
+ PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
+ PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2,
},
lock_media_set, Inventory, MediaCatalog, MediaId, MediaSet, MediaSetCatalog,
TAPE_STATUS_DIR,
}
match header.content_magic {
- PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
+ | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2 => {
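+ // v1.1 and v1.2 share the same header layout; v1.2 merely allows an optional
+ // namespace prefix in the snapshot field, so both are decoded the same way here.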
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
bail!("unexpected snapshot archive version (v1.0)");
}
- PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2 => {
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
reader.skip_data()?; // read all data
}
- PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
+ PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 | PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1 => {
let header_data = reader.read_exact_allocated(header.size as usize)?;
let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
bail!("missing MediaContentHeader");
}
- if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
+ if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
+ || header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1
+ {
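+ // catalog archives v1.0 and v1.1 are handled alike here; v1.1 entries may
+ // carry a namespace prefix in their snapshot field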
task_log!(worker, "found catalog at pos {}", current_file_number);
let header_data = reader.read_exact_allocated(header.size as usize)?;
use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
-use crate::tape::file_formats::{CatalogArchiveHeader, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0};
+use crate::tape::file_formats::CatalogArchiveHeader;
/// Write a media catalog to the tape
///
media_set_uuid: &Uuid,
seq_nr: usize,
file: &mut File,
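+ // content magic to write into the MediaContentHeader (catalog archive v1.0,
+ // or the namespace-aware v1.1)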
+ version: [u8; 8],
) -> Result<Option<Uuid>, std::io::Error> {
let archive_header = CatalogArchiveHeader {
uuid: uuid.clone(),
.as_bytes()
.to_vec();
- let header = MediaContentHeader::new(
- PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
- header_data.len() as u32,
- );
+ let header = MediaContentHeader::new(version, header_data.len() as u32);
let content_uuid: Uuid = header.uuid.into();
let leom = writer.write_header(&header, &header_data)?;
pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 125, 232, 114, 133];
// openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.1")[0..8];
pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98];
+// v1.2 introduced an optional, in-line namespace prefix for the snapshot field
+// openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.2")[0..8];
+pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2: [u8; 8] = [98, 16, 54, 155, 186, 16, 51, 29];
// openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.0")[0..8];
pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] =
[183, 207, 199, 37, 158, 153, 30, 115];
+// v1.1 introduced an optional, in-line namespace prefix for the snapshot field
+// openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.1")[0..8];
+pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1: [u8; 8] = [179, 236, 113, 240, 173, 236, 2, 96];
lazy_static::lazy_static! {
// Map content magic numbers to human readable names.
map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, "Proxmox Backup Chunk Archive v1.1");
map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, "Proxmox Backup Snapshot Archive v1.0");
map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, "Proxmox Backup Snapshot Archive v1.1");
+ map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2, "Proxmox Backup Snapshot Archive v1.2");
map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, "Proxmox Backup Catalog Archive v1.0");
+ map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1, "Proxmox Backup Catalog Archive v1.1");
map
};
}
use pbs_datastore::SnapshotReader;
use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
-use crate::tape::file_formats::{SnapshotArchiveHeader, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1};
+use crate::tape::file_formats::{
+ SnapshotArchiveHeader, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2,
+};
/// Write a set of files as `pxar` archive to the tape
///
.as_bytes()
.to_vec();
- let header = MediaContentHeader::new(
- PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
- header_data.len() as u32,
- );
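+ // Keep the v1.1 magic for snapshots in the root namespace so their on-tape
+ // format stays unchanged; snapshots in a non-root namespace get the v1.2
+ // magic, which allows a namespace prefix in the snapshot field.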
+ let version_magic = if backup_dir.backup_ns().is_root() {
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
+ } else {
+ PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2
+ };
+
+ let header = MediaContentHeader::new(version_magic, header_data.len() as u32);
let content_uuid = header.uuid.into();
let root_metadata = pxar::Metadata::dir_builder(0o0664).build();
MediaCatalog, MediaId, MediaPool, COMMIT_BLOCK_SIZE, MAX_CHUNK_ARCHIVE_SIZE, TAPE_STATUS_DIR,
};
+use super::file_formats::{
+ PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
+};
+
struct PoolWriterState {
drive: Box<dyn TapeDriver>,
// Media Uuid from loaded media
status: Option<PoolWriterState>,
catalog_set: Arc<Mutex<CatalogSet>>,
notify_email: Option<String>,
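+ // when true, catalogs are written with the namespace-aware v1.1 archive magic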
+ ns_magic: bool,
}
impl PoolWriter {
worker: &WorkerTask,
notify_email: Option<String>,
force_media_set: bool,
+ ns_magic: bool,
) -> Result<Self, Error> {
let current_time = proxmox_time::epoch_i64();
status: None,
catalog_set: Arc::new(Mutex::new(catalog_set)),
notify_email,
+ ns_magic,
})
}
/// archive is marked incomplete. The caller should mark the media
/// as full and try again using another media.
pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result<bool, Error> {
+ let catalog_magic = self.catalog_version();
+
let status = match self.status {
Some(ref mut status) => status,
None => bail!("PoolWriter - no media loaded"),
let mut file = Self::open_catalog_file(uuid)?;
- let done = tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)?
- .is_some();
+ let done = tape_write_catalog(
+ writer.as_mut(),
+ uuid,
+ media_set.uuid(),
+ seq_nr,
+ &mut file,
+ catalog_magic,
+ )?
+ .is_some();
Ok(done)
}
}
media_list = &media_list[..(media_list.len() - 1)];
+ let catalog_magic = self.catalog_version();
+
let status = match self.status {
Some(ref mut status) => status,
None => bail!("PoolWriter - no media loaded"),
task_log!(worker, "write catalog for previous media: {}", uuid);
- if tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)?
- .is_none()
+ if tape_write_catalog(
+ writer.as_mut(),
+ uuid,
+ media_set.uuid(),
+ seq_nr,
+ &mut file,
+ catalog_magic,
+ )?
+ .is_none()
{
bail!("got EOM while writing start catalog");
}
) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set))
}
+
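+ /// Content magic for catalog archives written by this PoolWriter: the
+ /// namespace-aware v1.1 when `ns_magic` is set, v1.0 otherwise.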
+ pub(crate) fn catalog_version(&self) -> [u8; 8] {
+ if self.ns_magic {
+ PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1
+ } else {
+ PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
+ }
+ }
}
/// write up to <max_size> of chunks