use IO::File;
use IPC::Open3;
use JSON;
+use POSIX qw(EINTR EAGAIN);
use PVE::Cluster qw(cfs_read_file);
use PVE::INotify;
use PVE::IPCC;
use PVE::JSONSchema;
+use PVE::PBSClient;
+use PVE::RESTEnvironment qw(log_warn);
use PVE::QMPClient;
use PVE::Storage::Plugin;
use PVE::Storage::PBSPlugin;
use PVE::Storage;
use PVE::Tools;
use PVE::VZDump;
+use PVE::Format qw(render_duration render_bytes);
use PVE::QemuConfig;
use PVE::QemuServer;
+use PVE::QemuServer::Helpers;
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);
if defined($conf->{name});
$self->{vm_was_running} = 1;
+ $self->{vm_was_paused} = 0;
if (!PVE::QemuServer::check_running($vmid)) {
$self->{vm_was_running} = 0;
+ } elsif (PVE::QemuServer::vm_is_paused($vmid, 0)) {
+ # Do not treat a suspended VM as paused, as it would cause us to skip
+ # fs-freeze even if the VM wakes up before we reach qga_fs_freeze.
+ $self->{vm_was_paused} = 1;
}
$task->{hostname} = $conf->{name};
if (!$volume->{included}) {
$self->loginfo("exclude disk '$name' '$volid' ($volume->{reason})");
next;
- } elsif ($self->{vm_was_running} && $volume_config->{iothread}) {
- if (!PVE::QemuServer::Machine::runs_at_least_qemu_version($vmid, 4, 0, 1)) {
- die "disk '$name' '$volid' (iothread=on) can't use backup feature with running QEMU " .
- "version < 4.0.1! Either set backup=no for this drive or upgrade QEMU and restart VM\n";
- }
+ } elsif ($self->{vm_was_running} && $volume_config->{iothread} &&
+ !PVE::QemuServer::Machine::runs_at_least_qemu_version($vmid, 4, 0, 1)) {
+ die "disk '$name' '$volid' (iothread=on) can't use backup feature with running QEMU " .
+ "version < 4.0.1! Either set backup=no for this drive or upgrade QEMU and restart VM\n";
} else {
my $log = "include disk '$name' '$volid'";
if (defined(my $size = $volume_config->{size})) {
}
next if !$path;
- my ($size, $format) = eval { PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5) };
- die "no such volume '$volid'\n" if $@;
+ my ($size, $format);
+ if ($storeid) {
+ # The call in list context can be expensive for certain plugins like RBD, just get size
+ $size = eval { PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5) };
+ die "cannot determine size of volume '$volid' - $@\n" if $@;
+
+ my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
+ $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
+ } else {
+ ($size, $format) = eval {
+ PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5);
+ };
+ die "cannot determine size and format of volume '$volid' - $@\n" if $@;
+ }
my $diskinfo = {
path => $path,
volid => $volid,
storeid => $storeid,
+ size => $size,
format => $format,
virtdev => $ds,
qmdevice => "drive-$ds",
};
+ if ($ds eq 'tpmstate0') {
+ # TPM drive only exists for backup, which is reflected in the name
+ $diskinfo->{qmdevice} = 'drive-tpmstate0-backup';
+ $task->{tpmpath} = $path;
+ }
+
if (-b $path) {
$diskinfo->{type} = 'block';
} else {
sub suspend_vm {
    my ($self, $task, $vmid) = @_;

    # A VM that was already paused before the backup started needs no extra suspend.
    return if $self->{vm_was_paused};

    $self->cmd("qm suspend $vmid --skiplock");
}
sub resume_vm {
    my ($self, $task, $vmid) = @_;

    # Mirror suspend_vm: a VM we never suspended must not be resumed either.
    return if $self->{vm_was_paused};

    $self->cmd("qm resume $vmid --skiplock");
}
my $firewall_src = "/etc/pve/firewall/$vmid.fw";
my $firewall_dest = "$task->{tmpdir}/qemu-server.fw";
- my $outfd = IO::File->new (">$outfile") ||
- die "unable to open '$outfile'";
- my $conffd = IO::File->new ($conffile, 'r') ||
- die "unable open '$conffile'";
+ my $outfd = IO::File->new(">$outfile") or die "unable to open '$outfile' - $!\n";
+ my $conffd = IO::File->new($conffile, 'r') or die "unable to open '$conffile' - $!\n";
my $found_snapshot;
my $found_pending;
+ my $found_cloudinit;
while (defined (my $line = <$conffd>)) {
next if $line =~ m/^\#vzdump\#/; # just to be sure
next if $line =~ m/^\#qmdump\#/; # just to be sure
if ($line =~ m/^\[(.*)\]\s*$/) {
if ($1 =~ m/PENDING/i) {
$found_pending = 1;
+ } elsif ($1 =~ m/special:cloudinit/) {
+ $found_cloudinit = 1;
} else {
$found_snapshot = 1;
}
}
- next if $found_snapshot || $found_pending; # skip all snapshots and pending changes config data
+ next if $found_snapshot || $found_pending || $found_cloudinit; # skip all snapshots,pending changes and cloudinit config data
if ($line =~ m/^unused\d+:\s*(\S+)\s*/) {
$self->loginfo("skip unused drive '$1' (not included into backup)");
}
}
-# number, [precision=1]
-my $num2str = sub {
- return sprintf( "%." . ( $_[1] || 1 ) . "f", $_[0] );
-};
-sub bytes_to_human {
- my ($bytes, $precission) = @_;
# Translate a 'query-pbs-bitmap-info' entry into a human-readable status line.
my $bitmap_action_to_human = sub {
    my ($self, $info) = @_;

    my $action = $info->{action};

    # Actions whose description does not depend on the dirty/size counters.
    my %static_text = (
	"not-used" => "disabled (no support)",
	"not-used-removed" => "disabled (old bitmap cleared)",
	"new" => "created new",
	"invalid" => "existing bitmap was invalid and has been cleared",
    );
    return $static_text{$action} if defined($static_text{$action});

    if ($action eq "used") {
	return "OK (drive clean)" if $info->{dirty} == 0;
	my $size = render_bytes($info->{size}, 1);
	my $dirty = render_bytes($info->{dirty}, 1);
	return "OK ($dirty of $size dirty)";
    }

    return "unknown";
};
my $query_backup_status_loop = sub {
- my ($self, $vmid, $job_uuid) = @_;
+ my ($self, $vmid, $job_uuid, $qemu_support) = @_;
my $starttime = time ();
my $last_time = $starttime;
- my ($last_percent, $last_total, $last_target, $last_zero, $last_transferred) = (-1, 0, 0, 0, 0);
+ my ($last_percent, $last_total, $last_target, $last_zero, $last_transferred) = (-1, 0, 0, 0, 0);
my ($transferred, $reused);
- my $first_round = 1;
my $get_mbps = sub {
my ($mb, $delta) = @_;
return "0 B/s" if $mb <= 0;
my $bw = int(($mb / $delta));
- return bytes_to_human($bw) . "/s";
+ return render_bytes($bw, 1) . "/s";
};
+ my $target = 0;
+ my $last_reused = 0;
+ my $has_query_bitmap = $qemu_support && $qemu_support->{'query-bitmap-info'};
+ my $is_template = PVE::QemuConfig->is_template($self->{vmlist}->{$vmid});
+ if ($has_query_bitmap) {
+ my $total = 0;
+ my $bitmap_info = mon_cmd($vmid, 'query-pbs-bitmap-info');
+ for my $info (sort { $a->{drive} cmp $b->{drive} } @$bitmap_info) {
+ if (!$is_template) {
+ my $text = $bitmap_action_to_human->($self, $info);
+ my $drive = $info->{drive};
+ $drive =~ s/^drive-//; # for consistency
+ $self->loginfo("$drive: dirty-bitmap status: $text");
+ }
+ $target += $info->{dirty};
+ $total += $info->{size};
+ $last_reused += $info->{size} - $info->{dirty};
+ }
+ if ($target < $total) {
+ my $total_h = render_bytes($total, 1);
+ my $target_h = render_bytes($target, 1);
+ $self->loginfo("using fast incremental mode (dirty-bitmap), $target_h dirty of $total_h total");
+ }
+ }
+
+ my $last_finishing = 0;
while(1) {
my $status = mon_cmd($vmid, 'query-backup');
my $total = $status->{total} || 0;
my $dirty = $status->{dirty};
- my $target = (defined($dirty) && $dirty < $total) ? $dirty : $total;
+ $target = (defined($dirty) && $dirty < $total) ? $dirty : $total if !$has_query_bitmap;
$transferred = $status->{transferred} || 0;
$reused = $status->{reused};
- my $percent = $target ? int(($transferred * 100)/$target) : 0;
+ my $percent = $target ? int(($transferred * 100)/$target) : 100;
my $zero = $status->{'zero-bytes'} || 0;
die "got unexpected uuid\n" if !$status->{uuid} || ($status->{uuid} ne $job_uuid);
my $duration = $ctime - $starttime;
my $rbytes = $transferred - $last_transferred;
- my $wbytes = $rbytes - ($zero - $last_zero);
+ my $wbytes;
+ if ($reused) {
+ # reused includes zero bytes for PBS
+ $wbytes = $rbytes - ($reused - $last_reused);
+ } else {
+ $wbytes = $rbytes - ($zero - $last_zero);
+ }
my $timediff = ($ctime - $last_time) || 1; # fixme
my $mbps_read = $get_mbps->($rbytes, $timediff);
my $mbps_write = $get_mbps->($wbytes, $timediff);
- my $target_h = bytes_to_human($target);
- my $transferred_h = bytes_to_human($transferred);
+ my $target_h = render_bytes($target, 1);
+ my $transferred_h = render_bytes($transferred, 1);
- if ($first_round && $target != $total) {
- my $total_h = bytes_to_human($total);
- $self->loginfo("using fast incremental mode (dirty-bitmap), $target_h dirty of $total_h total");
- }
-
- my $statusline = "status: $percent% ($transferred_h of $target_h), duration $duration"
- .", read: $mbps_read, write: $mbps_write";
+ my $statusline = sprintf("%3d%% ($transferred_h of $target_h) in %s"
+ .", read: $mbps_read, write: $mbps_write", $percent, render_duration($duration));
my $res = $status->{status} || 'unknown';
if ($res ne 'active') {
- $self->loginfo($statusline);
+ if ($last_percent < 100) {
+ $self->loginfo($statusline);
+ }
if ($res ne 'done') {
die (($status->{errmsg} || "unknown error") . "\n") if $res eq 'error';
die "got unexpected status '$res'\n";
$last_zero = $zero if $zero;
$last_transferred = $transferred if $transferred;
$last_time = $ctime;
+ $last_reused = $reused;
+
+ if (!$last_finishing && $status->{finishing}) {
+ $self->loginfo("Waiting for server to finish backup validation...");
+ }
+ $last_finishing = $status->{finishing};
}
sleep(1);
- $first_round = 0;
}
my $duration = time() - $starttime;
- if ($transferred && $duration) {
- my $transferred_h = bytes_to_human($transferred, 2);
- my $mbps = $get_mbps->($transferred, $duration);
- if ($reused) {
- my $reused_h = bytes_to_human($reused, 2);
- my $reuse_per = int($reused * 100 / $last_total);
- $self->loginfo("backup was done incrementally, reused $reused_h (${reuse_per}%) from last backup");
- $self->loginfo("transferred $transferred_h in $duration seconds ($mbps)");
- } else {
- $self->loginfo("transferred $transferred_h in $duration seconds ($mbps)");
- }
- }
if ($last_zero) {
my $zero_per = $last_target ? int(($last_zero * 100)/$last_target) : 0;
- my $zero_h = bytes_to_human($last_zero, 2);
- $self->loginfo("Backup is sparse: ${zero_per}% ($zero_h) zero data");
+ my $zero_h = render_bytes($last_zero);
+ $self->loginfo("backup is sparse: $zero_h (${zero_per}%) total zero data");
+ }
+ if ($reused) {
+ my $reused_h = render_bytes($reused);
+ my $reuse_per = int($reused * 100 / $last_total);
+ $self->loginfo("backup was done incrementally, reused $reused_h (${reuse_per}%)");
+ }
+ if ($transferred) {
+ my $transferred_h = render_bytes($transferred);
+ if ($duration) {
+ my $mbps = $get_mbps->($transferred, $duration);
+ $self->loginfo("transferred $transferred_h in $duration seconds ($mbps)");
+ } else {
+ $self->loginfo("transferred $transferred_h in <1 seconds");
+ }
}
return {
};
};
-sub archive_pbs {
# Hot-add the TPM state volume as a read-only drive so it can be included in
# the backup. Only acts when the task recorded a TPM path ($task->{tpmpath}).
my $attach_tpmstate_drive = sub {
    my ($self, $task, $vmid) = @_;

    return if !$task->{tpmpath};

    # unconditionally try to remove the tpmstate-named drive - it only exists
    # for backing up, and avoids errors if left over from some previous event
    eval { PVE::QemuServer::qemu_drivedel($vmid, "tpmstate0-backup"); };

    $self->loginfo('attaching TPM drive to QEMU for backup');

    my $drive = "file=$task->{tpmpath},if=none,read-only=on,id=drive-tpmstate0-backup";
    # Double up backslashes so they survive the HMP command-line parsing below.
    $drive =~ s/\\/\\\\/g;
    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_add auto \"$drive\"");
    # HMP reports success textually; anything without "OK" is treated as failure.
    die "attaching TPM drive failed - $ret\n" if $ret !~ m/OK/s;
};
+
# Remove the temporary TPM backup drive again, if one was attached and the VM
# is still running. Best-effort: errors from qemu_drivedel are ignored.
my $detach_tpmstate_drive = sub {
    my ($task, $vmid) = @_;

    return if !$task->{tpmpath};
    return if !PVE::QemuServer::check_running($vmid);

    eval { PVE::QemuServer::qemu_drivedel($vmid, "tpmstate0-backup"); };
};
+
# Copy supported performance tuning options from $perf into the QMP backup
# parameters, warning about (and dropping) anything the running QEMU does not
# support. $qemu_support is the 'query-proxmox-support' result (may be undef).
my sub add_backup_performance_options {
    my ($qmp_param, $perf, $qemu_support) = @_;

    # Nothing requested - nothing to do.
    return if !$perf;
    return if scalar(keys $perf->%*) == 0;

    if (!$qemu_support) {
	# Could not determine support status, so play it safe and skip all settings.
	my $settings_string = join(', ', sort keys $perf->%*);
	log_warn("ignoring setting(s): $settings_string - issue checking if supported");
	return;
    }

    if (defined(my $max_workers = $perf->{'max-workers'})) {
	if ($qemu_support->{'backup-max-workers'}) {
	    $qmp_param->{'max-workers'} = int($max_workers);
	} else {
	    log_warn("ignoring 'max-workers' setting - not supported by running QEMU");
	}
    }
}
+
# Determine the PBS encryption key file and (optional) master public key file
# for the backup target storage, validating that the storage configuration and
# the key files on disk agree. Returns ($keyfile, $master_keyfile), either of
# which may be undef. Dies on inconsistent configuration.
sub get_and_check_pbs_encryption_config {
    my ($self) = @_;

    my $opts = $self->{vzdump}->{opts};
    my $scfg = $opts->{scfg};

    my $keyfile = PVE::Storage::PBSPlugin::pbs_encryption_key_file_name($scfg, $opts->{storage});
    my $master_keyfile = PVE::Storage::PBSPlugin::pbs_master_pubkey_file_name($scfg, $opts->{storage});

    if (!-e $keyfile) {
	# No encryption key present - fail hard if the storage claims one should exist.
	my $encryption_fp = $scfg->{'encryption-key'};
	die "encryption configured ('$encryption_fp') but no encryption key file found!\n"
	    if $encryption_fp;
	if (-e $master_keyfile) {
	    # A master key without an encryption key is useless - warn, but continue.
	    $self->log(
		'warn',
		"backup target storage is configured with master-key, but no encryption key set!"
		." Ignoring master key settings and creating unencrypted backup."
	    );
	}
	return (undef, undef);
    }

    if (-e $master_keyfile) {
	$self->loginfo("enabling encryption with master key feature");
	return ($keyfile, $master_keyfile);
    }

    die "master public key configured but no key file found\n" if $scfg->{'master-pubkey'};

    $self->loginfo("enabling encryption");
    return ($keyfile, undef);

    # defensive: all cases above return or die, so this should be unreachable
    die "internal error - unhandled case for getting & checking PBS encryption ($keyfile, $master_keyfile)!";
}
- my $repo = "$username\@$server:$datastore";
- my $password = PVE::Storage::PBSPlugin::pbs_get_password($scfg, $opts->{storage});
# Free every allocated fleecing image recorded in the disk info hashes.
# Best-effort: failures are logged as warnings, not fatal.
my sub cleanup_fleecing_images {
    my ($self, $disks) = @_;

    for my $disk ($disks->@*) {
	my $volid = $disk->{'fleece-volid'} or next;
	eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
	$self->log('warn', "error removing fleecing image '$volid' - $@") if $@;
    }
}
+
# Allocate one fleecing image per backed-up disk on $fleecing_storeid and
# record its volid in the disk info hash ('fleece-volid'). TPM/EFI disks are
# skipped (too small to be worth it). On any allocation failure, already
# allocated images are cleaned up before re-raising the error.
my sub allocate_fleecing_images {
    my ($self, $disks, $vmid, $fleecing_storeid, $format) = @_;

    die "internal error - no fleecing storage specified\n" if !$fleecing_storeid;

    # TODO what about potential left-over images from a failed attempt? Just
    # auto-remove? While unlikely, could conflict with manually created image from user...

    # Loop-invariant: the storage config is only needed to decide whether image
    # names carry a format suffix (path-based storages), so fetch it once.
    my $scfg = PVE::Storage::storage_config($self->{storecfg}, $fleecing_storeid);

    eval {
	my $n = 0; # counter for fleecing image names

	for my $di ($disks->@*) {
	    next if $di->{virtdev} =~ m/^(?:tpmstate|efidisk)\d$/; # too small to be worth it
	    if ($di->{type} eq 'block' || $di->{type} eq 'file') {
		my $name = "vm-$vmid-fleece-$n";
		$name .= ".$format" if $scfg->{path};

		my $size = PVE::Tools::convert_size($di->{size}, 'b' => 'kb');

		$di->{'fleece-volid'} = PVE::Storage::vdisk_alloc(
		    $self->{storecfg}, $fleecing_storeid, $vmid, $format, $name, $size);

		$n++;
	    } else {
		die "implement me (type '$di->{type}')";
	    }
	}
    };
    if (my $err = $@) {
	cleanup_fleecing_images($self, $disks);
	die $err;
    }
}
- if (!$diskcount) {
- $self->loginfo("backup contains no disks");
# Detach all attached fleecing drives from the (locally running) QEMU
# instance. Best-effort: drivedel errors are ignored.
my sub detach_fleecing_images {
    my ($disks, $vmid) = @_;

    # Only possible (and necessary) while QEMU is running on this node.
    return if !PVE::QemuServer::Helpers::vm_running_locally($vmid);

    for my $disk ($disks->@*) {
	my $volid = $disk->{'fleece-volid'} or next;
	(my $devid = "$disk->{qmdevice}-fleecing") =~ s/^drive-//; # re-added by qemu_drivedel()
	eval { PVE::QemuServer::qemu_drivedel($vmid, $devid) };
    }
}
+
# Activate the allocated fleecing volumes and hot-add each one to QEMU as a
# '-fleecing' suffixed drive via HMP. Dies if any drive_add does not report OK.
my sub attach_fleecing_images {
    my ($self, $disks, $vmid, $format) = @_;

    # unconditionally try to remove potential left-overs from a previous backup
    detach_fleecing_images($disks, $vmid);

    # Activate only volumes that actually got a fleecing image allocated.
    my $vollist = [ map { $_->{'fleece-volid'} } grep { $_->{'fleece-volid'} } $disks->@* ];
    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    for my $di ($disks->@*) {
	if (my $volid = $di->{'fleece-volid'}) {
	    $self->loginfo("$di->{qmdevice}: attaching fleecing image $volid to QEMU");

	    my $path = PVE::Storage::path($self->{storecfg}, $volid);
	    my $devid = "$di->{qmdevice}-fleecing";
	    my $drive = "file=$path,if=none,id=$devid,format=$format,discard=unmap";
	    # Specify size explicitly, to make it work if storage backend rounded up size for
	    # fleecing image when allocating.
	    $drive .= ",size=$di->{size}" if $format eq 'raw';
	    # Double up backslashes so they survive the HMP command-line parsing below.
	    $drive =~ s/\\/\\\\/g;
	    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_add auto \"$drive\"");
	    die "attaching fleecing image $volid failed - $ret\n" if $ret !~ m/OK/s;
	}
    }
}
+
# Decide whether backup fleecing can be used for this job and, if so, allocate
# and attach the fleecing images. Returns a boolean-ish flag that the caller
# uses both for the QMP 'backup' parameter and for cleanup afterwards.
my sub check_and_prepare_fleecing {
    my ($self, $vmid, $fleecing_opts, $disks, $is_template, $qemu_support) = @_;

    # Even if the VM was started specifically for fleecing, it's possible that the VM is resumed and
    # then starts doing IO. For VMs that are not resumed the fleecing images will just stay empty,
    # so there is no big cost.

    my $use_fleecing = $fleecing_opts && $fleecing_opts->{enabled} && !$is_template;

    # $qemu_support can be undef when the 'query-proxmox-support' QMP call failed
    # (the VMA path only warns in that case) - treat that like missing fleecing
    # support instead of dereferencing undef and dying here.
    if ($use_fleecing && (!$qemu_support || !defined($qemu_support->{'backup-fleecing'}))) {
	$self->log(
	    'warn',
	    "running QEMU version does not support backup fleecing - continuing without",
	);
	$use_fleecing = 0;
    }

    if ($use_fleecing) {
	# Prefer qcow2 when the fleecing storage supports it, fall back to raw.
	my ($default_format, $valid_formats) = PVE::Storage::storage_default_format(
	    $self->{storecfg}, $fleecing_opts->{storage});
	my $format = scalar(grep { $_ eq 'qcow2' } $valid_formats->@*) ? 'qcow2' : 'raw';

	allocate_fleecing_images($self, $disks, $vmid, $fleecing_opts->{storage}, $format);
	attach_fleecing_images($self, $disks, $vmid, $format);
    }

    return $use_fleecing;
}
+
+sub archive_pbs {
+ my ($self, $task, $vmid) = @_;
+
+ my $conffile = "$task->{tmpdir}/qemu-server.conf";
+ my $firewall = "$task->{tmpdir}/qemu-server.fw";
+
+ my $opts = $self->{vzdump}->{opts};
+ my $scfg = $opts->{scfg};
+
+ my $starttime = time();
+
+ my $fingerprint = $scfg->{fingerprint};
+ my $repo = PVE::PBSClient::get_repository($scfg);
+ my $password = PVE::Storage::PBSPlugin::pbs_get_password($scfg, $opts->{storage});
+ my ($keyfile, $master_keyfile) = $self->get_and_check_pbs_encryption_config();
+
+ my $diskcount = scalar(@{$task->{disks}});
+ # proxmox-backup-client can only handle raw files and block devs, so only use it (directly) for
+ # disk-less VMs
+ if (!$diskcount) {
+ $self->loginfo("backup contains no disks");
local $ENV{PBS_PASSWORD} = $password;
local $ENV{PBS_FINGERPRINT} = $fingerprint if defined($fingerprint);
'--backup-id', "$vmid",
'--backup-time', $task->{backup_time},
];
+ if (defined(my $ns = $scfg->{namespace})) {
+ push @$cmd, '--ns', $ns;
+ }
+ if (defined($keyfile)) {
+ push @$cmd, '--keyfile', $keyfile;
+ push @$cmd, '--master-pubkey-file', $master_keyfile if defined($master_keyfile);
+ }
push @$cmd, "qemu-server.conf:$conffile";
push @$cmd, "fw.conf:$firewall" if -e $firewall;
- push @$cmd, @pathlist if scalar(@pathlist);
$self->loginfo("starting template backup");
$self->loginfo(join(' ', @$cmd));
# get list early so we die on unkown drive types before doing anything
my $devlist = _get_task_devlist($task);
+ my $use_fleecing;
$self->enforce_vm_running_for_backup($vmid);
+ $self->{qmeventd_fh} = PVE::QemuServer::register_qmeventd_handle($vmid);
my $backup_job_uuid;
eval {
die "interrupted by signal\n";
};
+ my $qemu_support = eval { mon_cmd($vmid, "query-proxmox-support") };
+ my $err = $@;
+ if (!$qemu_support || $err) {
+ die "query-proxmox-support returned empty value\n" if !$err;
+ if ($err =~ m/The command query-proxmox-support has not been found/) {
+ die "PBS backups are not supported by the running QEMU version. Please make "
+ . "sure you've installed the latest version and the VM has been restarted.\n";
+ } else {
+ die "QMP command query-proxmox-support failed - $err\n";
+ }
+ }
+
+ # pve-qemu supports it since 5.2.0-1 (PVE 6.4), so safe to die since PVE 8
+ die "master key configured but running QEMU version does not support master keys\n"
+ if !defined($qemu_support->{'pbs-masterkey'}) && defined($master_keyfile);
+
+ $attach_tpmstate_drive->($self, $task, $vmid);
+
+ my $is_template = PVE::QemuConfig->is_template($self->{vmlist}->{$vmid});
+
+ $use_fleecing = check_and_prepare_fleecing(
+ $self, $vmid, $opts->{fleecing}, $task->{disks}, $is_template, $qemu_support);
+
my $fs_frozen = $self->qga_fs_freeze($task, $vmid);
my $params = {
password => $password,
devlist => $devlist,
'config-file' => $conffile,
- 'use-dirty-bitmap' => JSON::true,
};
+ $params->{fleecing} = JSON::true if $use_fleecing;
+
+ if (defined(my $ns = $scfg->{namespace})) {
+ $params->{'backup-ns'} = $ns;
+ }
+
+ $params->{speed} = $opts->{bwlimit}*1024 if $opts->{bwlimit};
+ add_backup_performance_options($params, $opts->{performance}, $qemu_support);
+
$params->{fingerprint} = $fingerprint if defined($fingerprint);
$params->{'firewall-file'} = $firewall if -e $firewall;
- $params->{timeout} = 60; # give some time to connect to the backup server
+ $params->{encrypt} = defined($keyfile) ? JSON::true : JSON::false;
+ if (defined($keyfile)) {
+ $params->{keyfile} = $keyfile;
+ $params->{"master-keyfile"} = $master_keyfile if defined($master_keyfile);
+ }
+
+ $params->{'use-dirty-bitmap'} = JSON::true
+ if $qemu_support->{'pbs-dirty-bitmap'} && !$is_template;
+
+ $params->{timeout} = 125; # give some time to connect to the backup server
my $res = eval { mon_cmd($vmid, "backup", %$params) };
my $qmperr = $@;
$self->resume_vm_after_job_start($task, $vmid);
- my $res = $query_backup_status_loop->($self, $vmid, $backup_job_uuid);
- $task->{size} = $res->{total};
+ my $stat = $query_backup_status_loop->($self, $vmid, $backup_job_uuid, $qemu_support);
+ $task->{size} = $stat->{total};
};
my $err = $@;
if ($err) {
$self->logerr($err);
- $self->mon_backup_cancel($vmid) if defined($backup_job_uuid);
+ $self->mon_backup_cancel($vmid);
+ $self->resume_vm_after_job_start($task, $vmid);
}
$self->restore_vm_power_state($vmid);
+ if ($use_fleecing) {
+ detach_fleecing_images($task->{disks}, $vmid);
+ cleanup_fleecing_images($self, $task->{disks});
+ }
+
die $err if $err;
}
$speed = $opts->{bwlimit}*1024;
}
+ my $is_template = PVE::QemuConfig->is_template($self->{vmlist}->{$vmid});
+
my $diskcount = scalar(@{$task->{disks}});
- if (PVE::QemuConfig->is_template($self->{vmlist}->{$vmid}) || !$diskcount) {
+ if ($is_template || !$diskcount) {
my @pathlist;
foreach my $di (@{$task->{disks}}) {
if ($di->{type} eq 'block' || $di->{type} eq 'file') {
}
my $devlist = _get_task_devlist($task);
+ my $use_fleecing;
$self->enforce_vm_running_for_backup($vmid);
+ $self->{qmeventd_fh} = PVE::QemuServer::register_qmeventd_handle($vmid);
my $cpid;
my $backup_job_uuid;
die "interrupted by signal\n";
};
+ # Currently, failing to determine Proxmox support is not critical here, because it's only
+ # used for performance settings like 'max-workers'.
+ my $qemu_support = eval { mon_cmd($vmid, "query-proxmox-support") };
+ log_warn($@) if $@;
+
+ $attach_tpmstate_drive->($self, $task, $vmid);
+
+ $use_fleecing = check_and_prepare_fleecing(
+ $self, $vmid, $opts->{fleecing}, $task->{disks}, $is_template, $qemu_support);
+
my $outfh;
if ($opts->{stdout}) {
$outfh = $opts->{stdout};
devlist => $devlist
};
$params->{'firewall-file'} = $firewall if -e $firewall;
+ $params->{fleecing} = JSON::true if $use_fleecing;
+ add_backup_performance_options($params, $opts->{performance}, $qemu_support);
$qmpclient->queue_cmd($vmid, $backup_cb, 'backup', %$params);
};
my $err = $@;
if ($err) {
$self->logerr($err);
- $self->mon_backup_cancel($vmid) if defined($backup_job_uuid);
+ $self->mon_backup_cancel($vmid);
+ $self->resume_vm_after_job_start($task, $vmid);
}
$self->restore_vm_power_state($vmid);
+ if ($use_fleecing) {
+ detach_fleecing_images($task->{disks}, $vmid);
+ cleanup_fleecing_images($self, $task->{disks});
+ }
+
if ($err) {
if ($cpid) {
kill(9, $cpid);
sub qga_fs_freeze {
my ($self, $task, $vmid) = @_;
- return if !$self->{vmlist}->{$vmid}->{agent} || $task->{mode} eq 'stop' || !$self->{vm_was_running};
+ return if !$self->{vmlist}->{$vmid}->{agent} || $task->{mode} eq 'stop' || !$self->{vm_was_running} || $self->{vm_was_paused};
if (!PVE::QemuServer::qga_check_running($vmid, 1)) {
$self->loginfo("skipping guest-agent 'fs-freeze', agent configured but not running?");
return;
}
+ my $freeze = PVE::QemuServer::get_qga_key($self->{vmlist}->{$vmid}, 'freeze-fs-on-backup') // 1;
+ if (!$freeze) {
+ $self->loginfo("skipping guest-agent 'fs-freeze', disabled in VM options");
+ return;
+ }
+
$self->loginfo("issuing guest-agent 'fs-freeze' command");
eval { mon_cmd($vmid, "guest-fsfreeze-freeze") };
$self->logerr($@) if $@;
# start with skiplock
my $params = {
skiplock => 1,
+ skiptemplate => 1,
paused => 1,
};
PVE::QemuServer::vm_start($self->{storecfg}, $vmid, $params);
die $@ if $@;
}
-# resume VM againe once we got in a clear state (stop mode backup of running VM)
+# resume VM again once in a clear state (stop mode backup of running VM)
sub resume_vm_after_job_start {
my ($self, $task, $vmid) = @_;
- return if !$self->{vm_was_running};
+ return if !$self->{vm_was_running} || $self->{vm_was_paused};
if (my $stoptime = $task->{vmstoptime}) {
my $delay = time() - $task->{vmstoptime};
} else {
$self->loginfo("resuming VM again");
}
- mon_cmd($vmid, 'cont');
+ mon_cmd($vmid, 'cont', timeout => 45);
}
# stop again if VM was not running before
sub cleanup {
    my ($self, $task, $vmid) = @_;

    # Remove the temporary TPM backup drive, if one was attached.
    $detach_tpmstate_drive->($task, $vmid);

    close($self->{qmeventd_fh}) if $self->{qmeventd_fh};
}
1;