use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
+use PVE::ReplicationConfig;
+use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuMigrate;
# Note: $pool is only needed when creating a VM, because pool permissions
# are automatically inherited if VM already exists inside a pool.
+my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
# NOTE(review): this span is a unified-diff hunk fragment — leading '+'/'-'
# markers are patch residue, not Perl.  The closure allocates/normalizes the
# disk entries of $settings for VM $vmid; it is INCOMPLETE in this view (the
# sub never closes, and the error path references @delete/$opt declared in
# another hunk), so only comments are added here.
my $create_disks = sub {
my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $settings, $default_storage) = @_;
# $vollist collects volids of freshly allocated images (presumably for
# rollback on error — TODO confirm against the missing tail of this sub).
my $vollist = [];
# $res maps drive name -> final printable drive string.
my $res = {};
- PVE::QemuServer::foreach_drive($settings, sub {
+
# Patch intent visible here: the inline callback is hoisted into $code so the
# foreach_drive call below can be wrapped in eval for cleanup on failure.
+ my $code = sub {
my ($ds, $disk) = @_;
my $volid = $disk->{file};
# 'none'/'cdrom'/empty: no image to allocate — drop any stale size and
# just re-print the drive definition.
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
- } elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
# Patch replaces the inline pattern with the shared $NEW_DISK_RE
# ("storage:size" request for a new image).
+ } elsif ($volid =~ $NEW_DISK_RE) {
# $2 = optional storage id, $3 = size in GiB; fall back to the
# caller-supplied default storage.
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
}
- });
+ };
+
+ eval { PVE::QemuServer::foreach_drive($settings, $code); };
# free allocated images on error
if (my $err = $@) {
# NOTE(review): @delete and $opt are not in scope in this fragment —
# they belong to a different hunk; the real cleanup body is not visible.
push @delete, $opt;
}
# Reconstructed from the '+' diff hunk (patch-marker residue stripped).
# Guard: refuse to attach a volume that cannot be replicated when the VM
# already has replication jobs configured.
my $repl_conf = PVE::ReplicationConfig->new();
# Second arg 1 = no-die probe; truthy result means jobs exist for $vmid.
my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
my $check_replication = sub {
    my ($drive) = @_;
    return if !$is_replicated;
    my $volid = $drive->{file};
    # 'replicate' defaults to enabled when the property is absent (//1).
    return if !$volid || !($drive->{replicate}//1);
    # CD-ROMs are never replicated.
    return if PVE::QemuServer::drive_is_cdrom($drive);

    my ($storeid, $format);
    if ($volid =~ $NEW_DISK_RE) {
	# New-disk request ("storage:size") — image not allocated yet, so
	# derive the format from the drive property or the storage default.
	$storeid = $2;
	$format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
    } else {
	# Existing volume: parse storage id and ask the storage layer for
	# the concrete format (element 6 of parse_volname's list).
	($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
	$format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
    }
    return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
    die "cannot add non-replicatable volume to a replicated VM\n";
};
+
# NOTE(review): two spliced diff hunks follow; '+'/'-' prefixes are patch
# residue.  First loop: validate/normalize each drive or net option in $param.
foreach my $opt (keys %$param) {
if (PVE::QemuServer::is_valid_drivename($opt)) {
# cleanup drive path
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
# Patch adds the replication guard for every drive being set.
+ $check_replication->($drive);
$param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
} elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
# NOTE(review): hunk boundary — the net branch body is missing; the next
# loop (pending-delete handling) belongs to a different part of the file.
foreach my $opt (@delete) {
$modified->{$opt} = 1;
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- if (!defined($conf->{$opt})) {
# Patch widens the check so a pending (not yet applied) value also
# counts as "set" and can be deleted without the warning.
+ if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
warn "cannot delete '$opt' - not set in current configuration!\n";
$modified->{$opt} = 0;
next;
# NOTE(review): dangling postfix 'if' — its statement is in a missing hunk.
if defined($conf->{pending}->{$opt});
&$create_disks($rpcenv, $authuser, $conf->{pending}, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
# Removed branch: old inline 'replicate' option validation (superseded by
# the replication-job config).  NOTE(review): the removed code had a bug —
# '$param->{opt}' is missing the '$' sigil (literal key "opt", not $opt).
- } elsif ($opt eq "replicate") {
- # check if all volumes have replicate feature
- PVE::QemuServer::get_replicatable_volumes($storecfg, $conf);
- my $repl = PVE::JSONSchema::check_format('pve-replicate', $param->{opt});
- PVE::Cluster::check_node_exists($repl->{target});
- $conf->{$opt} = $param->{$opt};
} else {
# All other options are staged as pending changes.
$conf->{pending}->{$opt} = $param->{$opt};
}
# Early guards before destroying a VM (fragment of the destroy handler).
# HA-managed VMs must be removed from HA first.
die "unable to remove VM $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
# do not allow destroy if there are replication jobs
# (check_for_existing_jobs without the no-die flag dies when jobs exist)
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+
# early tests (repeat after locking)
die "VM $vmid is running - destroy failed\n"
if PVE::QemuServer::check_running($vmid);
# NOTE(review): several unrelated hunk fragments follow; each is incomplete.
# vncproxy: build the remote qm command and a listening TCP socket.
$cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
my $sock = IO::Socket::IP->new(
# Patch adds ReuseAddr so a quickly re-used port does not fail in
# TIME_WAIT.  NOTE(review): the new() call is not closed in this view.
+ ReuseAddr => 1,
Listen => 1,
LocalPort => $port,
Proto => 'tcp',
# HA start: delegate to ha-manager instead of starting directly; the patch
# rewords the message to reflect that this only *requests* the state change.
my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
- print "Executing HA start for VM $vmid\n";
+ print "Requesting HA start for VM $vmid\n";
PVE::Tools::run_command($cmd);
# HA stop (same message rewording), appearing in two call sites.
my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
- print "Executing HA stop for VM $vmid\n";
+ print "Requesting HA stop for VM $vmid\n";
PVE::Tools::run_command($cmd);
my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
- print "Executing HA stop for VM $vmid\n";
+ print "Requesting HA stop for VM $vmid\n";
PVE::Tools::run_command($cmd);
# Clone handler fragment: decide per-drive between full copy and linked clone.
$newconf->{$opt} = $value; # simply copy configuration
} else {
if ($param->{full}) {
# Patch improves the error: names the drive and adds the trailing
# newline so the die message is shown without file/line noise.
- die "Full clone feature is not available"
+ die "Full clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
$fullclone->{$opt} = 1;
} else {
# not full means clone instead of copy
- die "Linked clone feature is not available"
+ die "Linked clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running);
}
$drives->{$opt} = $drive;
# HA migrate: same "Requesting" message rewording as the start/stop sites.
my $cmd = ['ha-manager', 'migrate', $service, $target];
- print "Executing HA migrate for VM $vmid to node $target\n";
+ print "Requesting HA migration for VM $vmid to node $target\n";
PVE::Tools::run_command($cmd);
# Migrate handler tail (fragment): non-HA path forks a worker.
} else {
my $realcmd = sub {
# Patch drops the unused $upid argument unpacking.
- my $upid = shift;
-
PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
};
- return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd);
# Patch wraps the migration in the per-guest migration lock (10s timeout)
# so it cannot race with replication runs on the same guest.
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
}
}});
# Snapshot rollback handler tail (fragment; $realcmd opens in a missing hunk).
PVE::QemuConfig->snapshot_rollback($vmid, $snapname);
};
- return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd);
# Same pattern as the migrate handler: take the guest migration lock so a
# concurrent replication run cannot create snapshots mid-rollback.
+ my $worker = sub {
+ # hold migration lock, this makes sure that nobody create replication snapshots
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $worker);
}});
__PACKAGE__->register_method({