use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::INotify;
use PVE::Cluster qw(cfs_read_file);
+use PVE::RRD;
+use PVE::DataCenterConfig;
use PVE::AccessControl;
use PVE::Firewall;
use PVE::Storage;
use PVE::LXC::Create;
use PVE::LXC::Migrate;
use PVE::GuestHelpers;
+use PVE::VZDump::Plugin;
use PVE::API2::LXC::Config;
use PVE::API2::LXC::Status;
use PVE::API2::LXC::Snapshot;
__PACKAGE__->register_method ({
+    # Delegate all {vmid}/config sub-paths to the dedicated Config API subclass.
subclass => "PVE::API2::LXC::Config",
- path => '{vmid}/config',
+ path => '{vmid}/config',
});
__PACKAGE__->register_method ({
type => 'boolean',
description => "Mark this as restore task.",
},
+ unique => {
+ optional => 1,
+ type => 'boolean',
+ description => "Assign a unique random ethernet address.",
+ requires => 'restore',
+ },
pool => {
optional => 1,
type => 'string', format => 'pve-poolid',
"OpenSSH format).",
},
bwlimit => {
- description => "Override i/o bandwidth limit (in KiB/s).",
+ description => "Override I/O bandwidth limit (in KiB/s).",
optional => 1,
type => 'number',
minimum => '0',
+ default => 'restore limit from datacenter or storage config',
},
start => {
optional => 1,
# 'unprivileged' is read-only, so we can't pass it to update_pct_config
my $unprivileged = extract_param($param, 'unprivileged');
my $restore = extract_param($param, 'restore');
+ my $unique = extract_param($param, 'unique');
+
+ # used to skip firewall config restore if user lacks permission
+ my $skip_fw_config_restore = 0;
if ($restore) {
# fixme: limit allowed parameters
}
-
+
my $force = extract_param($param, 'force');
if (!($same_container_exists && $restore && $force)) {
} elsif ($restore && $force && $same_container_exists &&
$rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
# OK: user has VM.Backup permissions, and want to restore an existing VM
+
+ # we don't want to restore a container-provided FW conf in this case
+ # since the user is lacking permission to configure the container's FW
+ $skip_fw_config_restore = 1;
} else {
raise_perm_exc();
}
my $archive;
if ($ostemplate eq '-') {
- die "pipe requires cli environment\n"
- if $rpcenv->{type} ne 'cli';
- die "pipe can only be used with restore tasks\n"
+ die "pipe requires cli environment\n"
+ if $rpcenv->{type} ne 'cli';
+ die "pipe can only be used with restore tasks\n"
if !$restore;
$archive = '-';
die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs});
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate);
- $archive = PVE::Storage::abs_filesystem_path($storage_cfg, $ostemplate);
+ $archive = $ostemplate;
}
my %used_storages;
my $check_and_activate_storage = sub {
my ($sid) = @_;
- my $scfg = PVE::Storage::storage_check_node($storage_cfg, $sid, $node);
+ my $scfg = PVE::Storage::storage_check_enabled($storage_cfg, $sid, $node);
raise_param_exc({ storage => "storage '$sid' does not support container directories"})
if !$scfg->{content}->{rootdir};
# check storage access, activate storage
my $delayed_mp_param = {};
- PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
+ PVE::LXC::Config->foreach_volume($mp_param, sub {
my ($ms, $mountpoint) = @_;
my $volid = $mountpoint->{volume};
my $code = sub {
my $old_conf = PVE::LXC::Config->load_config($vmid);
+ my $was_template;
my $vollist = [];
eval {
my $orig_mp_param; # only used if $restore
if ($restore) {
die "can't overwrite running container\n" if PVE::LXC::check_running($vmid);
- if ($is_root && $archive ne '-') {
+ if ($archive ne '-') {
my $orig_conf;
- ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($archive);
- # When we're root call 'restore_configuration' with ristricted=0,
+ print "recovering backed-up configuration from '$archive'\n";
+ ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid);
+
+ $was_template = delete $orig_conf->{template};
+
+ # When we're root call 'restore_configuration' with restricted=0,
# causing it to restore the raw lxc entries, among which there may be
# 'lxc.idmap' entries. We need to make sure that the extracted contents
# of the container match up with the restored configuration afterwards:
- $conf->{lxc} = [grep { $_->[0] eq 'lxc.idmap' } @{$orig_conf->{lxc}}];
+ $conf->{lxc} = $orig_conf->{lxc} if $is_root;
+
+ $conf->{unprivileged} = $orig_conf->{unprivileged}
+ if !defined($unprivileged) && defined($orig_conf->{unprivileged});
}
}
if ($storage_only_mode) {
if ($restore) {
if (!defined($orig_mp_param)) {
- (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($archive);
+ print "recovering backed-up configuration from '$archive'\n";
+ (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid);
}
$mp_param = $orig_mp_param;
die "rootfs configuration could not be recovered, please check and specify manually!\n"
if !defined($mp_param->{rootfs});
- PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
+ PVE::LXC::Config->foreach_volume($mp_param, sub {
my ($ms, $mountpoint) = @_;
my $type = $mountpoint->{type};
if ($type eq 'volume') {
eval {
my $rootdir = PVE::LXC::mount_all($vmid, $storage_cfg, $conf, 1);
$bwlimit = PVE::Storage::get_bandwidth_limit('restore', [keys %used_storages], $bwlimit);
- PVE::LXC::Create::restore_archive($archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit);
+ print "restoring '$archive' now..\n"
+ if $restore && $archive ne '-';
+ PVE::LXC::Create::restore_archive($storage_cfg, $archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit);
if ($restore) {
- PVE::LXC::Create::restore_configuration($vmid, $rootdir, $conf, !$is_root);
+ print "merging backed-up and given configuration..\n";
+ PVE::LXC::Create::restore_configuration($vmid, $storage_cfg, $archive, $rootdir, $conf, !$is_root, $unique, $skip_fw_config_restore);
+ my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
+ $lxc_setup->template_fixup($conf);
} else {
my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); # detect OS
PVE::LXC::Config->write_config($vmid, $conf); # safe config (after OS detection)
foreach my $mp (keys %$delayed_mp_param) {
$conf->{$mp} = $delayed_mp_param->{$mp};
}
+ # If the template flag was set, we try to convert again to template after restore
+ if ($was_template) {
+ print STDERR "Convert restored container to template...\n";
+ PVE::LXC::template_create($vmid, $conf);
+ $conf->{template} = 1;
+ }
PVE::LXC::Config->write_config($vmid, $conf);
};
if (my $err = $@) {
PVE::LXC::destroy_disks($storage_cfg, $vollist);
- eval { PVE::LXC::destroy_config($vmid) };
+ eval { PVE::LXC::Config->destroy_config($vmid) };
warn $@ if $@;
die "$emsg $err";
}
my $res = [
{ subdir => 'config' },
+ { subdir => 'pending' },
{ subdir => 'status' },
{ subdir => 'vncproxy' },
{ subdir => 'termproxy' },
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_graph(
+ return PVE::RRD::create_rrd_graph(
"pve2-vm/$param->{vmid}", $param->{timeframe},
$param->{ds}, $param->{cf});
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_data(
+ return PVE::RRD::create_rrd_data(
"pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
}});
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }),
+ force => {
+ type => 'boolean',
+ description => "Force destroy, even if running.",
+ default => 0,
+ optional => 1,
+ },
+ purge => {
+ type => 'boolean',
+ description => "Remove container from all related configurations."
+ ." For example, backup jobs, replication jobs or HA."
+ ." Related ACLs and Firewall entries will *always* be removed.",
+ default => 0,
+ optional => 1,
+ },
+ 'destroy-unreferenced-disks' => {
+ type => 'boolean',
+ description => "If set, destroy additionally all disks with the VMID from all"
+ ." enabled storages which are not referenced in the config.",
+ optional => 1,
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
-
my $vmid = $param->{vmid};
# test if container exists
+
my $conf = PVE::LXC::Config->load_config($vmid);
+ my $early_checks = sub {
+ my ($conf) = @_;
+ PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
+ PVE::LXC::Config->check_lock($conf);
- my $storage_cfg = cfs_read_file("storage.cfg");
+ my $ha_managed = PVE::HA::Config::service_is_configured("ct:$vmid");
- PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
+ if (!$param->{purge}) {
+ die "unable to remove CT $vmid - used in HA resources and purge parameter not set.\n"
+ if $ha_managed;
- die "unable to remove CT $vmid - used in HA resources\n"
- if PVE::HA::Config::vm_is_ha_managed($vmid);
+ # do not allow destroy if there are replication jobs without purge
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
- # do not allow destroy if there are replication jobs
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
+ return $ha_managed;
+ };
- my $running_error_msg = "unable to destroy CT $vmid - container is running\n";
+ $early_checks->($conf);
- die $running_error_msg if PVE::LXC::check_running($vmid); # check early
+ my $running_error_msg = "unable to destroy CT $vmid - container is running\n";
+ die $running_error_msg if !$param->{force} && PVE::LXC::check_running($vmid); # check early
my $code = sub {
# reload config after lock
$conf = PVE::LXC::Config->load_config($vmid);
- PVE::LXC::Config->check_lock($conf);
+ my $ha_managed = $early_checks->($conf);
+
+ if (PVE::LXC::check_running($vmid)) {
+ die $running_error_msg if !$param->{force};
+ warn "forced to stop CT $vmid before destroying!\n";
+ if (!$ha_managed) {
+ PVE::LXC::vm_stop($vmid, 1);
+ } else {
+ run_command(['ha-manager', 'crm-command', 'stop', "ct:$vmid", '120']);
+ }
+ }
- die $running_error_msg if PVE::LXC::check_running($vmid);
+ my $storage_cfg = cfs_read_file("storage.cfg");
+ PVE::LXC::destroy_lxc_container(
+ $storage_cfg,
+ $vmid,
+ $conf,
+ { lock => 'destroyed' },
+ $param->{'destroy-unreferenced-disks'},
+ );
- PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $conf);
PVE::AccessControl::remove_vm_access($vmid);
PVE::Firewall::remove_vmfw_conf($vmid);
+ if ($param->{purge}) {
+ print "purging CT $vmid from related configurations..\n";
+ PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+ PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+
+ if ($ha_managed) {
+ PVE::HA::Config::delete_service_from_config("ct:$vmid");
+ print "NOTE: removed CT $vmid from HA resource configuration.\n";
+ }
+ }
+
+ # only now remove the zombie config, else we can have reuse race
+ PVE::LXC::Config->destroy_config($vmid);
};
my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
-
+
return $rpcenv->fork_worker('vzdestroy', $vmid, $authuser, $realcmd);
}});
optional => 1,
default => 180,
},
- force => {
- type => 'boolean',
- description => "Force migration despite local bind / device" .
- " mounts. NOTE: deprecated, use 'shared' property of mount point instead.",
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
optional => 1,
+ type => 'number',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
},
},
},
die "you can't convert a CT to template if the CT is running\n"
if PVE::LXC::check_running($vmid);
- my $scfg = PVE::Storage::config();
- PVE::LXC::Config->foreach_mountpoint($conf, sub {
- my ($ms, $mp) = @_;
-
- my ($sid) =PVE::Storage::parse_volume_id($mp->{volume}, 0);
- die "Directory storage '$sid' does not support container templates!\n"
- if $scfg->{ids}->{$sid}->{path};
- });
-
my $realcmd = sub {
PVE::LXC::template_create($vmid, $conf);
description => "Target node. Only allowed if the original VM is on shared storage.",
optional => 1,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'number',
+ minimum => '0',
+ default => 'clone limit from datacenter or storage config',
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
+ my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
-
my $newid = extract_param($param, 'newid');
-
my $pool = extract_param($param, 'pool');
-
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
}
-
my $snapname = extract_param($param, 'snapname');
-
my $storage = extract_param($param, 'storage');
-
my $target = extract_param($param, 'target');
-
my $localnode = PVE::INotify::nodename();
- undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+ undef $target if $target && ($target eq $localnode || $target eq 'localhost');
PVE::Cluster::check_node_exists($target) if $target;
PVE::Storage::storage_check_enabled($storecfg, $storage);
if ($target) {
# check if storage is available on target node
- PVE::Storage::storage_check_node($storecfg, $storage, $target);
+ PVE::Storage::storage_check_enabled($storecfg, $storage, $target);
# clone only works if target storage is shared
my $scfg = PVE::Storage::storage_config($storecfg, $storage);
die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
PVE::Cluster::check_cfs_quorum();
- my $conffile;
my $newconf = {};
my $mountpoints = {};
my $fullclone = {};
my $vollist = [];
my $running;
- PVE::LXC::Config->lock_config($vmid, sub {
- my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
+ PVE::LXC::Config->create_and_lock_config($newid, 0);
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
- $running = PVE::LXC::check_running($vmid) || 0;
+ my $lock_and_reload = sub {
+ my ($vmid, $code) = @_;
+ return PVE::LXC::Config->lock_config($vmid, sub {
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ die "Lost 'create' config lock, aborting.\n"
+ if !PVE::LXC::Config->has_lock($conf, 'create');
- my $full = extract_param($param, 'full');
- if (!defined($full)) {
- $full = !PVE::LXC::Config->is_template($src_conf);
- }
- die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full;
+ return $code->($conf);
+ });
+ };
- eval {
- die "snapshot '$snapname' does not exist\n"
- if $snapname && !defined($src_conf->{snapshots}->{$snapname});
+ my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
+ $running = PVE::LXC::check_running($vmid) || 0;
- my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf;
+ my $full = extract_param($param, 'full');
+ if (!defined($full)) {
+ $full = !PVE::LXC::Config->is_template($src_conf);
+ }
- $conffile = PVE::LXC::Config->config_file($newid);
- die "unable to create CT $newid: config file already exists\n"
- if -f $conffile;
+ eval {
+ die "parameter 'storage' not allowed for linked clones\n"
+ if defined($storage) && !$full;
- my $sharedvm = 1;
- foreach my $opt (keys %$src_conf) {
- next if $opt =~ m/^unused\d+$/;
+ die "snapshot '$snapname' does not exist\n"
+ if $snapname && !defined($src_conf->{snapshots}->{$snapname});
- my $value = $src_conf->{$opt};
+ my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf;
- if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
- my $mp = $opt eq 'rootfs' ?
- PVE::LXC::Config->parse_ct_rootfs($value) :
- PVE::LXC::Config->parse_ct_mountpoint($value);
+ my $sharedvm = 1;
+ for my $opt (sort keys %$src_conf) {
+ next if $opt =~ m/^unused\d+$/;
- if ($mp->{type} eq 'volume') {
- my $volid = $mp->{volume};
+ my $value = $src_conf->{$opt};
- my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- $sid = $storage if defined($storage);
- my $scfg = PVE::Storage::storage_config($storecfg, $sid);
- if (!$scfg->{shared}) {
- $sharedvm = 0;
- warn "found non-shared volume: $volid\n" if $target;
- }
+ if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
+ my $mp = PVE::LXC::Config->parse_volume($opt, $value);
- $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+ if ($mp->{type} eq 'volume') {
+ my $volid = $mp->{volume};
- if ($full) {
- die "Cannot do full clones on a running container without snapshots\n"
- if $running && !defined($snapname);
- $fullclone->{$opt} = 1;
- } else {
- # not full means clone instead of copy
- die "Linked clone feature for '$volid' is not available\n"
- if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
- }
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ $sid = $storage if defined($storage);
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+ if (!$scfg->{shared}) {
+ $sharedvm = 0;
+ warn "found non-shared volume: $volid\n" if $target;
+ }
- $mountpoints->{$opt} = $mp;
- push @$vollist, $volid;
+ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+ if ($full) {
+ die "Cannot do full clones on a running container without snapshots\n"
+ if $running && !defined($snapname);
+ $fullclone->{$opt} = 1;
} else {
- # TODO: allow bind mounts?
- die "unable to clone mountpint '$opt' (type $mp->{type})\n";
+ # not full means clone instead of copy
+ die "Linked clone feature for '$volid' is not available\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running, {'valid_target_formats' => ['raw', 'subvol']});
}
- } elsif ($opt =~ m/^net(\d+)$/) {
- # always change MAC! address
- my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
- my $net = PVE::LXC::Config->parse_lxc_network($value);
- $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
- $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net);
+
+ $mountpoints->{$opt} = $mp;
+ push @$vollist, $volid;
+
} else {
- # copy everything else
- $newconf->{$opt} = $value;
+ # TODO: allow bind mounts?
+ die "unable to clone mountpoint '$opt' (type $mp->{type})\n";
}
+ } elsif ($opt =~ m/^net(\d+)$/) {
+ # always change MAC! address
+ my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
+ my $net = PVE::LXC::Config->parse_lxc_network($value);
+ $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
+ $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net);
+ } else {
+ # copy everything else
+ $newconf->{$opt} = $value;
}
- die "can't clone CT to node '$target' (CT uses local storage)\n"
- if $target && !$sharedvm;
+ }
+ die "can't clone CT to node '$target' (CT uses local storage)\n"
+ if $target && !$sharedvm;
- # Replace the 'disk' lock with a 'create' lock.
- $newconf->{lock} = 'create';
+ # Replace the 'disk' lock with a 'create' lock.
+ $newconf->{lock} = 'create';
- delete $newconf->{template};
- if ($param->{hostname}) {
- $newconf->{hostname} = $param->{hostname};
- }
+ delete $newconf->{snapshots};
+ delete $newconf->{pending};
+ delete $newconf->{template};
+ if ($param->{hostname}) {
+ $newconf->{hostname} = $param->{hostname};
+ }
- if ($param->{description}) {
- $newconf->{description} = $param->{description};
- }
+ if ($param->{description}) {
+ $newconf->{description} = $param->{description};
+ }
- # create empty/temp config - this fails if CT already exists on other node
+ $lock_and_reload->($newid, sub {
PVE::LXC::Config->write_config($newid, $newconf);
+ });
+ };
+ if (my $err = $@) {
+ eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+ warn "Failed to remove source CT config lock - $@\n" if $@;
+
+ eval {
+ $lock_and_reload->($newid, sub {
+ PVE::LXC::Config->destroy_config($newid);
+ PVE::Firewall::remove_vmfw_conf($newid);
+ });
};
- if (my $err = $@) {
- eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
- warn $@ if $@;
- die $err;
- }
- });
+ warn "Failed to remove target CT config - $@\n" if $@;
+
+ die $err;
+ }
my $update_conf = sub {
my ($key, $value) = @_;
- return PVE::LXC::Config->lock_config($newid, sub {
- my $conf = PVE::LXC::Config->load_config($newid);
- die "Lost 'create' config lock, aborting.\n"
- if !PVE::LXC::Config->has_lock($conf, 'create');
+ return $lock_and_reload->($newid, sub {
+ my $conf = shift;
$conf->{$key} = $value;
PVE::LXC::Config->write_config($newid, $conf);
});
local $SIG{HUP} = sub { die "interrupted by signal\n"; };
PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+ my $bwlimit = extract_param($param, 'bwlimit');
foreach my $opt (keys %$mountpoints) {
my $mp = $mountpoints->{$opt};
my $newvolid;
if ($fullclone->{$opt}) {
print "create full clone of mountpoint $opt ($volid)\n";
- my $target_storage = $storage // PVE::Storage::parse_volume_id($volid);
- $newvolid = PVE::LXC::copy_volume($mp, $newid, $target_storage, $storecfg, $newconf, $snapname);
+ my $source_storage = PVE::Storage::parse_volume_id($volid);
+ my $target_storage = $storage // $source_storage;
+ my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', [$source_storage, $target_storage], $bwlimit);
+ $newvolid = PVE::LXC::copy_volume($mp, $newid, $target_storage, $storecfg, $newconf, $snapname, $clonelimit);
} else {
print "create linked clone of mount point $opt ($volid)\n";
$newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname);
}
PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
- PVE::LXC::Config->remove_lock($newid, 'create');
- if ($target) {
- # always deactivate volumes - avoid lvm LVs to be active on several nodes
- PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
- PVE::Storage::deactivate_volumes($storecfg, $newvollist);
-
- my $newconffile = PVE::LXC::Config->config_file($newid, $target);
- die "Failed to move config to node '$target' - rename failed: $!\n"
- if !rename($conffile, $newconffile);
- }
+ $lock_and_reload->($newid, sub {
+ my $conf = shift;
+ my $rootdir = PVE::LXC::mount_all($newid, $storecfg, $conf, 1);
+ eval {
+ my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
+ $lxc_setup->post_clone_hook($conf);
+ };
+ my $err = $@;
+ eval { PVE::LXC::umount_all($newid, $storecfg, $conf, 1); };
+ if ($err) {
+ warn "$@\n" if $@;
+ die $err;
+ } else {
+ die $@ if $@;
+ }
+ });
};
my $err = $@;
-
# Unlock the source config in any case:
eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
warn $@ if $@;
if ($err) {
# Now cleanup the config & disks:
- unlink $conffile;
-
sleep 1; # some storages like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
eval { PVE::Storage::vdisk_free($storecfg, $volid); };
warn $@ if $@;
}
+
+ eval {
+ $lock_and_reload->($newid, sub {
+ PVE::LXC::Config->destroy_config($newid);
+ PVE::Firewall::remove_vmfw_conf($newid);
+ });
+ };
+ warn "Failed to remove target CT config - $@\n" if $@;
+
die "clone failed: $err";
}
+ $lock_and_reload->($newid, sub {
+ PVE::LXC::Config->remove_lock($newid, 'create');
+
+ if ($target) {
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
+ PVE::Storage::deactivate_volumes($storecfg, $newvollist);
+
+ PVE::LXC::Config->move_config_to_node($newid, $target);
+ }
+ });
+
return;
};
- PVE::Firewall::clone_vmfw_conf($vmid, $newid);
return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
}});
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::LXC::Config->mountpoint_names()],
+ enum => [PVE::LXC::Config->valid_volume_keys()],
},
size => {
type => 'string',
my $running = PVE::LXC::check_running($vmid);
my $disk = $param->{disk};
- my $mp = $disk eq 'rootfs' ? PVE::LXC::Config->parse_ct_rootfs($conf->{$disk}) :
- PVE::LXC::Config->parse_ct_mountpoint($conf->{$disk});
+ my $mp = PVE::LXC::Config->parse_volume($disk, $conf->{$disk});
my $volid = $mp->{volume};
PVE::Storage::activate_volumes($storage_cfg, [$volid]);
my $size = PVE::Storage::volume_size_info($storage_cfg, $volid, 5);
+
+ die "Could not determine current size of volume '$volid'\n" if !defined($size);
+
$newsize += $size if $ext;
$newsize = int($newsize);
die "unable to shrink disk size\n" if $newsize < $size;
- return if $size == $newsize;
+ die "disk is already at specified size\n" if $size == $newsize;
PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr");
my $realcmd = sub {
vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
volume => {
type => 'string',
- enum => [ PVE::LXC::Config->mountpoint_names() ],
+ enum => [ PVE::LXC::Config->valid_volume_keys() ],
description => "Volume which will be moved.",
},
storage => get_standard_option('pve-storage-id', {
description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
maxLength => 40,
optional => 1,
- }
+ },
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'number',
+ minimum => '0',
+ default => 'clone limit from datacenter or storage config',
+ },
},
},
returns => {
die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid);
- if ($mpkey eq 'rootfs') {
- $mpdata = PVE::LXC::Config->parse_ct_rootfs($conf->{$mpkey});
- } elsif ($mpkey =~ m/mp\d+/) {
- $mpdata = PVE::LXC::Config->parse_ct_mountpoint($conf->{$mpkey});
- } else {
- die "Can't parse $mpkey\n";
- }
+ $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey});
$old_volid = $mpdata->{volume};
die "you can't move a volume with snapshots and delete the source\n"
eval {
PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]);
- $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf);
- $mpdata->{volume} = $new_volid;
+ my $bwlimit = extract_param($param, 'bwlimit');
+ my $source_storage = PVE::Storage::parse_volume_id($old_volid);
+ my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$source_storage, $storage], $bwlimit);
+ $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf, undef, $movelimit);
+ if (PVE::LXC::Config->is_template($conf)) {
+ PVE::Storage::activate_volumes($storage_cfg, [ $new_volid ]);
+ my $template_volid = PVE::Storage::vdisk_create_base($storage_cfg, $new_volid);
+ $mpdata->{volume} = $template_volid;
+ } else {
+ $mpdata->{volume} = $new_volid;
+ }
PVE::LXC::Config->lock_config($vmid, sub {
my $digest = $conf->{digest};
PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]);
PVE::Storage::vdisk_free($storage_cfg, $old_volid);
};
- warn $@ if $@;
+ if (my $err = $@) {
+ warn $err;
+ PVE::LXC::Config->lock_config($vmid, sub {
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->add_unused_volume($conf, $old_volid);
+ PVE::LXC::Config->write_config($vmid, $conf);
+ });
+ }
}
};
my $err = $@;
return $task;
}});
+__PACKAGE__->register_method({
+    # GET /nodes/{node}/lxc/{vmid}/pending — report each config key together
+    # with its current and pending value (mirrors the QEMU pending endpoint).
+ name => 'vm_pending',
+ path => '{vmid}/pending',
+ method => 'GET',
+ proxyto => 'node',
+ description => 'Get container configuration, including pending changes.',
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ },
+ },
+ returns => {
+ type => "array",
+ items => {
+ type => "object",
+ properties => {
+ key => {
+ description => 'Configuration option name.',
+ type => 'string',
+ },
+ value => {
+ description => 'Current value.',
+ type => 'string',
+ optional => 1,
+ },
+ pending => {
+ description => 'Pending value.',
+ type => 'string',
+ optional => 1,
+ },
+ delete => {
+ description => "Indicates a pending delete request if present and not 0.",
+ type => 'integer',
+ minimum => 0,
+ maximum => 2,
+ optional => 1,
+ },
+ },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $conf = PVE::LXC::Config->load_config($param->{vmid});
+
+        # Parse the '[pending] delete' line into a hash of keys queued for removal.
+ my $pending_delete_hash = PVE::LXC::Config->parse_pending_delete($conf->{pending}->{delete});
+
+        # Merge current values, pending values and delete requests into the
+        # array-of-objects shape declared in 'returns' above.
+ return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
+ }});
+
1;