X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=src%2FPVE%2FAPI2%2FLXC.pm;h=61eaaf7c24df37d6650989549924fed69a89efee;hb=e90ddc4c6a998ee22ba217e1ef97bd3d82a12aba;hp=6de121f09b19960dff60b28145c7d0670f5d8116;hpb=5ec3949cc1a78c724a91a30547d6ba11bfb294f0;p=pve-container.git

diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 6de121f..61eaaf7 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -8,6 +8,8 @@ use PVE::Tools qw(extract_param run_command);
 use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
 use PVE::INotify;
 use PVE::Cluster qw(cfs_read_file);
+use PVE::RRD;
+use PVE::DataCenterConfig;
 use PVE::AccessControl;
 use PVE::Firewall;
 use PVE::Storage;
@@ -18,6 +20,7 @@ use PVE::LXC;
 use PVE::LXC::Create;
 use PVE::LXC::Migrate;
 use PVE::GuestHelpers;
+use PVE::VZDump::Plugin;
 use PVE::API2::LXC::Config;
 use PVE::API2::LXC::Status;
 use PVE::API2::LXC::Snapshot;
@@ -33,9 +36,21 @@ BEGIN {
     }
 }
 
+my $check_storage_access_migrate = sub {
+    my ($rpcenv, $authuser, $storecfg, $storage, $node) = @_;
+
+    PVE::Storage::storage_check_enabled($storecfg, $storage, $node);
+
+    $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+
+    my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+    die "storage '$storage' does not support CT rootdirs\n"
+        if !$scfg->{content}->{rootdir};
+};
+
 __PACKAGE__->register_method ({
     subclass => "PVE::API2::LXC::Config",
-    path => '{vmid}/config',
+    path => '{vmid}/config',
 });
 
 __PACKAGE__->register_method ({
@@ -144,6 +159,12 @@ __PACKAGE__->register_method({
                 type => 'boolean',
                 description => "Mark this as restore task.",
             },
+            unique => {
+                optional => 1,
+                type => 'boolean',
+                description => "Assign a unique random ethernet address.",
+                requires => 'restore',
+            },
             pool => {
                 optional => 1,
                 type => 'string', format => 'pve-poolid',
@@ -161,10 +182,11 @@ __PACKAGE__->register_method({
                 "OpenSSH format).",
             },
             bwlimit => {
-                description => "Override i/o bandwidth limit (in KiB/s).",
+                description => "Override I/O bandwidth limit (in KiB/s).",
                 optional => 1,
                 type => 'number',
                 minimum => '0',
+                default => 'restore limit from datacenter or storage config',
             },
             start => {
                 optional => 1,
@@ -197,11 +219,15 @@ __PACKAGE__->register_method({
         # 'unprivileged' is read-only, so we can't pass it to update_pct_config
         my $unprivileged = extract_param($param, 'unprivileged');
         my $restore = extract_param($param, 'restore');
+        my $unique = extract_param($param, 'unique');
+
+        # used to skip firewall config restore if user lacks permission
+        my $skip_fw_config_restore = 0;
 
         if ($restore) {
             # fixme: limit allowed parameters
         }
-
+
         my $force = extract_param($param, 'force');
 
         if (!($same_container_exists && $restore && $force)) {
@@ -229,6 +255,17 @@ __PACKAGE__->register_method({
         } elsif ($restore && $force && $same_container_exists &&
                  $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
             # OK: user has VM.Backup permissions, and want to restore an existing VM
+
+            # we don't want to restore a container-provided FW conf in this case
+            # since the user is lacking permission to configure the container's FW
+            $skip_fw_config_restore = 1;
+
+            # error out if a user tries to change from unprivileged to privileged
+            # explicit change is checked here, implicit is checked down below or happening in root-only paths
+            my $conf = PVE::LXC::Config->load_config($vmid);
+            if ($conf->{unprivileged} && defined($unprivileged) && !$unprivileged) {
+                raise_perm_exc("cannot change from unprivileged to privileged without VM.Allocate");
+            }
         } else {
raise_perm_exc(); } @@ -236,28 +273,28 @@ __PACKAGE__->register_method({ my $ostemplate = extract_param($param, 'ostemplate'); my $storage = extract_param($param, 'storage') // 'local'; - PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []); + PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, undef, $param, [], $unprivileged); my $storage_cfg = cfs_read_file("storage.cfg"); my $archive; if ($ostemplate eq '-') { - die "pipe requires cli environment\n" - if $rpcenv->{type} ne 'cli'; - die "pipe can only be used with restore tasks\n" + die "pipe requires cli environment\n" + if $rpcenv->{type} ne 'cli'; + die "pipe can only be used with restore tasks\n" if !$restore; $archive = '-'; die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs}); } else { PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate); - $archive = PVE::Storage::abs_filesystem_path($storage_cfg, $ostemplate); + $archive = $ostemplate; } my %used_storages; my $check_and_activate_storage = sub { my ($sid) = @_; - my $scfg = PVE::Storage::storage_check_node($storage_cfg, $sid, $node); + my $scfg = PVE::Storage::storage_check_enabled($storage_cfg, $sid, $node); raise_param_exc({ storage => "storage '$sid' does not support container directories"}) if !$scfg->{content}->{rootdir}; @@ -298,7 +335,7 @@ __PACKAGE__->register_method({ # check storage access, activate storage my $delayed_mp_param = {}; - PVE::LXC::Config->foreach_mountpoint($mp_param, sub { + PVE::LXC::Config->foreach_volume($mp_param, sub { my ($ms, $mountpoint) = @_; my $volid = $mountpoint->{volume}; @@ -325,33 +362,48 @@ __PACKAGE__->register_method({ eval { PVE::LXC::Config->create_and_lock_config($vmid, $force) }; die "$emsg $@" if $@; + my $remove_lock = 1; + my $code = sub { my $old_conf = PVE::LXC::Config->load_config($vmid); + my $was_template; - my $vollist = []; eval { my $orig_mp_param; # only used if $restore if ($restore) { die "can't overwrite running container\n" if PVE::LXC::check_running($vmid); - if ($is_root && $archive ne '-') { + if ($archive ne '-') { my $orig_conf; - ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($archive); - # When we're root call 'restore_configuration' with ristricted=0, + print "recovering backed-up configuration from '$archive'\n"; + ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid); + + $was_template = delete $orig_conf->{template}; + + # When we're root call 'restore_configuration' with restricted=0, # causing it to restore the raw lxc entries, among which there may be # 'lxc.idmap' entries. 
We need to make sure that the extracted contents # of the container match up with the restored configuration afterwards: - $conf->{lxc} = [grep { $_->[0] eq 'lxc.idmap' } @{$orig_conf->{lxc}}]; + $conf->{lxc} = $orig_conf->{lxc} if $is_root; + + $conf->{unprivileged} = $orig_conf->{unprivileged} + if !defined($unprivileged) && defined($orig_conf->{unprivileged}); + + # implicit privileged change is checked here + if ($old_conf->{unprivileged} && !$conf->{unprivileged}) { + $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Allocate']); + } } } if ($storage_only_mode) { if ($restore) { if (!defined($orig_mp_param)) { - (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($archive); + print "recovering backed-up configuration from '$archive'\n"; + (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid); } $mp_param = $orig_mp_param; die "rootfs configuration could not be recovered, please check and specify manually!\n" if !defined($mp_param->{rootfs}); - PVE::LXC::Config->foreach_mountpoint($mp_param, sub { + PVE::LXC::Config->foreach_volume($mp_param, sub { my ($ms, $mountpoint) = @_; my $type = $mountpoint->{type}; if ($type eq 'volume') { @@ -382,7 +434,14 @@ __PACKAGE__->register_method({ $mp_param->{rootfs} = "$storage:4"; # defaults to 4GB } } + }; + die "$emsg $@" if $@; + + # up until here we did not modify the container, besides the lock + $remove_lock = 0; + my $vollist = []; + eval { $vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf); # we always have the 'create' lock so check for more than 1 entry @@ -394,10 +453,15 @@ __PACKAGE__->register_method({ eval { my $rootdir = PVE::LXC::mount_all($vmid, $storage_cfg, $conf, 1); $bwlimit = PVE::Storage::get_bandwidth_limit('restore', [keys %used_storages], $bwlimit); - PVE::LXC::Create::restore_archive($archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit); + print "restoring '$archive' now..\n" + if $restore && $archive ne '-'; + PVE::LXC::Create::restore_archive($storage_cfg, $archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit); if ($restore) { - PVE::LXC::Create::restore_configuration($vmid, $rootdir, $conf, !$is_root); + print "merging backed-up and given configuration..\n"; + PVE::LXC::Create::restore_configuration($vmid, $storage_cfg, $archive, $rootdir, $conf, !$is_root, $unique, $skip_fw_config_restore); + my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); + $lxc_setup->template_fixup($conf); } else { my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); # detect OS PVE::LXC::Config->write_config($vmid, $conf); # safe config (after OS detection) @@ -415,11 +479,17 @@ __PACKAGE__->register_method({ foreach my $mp (keys %$delayed_mp_param) { $conf->{$mp} = $delayed_mp_param->{$mp}; } + # If the template flag was set, we try to convert again to template after restore + if ($was_template) { + print STDERR "Convert restored container to template...\n"; + PVE::LXC::template_create($vmid, $conf); + $conf->{template} = 1; + } PVE::LXC::Config->write_config($vmid, $conf); }; if (my $err = $@) { PVE::LXC::destroy_disks($storage_cfg, $vollist); - eval { PVE::LXC::destroy_config($vmid) }; + eval { PVE::LXC::Config->destroy_config($vmid) }; warn $@ if $@; die "$emsg $err"; } @@ -430,7 +500,18 @@ __PACKAGE__->register_method({ }; my $workername = $restore ? 
'vzrestore' : 'vzcreate';
-        my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
+        my $realcmd = sub {
+            eval {
+                PVE::LXC::Config->lock_config($vmid, $code);
+            };
+            if (my $err = $@) {
+                # if we aborted before changing the container, we must remove the create lock
+                if ($remove_lock) {
+                    PVE::LXC::Config->remove_lock($vmid, 'create');
+                }
+                die $err;
+            }
+        };
 
         return $rpcenv->fork_worker($workername, $vmid, $authuser, $realcmd);
     }});
 
@@ -469,6 +550,7 @@ __PACKAGE__->register_method({
         my $res = [
             { subdir => 'config' },
+            { subdir => 'pending' },
             { subdir => 'status' },
             { subdir => 'vncproxy' },
             { subdir => 'termproxy' },
@@ -528,7 +610,7 @@ __PACKAGE__->register_method({
     code => sub {
         my ($param) = @_;
 
-        return PVE::Cluster::create_rrd_graph(
+        return PVE::RRD::create_rrd_graph(
             "pve2-vm/$param->{vmid}", $param->{timeframe},
             $param->{ds}, $param->{cf});
 
@@ -571,7 +653,7 @@ __PACKAGE__->register_method({
     code => sub {
         my ($param) = @_;
 
-        return PVE::Cluster::create_rrd_data(
+        return PVE::RRD::create_rrd_data(
             "pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
     }});
 
@@ -590,6 +672,26 @@ __PACKAGE__->register_method({
     properties => {
         node => get_standard_option('pve-node'),
         vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }),
+        force => {
+            type => 'boolean',
+            description => "Force destroy, even if running.",
+            default => 0,
+            optional => 1,
+        },
+        purge => {
+            type => 'boolean',
+            description => "Remove container from all related configurations."
+                ." For example, backup jobs, replication jobs or HA."
+                ." Related ACLs and Firewall entries will *always* be removed.",
+            default => 0,
+            optional => 1,
+        },
+        'destroy-unreferenced-disks' => {
+            type => 'boolean',
+            description => "If set, destroy additionally all disks with the VMID from all"
+                ." enabled storages which are not referenced in the config.",
+            optional => 1,
+        },
     },
     },
     returns => {
@@ -599,43 +701,79 @@ __PACKAGE__->register_method({
         my ($param) = @_;
 
         my $rpcenv = PVE::RPCEnvironment::get();
-
         my $authuser = $rpcenv->get_user();
-
         my $vmid = $param->{vmid};
 
         # test if container exists
+        my $conf = PVE::LXC::Config->load_config($vmid);
+        my $early_checks = sub {
+            my ($conf) = @_;
+            PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
+            PVE::LXC::Config->check_lock($conf);
-        my $storage_cfg = cfs_read_file("storage.cfg");
+            my $ha_managed = PVE::HA::Config::service_is_configured("ct:$vmid");
-        PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
+            if (!$param->{purge}) {
+                die "unable to remove CT $vmid - used in HA resources and purge parameter not set.\n"
+                    if $ha_managed;
-        die "unable to remove CT $vmid - used in HA resources\n"
-            if PVE::HA::Config::vm_is_ha_managed($vmid);
+                # do not allow destroy if there are replication jobs without purge
+                my $repl_conf = PVE::ReplicationConfig->new();
+                $repl_conf->check_for_existing_jobs($vmid);
+            }
+
+            return $ha_managed;
+        };
-        # do not allow destroy if there are replication jobs
-        my $repl_conf = PVE::ReplicationConfig->new();
-        $repl_conf->check_for_existing_jobs($vmid);
+        $early_checks->($conf);
 
         my $running_error_msg = "unable to destroy CT $vmid - container is running\n";
-
-        die $running_error_msg if PVE::LXC::check_running($vmid); # check early
+        die $running_error_msg if !$param->{force} && PVE::LXC::check_running($vmid); # check early
 
         my $code = sub {
             # reload config after lock
             $conf = PVE::LXC::Config->load_config($vmid);
-            PVE::LXC::Config->check_lock($conf);
+            my $ha_managed = $early_checks->($conf);
+
+            if (PVE::LXC::check_running($vmid)) {
+                die $running_error_msg if !$param->{force};
+                warn "forced to stop CT $vmid before destroying!\n";
+                if (!$ha_managed) {
+                    PVE::LXC::vm_stop($vmid, 1);
+                } else {
+                    run_command(['ha-manager', 'crm-command', 'stop', "ct:$vmid", '120']);
+                }
+            }
 
-            die $running_error_msg if PVE::LXC::check_running($vmid);
+            my $storage_cfg = cfs_read_file("storage.cfg");
+            PVE::LXC::destroy_lxc_container(
+                $storage_cfg,
+                $vmid,
+                $conf,
+                { lock => 'destroyed' },
+                $param->{'destroy-unreferenced-disks'},
+            );
 
-            PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $conf);
             PVE::AccessControl::remove_vm_access($vmid);
             PVE::Firewall::remove_vmfw_conf($vmid);
+            if ($param->{purge}) {
+                print "purging CT $vmid from related configurations..\n";
+                PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+                PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+
+                if ($ha_managed) {
+                    PVE::HA::Config::delete_service_from_config("ct:$vmid");
+                    print "NOTE: removed CT $vmid from HA resource configuration.\n";
+                }
+            }
+
+            # only now remove the zombie config, else we can have reuse race
+            PVE::LXC::Config->destroy_config($vmid);
         };
 
         my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
-
+
         return $rpcenv->fork_worker('vzdestroy', $vmid, $authuser, $realcmd);
     }});
 
@@ -965,6 +1103,7 @@ __PACKAGE__->register_method({
             description => "Target node.",
             completion => \&PVE::Cluster::complete_migration_target,
         }),
+        'target-storage' => get_standard_option('pve-targetstorage'),
         online => {
             type => 'boolean',
             description => "Use online/live migration.",
@@ -981,11 +1120,12 @@ __PACKAGE__->register_method({
             optional => 1,
             default => 180,
         },
-        force => {
-            type => 'boolean',
-            description => "Force migration despite local bind / device" .
-                " mounts. 
NOTE: deprecated, use 'shared' property of mount point instead.", + bwlimit => { + description => "Override I/O bandwidth limit (in KiB/s).", optional => 1, + type => 'number', + minimum => '0', + default => 'migrate limit from datacenter or storage config', }, }, }, @@ -1022,6 +1162,25 @@ __PACKAGE__->register_method({ if !$param->{online} && !$param->{restart}; } + if (my $targetstorage = delete $param->{'target-storage'}) { + my $storecfg = PVE::Storage::config(); + my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') }; + raise_param_exc({ targetstorage => "failed to parse storage map: $@" }) + if $@; + + $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']) + if !defined($storagemap->{identity}); + + foreach my $target_sid (values %{$storagemap->{entries}}) { + $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $target_sid, $target); + } + + $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storagemap->{default}, $target) + if $storagemap->{default}; + + $param->{storagemap} = $storagemap; + } + if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') { my $hacmd = sub { @@ -1162,15 +1321,6 @@ __PACKAGE__->register_method({ die "you can't convert a CT to template if the CT is running\n" if PVE::LXC::check_running($vmid); - my $scfg = PVE::Storage::config(); - PVE::LXC::Config->foreach_mountpoint($conf, sub { - my ($ms, $mp) = @_; - - my ($sid) =PVE::Storage::parse_volume_id($mp->{volume}, 0); - die "Directory storage '$sid' does not support container templates!\n" - if $scfg->{ids}->{$sid}->{path}; - }); - my $realcmd = sub { PVE::LXC::template_create($vmid, $conf); @@ -1250,6 +1400,13 @@ __PACKAGE__->register_method({ description => "Target node. Only allowed if the original VM is on shared storage.", optional => 1, }), + bwlimit => { + description => "Override I/O bandwidth limit (in KiB/s).", + optional => 1, + type => 'number', + minimum => '0', + default => 'clone limit from datacenter or storage config', + }, }, }, returns => { @@ -1259,30 +1416,21 @@ __PACKAGE__->register_method({ my ($param) = @_; my $rpcenv = PVE::RPCEnvironment::get(); - - my $authuser = $rpcenv->get_user(); + my $authuser = $rpcenv->get_user(); my $node = extract_param($param, 'node'); - my $vmid = extract_param($param, 'vmid'); - my $newid = extract_param($param, 'newid'); - my $pool = extract_param($param, 'pool'); - if (defined($pool)) { $rpcenv->check_pool_exist($pool); } - my $snapname = extract_param($param, 'snapname'); - my $storage = extract_param($param, 'storage'); - my $target = extract_param($param, 'target'); - my $localnode = PVE::INotify::nodename(); - undef $target if $target && ($target eq $localnode || $target eq 'localhost'); + undef $target if $target && ($target eq $localnode || $target eq 'localhost'); PVE::Cluster::check_node_exists($target) if $target; @@ -1293,7 +1441,7 @@ __PACKAGE__->register_method({ PVE::Storage::storage_check_enabled($storecfg, $storage); if ($target) { # check if storage is available on target node - PVE::Storage::storage_check_node($storecfg, $storage, $target); + PVE::Storage::storage_check_enabled($storecfg, $storage, $target); # clone only works if target storage is shared my $scfg = PVE::Storage::storage_config($storecfg, $storage); die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared}; @@ -1302,118 +1450,132 @@ __PACKAGE__->register_method({ PVE::Cluster::check_cfs_quorum(); - my $conffile; my $newconf = {}; my $mountpoints = {}; my $fullclone = 
{}; my $vollist = []; my $running; - PVE::LXC::Config->lock_config($vmid, sub { - my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk'); + PVE::LXC::Config->create_and_lock_config($newid, 0); + PVE::Firewall::clone_vmfw_conf($vmid, $newid); - $running = PVE::LXC::check_running($vmid) || 0; + my $lock_and_reload = sub { + my ($vmid, $code) = @_; + return PVE::LXC::Config->lock_config($vmid, sub { + my $conf = PVE::LXC::Config->load_config($vmid); + die "Lost 'create' config lock, aborting.\n" + if !PVE::LXC::Config->has_lock($conf, 'create'); - my $full = extract_param($param, 'full'); - if (!defined($full)) { - $full = !PVE::LXC::Config->is_template($src_conf); - } - die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full; + return $code->($conf); + }); + }; - eval { - die "snapshot '$snapname' does not exist\n" - if $snapname && !defined($src_conf->{snapshots}->{$snapname}); + my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk'); + $running = PVE::LXC::check_running($vmid) || 0; - my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf; + my $full = extract_param($param, 'full'); + if (!defined($full)) { + $full = !PVE::LXC::Config->is_template($src_conf); + } - $conffile = PVE::LXC::Config->config_file($newid); - die "unable to create CT $newid: config file already exists\n" - if -f $conffile; + eval { + die "parameter 'storage' not allowed for linked clones\n" + if defined($storage) && !$full; - my $sharedvm = 1; - foreach my $opt (keys %$src_conf) { - next if $opt =~ m/^unused\d+$/; + die "snapshot '$snapname' does not exist\n" + if $snapname && !defined($src_conf->{snapshots}->{$snapname}); - my $value = $src_conf->{$opt}; + my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf; - if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) { - my $mp = $opt eq 'rootfs' ? 
- PVE::LXC::Config->parse_ct_rootfs($value) : - PVE::LXC::Config->parse_ct_mountpoint($value); + my $sharedvm = 1; + for my $opt (sort keys %$src_conf) { + next if $opt =~ m/^unused\d+$/; - if ($mp->{type} eq 'volume') { - my $volid = $mp->{volume}; + my $value = $src_conf->{$opt}; - my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); - $sid = $storage if defined($storage); - my $scfg = PVE::Storage::storage_config($storecfg, $sid); - if (!$scfg->{shared}) { - $sharedvm = 0; - warn "found non-shared volume: $volid\n" if $target; - } + if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) { + my $mp = PVE::LXC::Config->parse_volume($opt, $value); - $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); + if ($mp->{type} eq 'volume') { + my $volid = $mp->{volume}; - if ($full) { - die "Cannot do full clones on a running container without snapshots\n" - if $running && !defined($snapname); - $fullclone->{$opt} = 1; - } else { - # not full means clone instead of copy - die "Linked clone feature for '$volid' is not available\n" - if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running); - } + my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); + $sid = $storage if defined($storage); + my $scfg = PVE::Storage::storage_config($storecfg, $sid); + if (!$scfg->{shared}) { + $sharedvm = 0; + warn "found non-shared volume: $volid\n" if $target; + } - $mountpoints->{$opt} = $mp; - push @$vollist, $volid; + $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); + if ($full) { + die "Cannot do full clones on a running container without snapshots\n" + if $running && !defined($snapname); + $fullclone->{$opt} = 1; } else { - # TODO: allow bind mounts? - die "unable to clone mountpint '$opt' (type $mp->{type})\n"; + # not full means clone instead of copy + die "Linked clone feature for '$volid' is not available\n" + if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running, {'valid_target_formats' => ['raw', 'subvol']}); } - } elsif ($opt =~ m/^net(\d+)$/) { - # always change MAC! address - my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg'); - my $net = PVE::LXC::Config->parse_lxc_network($value); - $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix}); - $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net); + + $mountpoints->{$opt} = $mp; + push @$vollist, $volid; + } else { - # copy everything else - $newconf->{$opt} = $value; + # TODO: allow bind mounts? + die "unable to clone mountpoint '$opt' (type $mp->{type})\n"; } + } elsif ($opt =~ m/^net(\d+)$/) { + # always change MAC! address + my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg'); + my $net = PVE::LXC::Config->parse_lxc_network($value); + $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix}); + $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net); + } else { + # copy everything else + $newconf->{$opt} = $value; } - die "can't clone CT to node '$target' (CT uses local storage)\n" - if $target && !$sharedvm; + } + die "can't clone CT to node '$target' (CT uses local storage)\n" + if $target && !$sharedvm; - # Replace the 'disk' lock with a 'create' lock. - $newconf->{lock} = 'create'; + # Replace the 'disk' lock with a 'create' lock. 
+ $newconf->{lock} = 'create'; - delete $newconf->{template}; - if ($param->{hostname}) { - $newconf->{hostname} = $param->{hostname}; - } + # delete all snapshot related config options + delete $newconf->@{qw(snapshots parent snaptime snapstate)}; - if ($param->{description}) { - $newconf->{description} = $param->{description}; - } + delete $newconf->{pending}; + delete $newconf->{template}; - # create empty/temp config - this fails if CT already exists on other node + $newconf->{hostname} = $param->{hostname} if $param->{hostname}; + $newconf->{description} = $param->{description} if $param->{description}; + + $lock_and_reload->($newid, sub { PVE::LXC::Config->write_config($newid, $newconf); + }); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; + warn "Failed to remove source CT config lock - $@\n" if $@; + + eval { + $lock_and_reload->($newid, sub { + PVE::LXC::Config->destroy_config($newid); + PVE::Firewall::remove_vmfw_conf($newid); + }); }; - if (my $err = $@) { - eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; - warn $@ if $@; - die $err; - } - }); + warn "Failed to remove target CT config - $@\n" if $@; + + die $err; + } my $update_conf = sub { my ($key, $value) = @_; - return PVE::LXC::Config->lock_config($newid, sub { - my $conf = PVE::LXC::Config->load_config($newid); - die "Lost 'create' config lock, aborting.\n" - if !PVE::LXC::Config->has_lock($conf, 'create'); + return $lock_and_reload->($newid, sub { + my $conf = shift; $conf->{$key} = $value; PVE::LXC::Config->write_config($newid, $conf); }); @@ -1434,6 +1596,7 @@ __PACKAGE__->register_method({ local $SIG{HUP} = sub { die "interrupted by signal\n"; }; PVE::Storage::activate_volumes($storecfg, $vollist, $snapname); + my $bwlimit = extract_param($param, 'bwlimit'); foreach my $opt (keys %$mountpoints) { my $mp = $mountpoints->{$opt}; @@ -1442,8 +1605,10 @@ __PACKAGE__->register_method({ my $newvolid; if ($fullclone->{$opt}) { print "create full clone of mountpoint $opt ($volid)\n"; - my $target_storage = $storage // PVE::Storage::parse_volume_id($volid); - $newvolid = PVE::LXC::copy_volume($mp, $newid, $target_storage, $storecfg, $newconf, $snapname); + my $source_storage = PVE::Storage::parse_volume_id($volid); + my $target_storage = $storage // $source_storage; + my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', [$source_storage, $target_storage], $bwlimit); + $newvolid = PVE::LXC::copy_volume($mp, $newid, $target_storage, $storecfg, $newconf, $snapname, $clonelimit); } else { print "create linked clone of mount point $opt ($volid)\n"; $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname); @@ -1456,41 +1621,64 @@ __PACKAGE__->register_method({ } PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; - PVE::LXC::Config->remove_lock($newid, 'create'); - if ($target) { - # always deactivate volumes - avoid lvm LVs to be active on several nodes - PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running; - PVE::Storage::deactivate_volumes($storecfg, $newvollist); - - my $newconffile = PVE::LXC::Config->config_file($newid, $target); - die "Failed to move config to node '$target' - rename failed: $!\n" - if !rename($conffile, $newconffile); - } + $lock_and_reload->($newid, sub { + my $conf = shift; + my $rootdir = PVE::LXC::mount_all($newid, $storecfg, $conf, 1); + eval { + my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); + $lxc_setup->post_clone_hook($conf); + }; + my $err = $@; + eval { PVE::LXC::umount_all($newid, 
$storecfg, $conf, 1); }; + if ($err) { + warn "$@\n" if $@; + die $err; + } else { + die $@ if $@; + } + }); }; my $err = $@; - # Unlock the source config in any case: eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; warn $@ if $@; if ($err) { # Now cleanup the config & disks: - unlink $conffile; - sleep 1; # some storages like rbd need to wait before release volume - really? foreach my $volid (@$newvollist) { eval { PVE::Storage::vdisk_free($storecfg, $volid); }; warn $@ if $@; } + + eval { + $lock_and_reload->($newid, sub { + PVE::LXC::Config->destroy_config($newid); + PVE::Firewall::remove_vmfw_conf($newid); + }); + }; + warn "Failed to remove target CT config - $@\n" if $@; + die "clone failed: $err"; } + $lock_and_reload->($newid, sub { + PVE::LXC::Config->remove_lock($newid, 'create'); + + if ($target) { + # always deactivate volumes - avoid lvm LVs to be active on several nodes + PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running; + PVE::Storage::deactivate_volumes($storecfg, $newvollist); + + PVE::LXC::Config->move_config_to_node($newid, $target); + } + }); + return; }; - PVE::Firewall::clone_vmfw_conf($vmid, $newid); return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd); }}); @@ -1513,7 +1701,7 @@ __PACKAGE__->register_method({ disk => { type => 'string', description => "The disk you want to resize.", - enum => [PVE::LXC::Config->mountpoint_names()], + enum => [PVE::LXC::Config->valid_volume_keys()], }, size => { type => 'string', @@ -1552,8 +1740,6 @@ __PACKAGE__->register_method({ die "no options specified\n" if !scalar(keys %$param); - PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $param, []); - my $storage_cfg = cfs_read_file("storage.cfg"); my $code = sub { @@ -1561,13 +1747,14 @@ __PACKAGE__->register_method({ my $conf = PVE::LXC::Config->load_config($vmid); PVE::LXC::Config->check_lock($conf); + PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $conf, $param, [], $conf->{unprivileged}); + PVE::Tools::assert_if_modified($digest, $conf->{digest}); my $running = PVE::LXC::check_running($vmid); my $disk = $param->{disk}; - my $mp = $disk eq 'rootfs' ? PVE::LXC::Config->parse_ct_rootfs($conf->{$disk}) : - PVE::LXC::Config->parse_ct_mountpoint($conf->{$disk}); + my $mp = PVE::LXC::Config->parse_volume($disk, $conf->{$disk}); my $volid = $mp->{volume}; @@ -1587,12 +1774,15 @@ __PACKAGE__->register_method({ PVE::Storage::activate_volumes($storage_cfg, [$volid]); my $size = PVE::Storage::volume_size_info($storage_cfg, $volid, 5); + + die "Could not determine current size of volume '$volid'\n" if !defined($size); + $newsize += $size if $ext; $newsize = int($newsize); die "unable to shrink disk size\n" if $newsize < $size; - return if $size == $newsize; + die "disk is already at specified size\n" if $size == $newsize; PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr"); my $realcmd = sub { @@ -1655,42 +1845,70 @@ __PACKAGE__->register_method({ method => 'POST', protected => 1, proxyto => 'node', - description => "Move a rootfs-/mp-volume to a different storage", + description => "Move a rootfs-/mp-volume to a different storage or to a different container.", permissions => { description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " . 
- "and 'Datastore.AllocateSpace' permissions on the storage.", - check => - [ 'and', - ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], - ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]], - ], + "and 'Datastore.AllocateSpace' permissions on the storage. To move ". + "a volume to another container, you need the permissions on the ". + "target container as well.", + check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], }, parameters => { additionalProperties => 0, properties => { node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }), + 'target-vmid' => get_standard_option('pve-vmid', { + completion => \&PVE::LXC::complete_ctid, + optional => 1, + }), volume => { type => 'string', - enum => [ PVE::LXC::Config->mountpoint_names() ], + #TODO: check how to handle unused mount points as the mp parameter is not configured + enum => [ PVE::LXC::Config->valid_volume_keys_with_unused() ], description => "Volume which will be moved.", }, storage => get_standard_option('pve-storage-id', { description => "Target Storage.", completion => \&PVE::Storage::complete_storage_enabled, + optional => 1, }), delete => { type => 'boolean', - description => "Delete the original volume after successful copy. By default the original is kept as an unused volume entry.", + description => "Delete the original volume after successful copy. By default the " . + "original is kept as an unused volume entry.", optional => 1, default => 0, }, digest => { type => 'string', - description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', + description => 'Prevent changes if current configuration file has different SHA1 " . + "digest. This can be used to prevent concurrent modifications.', maxLength => 40, optional => 1, - } + }, + bwlimit => { + description => "Override I/O bandwidth limit (in KiB/s).", + optional => 1, + type => 'number', + minimum => '0', + default => 'clone limit from datacenter or storage config', + }, + 'target-volume' => { + type => 'string', + description => "The config key the volume will be moved to. Default is the " . + "source volume key.", + enum => [PVE::LXC::Config->valid_volume_keys_with_unused()], + optional => 1, + }, + 'target-digest' => { + type => 'string', + description => 'Prevent changes if current configuration file of the target " . + "container has a different SHA1 digest. This can be used to prevent " . 
+ "concurrent modifications.', + maxLength => 40, + optional => 1, + }, }, }, returns => { @@ -1705,40 +1923,59 @@ __PACKAGE__->register_method({ my $vmid = extract_param($param, 'vmid'); + my $target_vmid = extract_param($param, 'target-vmid'); + my $storage = extract_param($param, 'storage'); my $mpkey = extract_param($param, 'volume'); + my $target_mpkey = extract_param($param, 'target-volume') // $mpkey; + + my $digest = extract_param($param, 'digest'); + + my $target_digest = extract_param($param, 'target-digest'); + my $lockname = 'disk'; my ($mpdata, $old_volid); - PVE::LXC::Config->lock_config($vmid, sub { - my $conf = PVE::LXC::Config->load_config($vmid); - PVE::LXC::Config->check_lock($conf); + die "either set storage or target-vmid, but not both\n" + if $storage && $target_vmid; - die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid); + my $storecfg = PVE::Storage::config(); - if ($mpkey eq 'rootfs') { - $mpdata = PVE::LXC::Config->parse_ct_rootfs($conf->{$mpkey}); - } elsif ($mpkey =~ m/mp\d+/) { - $mpdata = PVE::LXC::Config->parse_ct_mountpoint($conf->{$mpkey}); - } else { - die "Can't parse $mpkey\n"; - } - $old_volid = $mpdata->{volume}; + my $move_to_storage_checks = sub { + PVE::LXC::Config->lock_config($vmid, sub { + my $conf = PVE::LXC::Config->load_config($vmid); + PVE::LXC::Config->check_lock($conf); - die "you can't move a volume with snapshots and delete the source\n" - if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid); + die "cannot move volumes of a running container\n" + if PVE::LXC::check_running($vmid); - PVE::Tools::assert_if_modified($param->{digest}, $conf->{digest}); + if ($mpkey =~ m/^unused\d+$/) { + die "cannot move volume '$mpkey', only configured volumes can be moved to ". 
+ "another storage\n"; + } - PVE::LXC::Config->set_lock($vmid, $lockname); - }); + $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey}); + $old_volid = $mpdata->{volume}; - my $realcmd = sub { + die "you can't move a volume with snapshots and delete the source\n" + if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid); + + PVE::Tools::assert_if_modified($digest, $conf->{digest}); + + PVE::LXC::Config->set_lock($vmid, $lockname); + }); + }; + + my $storage_realcmd = sub { eval { - PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage"); + PVE::Cluster::log_msg( + 'info', + $authuser, + "move volume CT $vmid: move --volume $mpkey --storage $storage" + ); my $conf = PVE::LXC::Config->load_config($vmid); my $storage_cfg = PVE::Storage::config(); @@ -1747,15 +1984,39 @@ __PACKAGE__->register_method({ eval { PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]); - $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf); - $mpdata->{volume} = $new_volid; + my $bwlimit = extract_param($param, 'bwlimit'); + my $source_storage = PVE::Storage::parse_volume_id($old_volid); + my $movelimit = PVE::Storage::get_bandwidth_limit( + 'move', + [$source_storage, $storage], + $bwlimit + ); + $new_volid = PVE::LXC::copy_volume( + $mpdata, + $vmid, + $storage, + $storage_cfg, + $conf, + undef, + $movelimit + ); + if (PVE::LXC::Config->is_template($conf)) { + PVE::Storage::activate_volumes($storage_cfg, [ $new_volid ]); + my $template_volid = PVE::Storage::vdisk_create_base($storage_cfg, $new_volid); + $mpdata->{volume} = $template_volid; + } else { + $mpdata->{volume} = $new_volid; + } PVE::LXC::Config->lock_config($vmid, sub { my $digest = $conf->{digest}; $conf = PVE::LXC::Config->load_config($vmid); PVE::Tools::assert_if_modified($digest, $conf->{digest}); - $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint($mpdata, $mpkey eq 'rootfs'); + $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint( + $mpdata, + $mpkey eq 'rootfs' + ); PVE::LXC::Config->add_unused_volume($conf, $old_volid) if !$param->{delete}; @@ -1782,7 +2043,14 @@ __PACKAGE__->register_method({ PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]); PVE::Storage::vdisk_free($storage_cfg, $old_volid); }; - warn $@ if $@; + if (my $err = $@) { + warn $err; + PVE::LXC::Config->lock_config($vmid, sub { + my $conf = PVE::LXC::Config->load_config($vmid); + PVE::LXC::Config->add_unused_volume($conf, $old_volid); + PVE::LXC::Config->write_config($vmid, $conf); + }); + } } }; my $err = $@; @@ -1790,15 +2058,254 @@ __PACKAGE__->register_method({ warn $@ if $@; die $err if $err; }; - my $task = eval { - $rpcenv->fork_worker('move_volume', $vmid, $authuser, $realcmd); + + my $load_and_check_reassign_configs = sub { + my $vmlist = PVE::Cluster::get_vmlist()->{ids}; + + die "Cannot move to/from 'rootfs'\n" if $mpkey eq "rootfs" || $target_mpkey eq "rootfs"; + + if ($mpkey =~ m/^unused\d+$/ && $target_mpkey !~ m/^unused\d+$/) { + die "Moving an unused volume to a used one is not possible\n"; + } + die "could not find CT ${vmid}\n" if !exists($vmlist->{$vmid}); + die "could not find CT ${target_vmid}\n" if !exists($vmlist->{$target_vmid}); + + my $source_node = $vmlist->{$vmid}->{node}; + my $target_node = $vmlist->{$target_vmid}->{node}; + + die "Both containers need to be on the same node ($source_node != $target_node)\n" + if $source_node ne $target_node; + + my $source_conf = PVE::LXC::Config->load_config($vmid); 
+ PVE::LXC::Config->check_lock($source_conf); + my $target_conf = PVE::LXC::Config->load_config($target_vmid); + PVE::LXC::Config->check_lock($target_conf); + + die "Can't move volumes from or to template CT\n" + if ($source_conf->{template} || $target_conf->{template}); + + if ($digest) { + eval { PVE::Tools::assert_if_modified($digest, $source_conf->{digest}) }; + die "Container ${vmid}: $@" if $@; + } + + if ($target_digest) { + eval { PVE::Tools::assert_if_modified($target_digest, $target_conf->{digest}) }; + die "Container ${target_vmid}: $@" if $@; + } + + die "volume '${mpkey}' for container '$vmid' does not exist\n" + if !defined($source_conf->{$mpkey}); + + die "Target volume key '${target_mpkey}' is already in use for container '$target_vmid'\n" + if exists $target_conf->{$target_mpkey}; + + my $drive = PVE::LXC::Config->parse_volume( + $mpkey, + $source_conf->{$mpkey}, + ); + + my $source_volid = $drive->{volume}; + + die "Volume '${mpkey}' has no associated image\n" + if !$source_volid; + die "Cannot move volume used by a snapshot to another container\n" + if PVE::LXC::Config->is_volume_in_use_by_snapshots($source_conf, $source_volid); + die "Storage does not support moving of this disk to another container\n" + if !PVE::Storage::volume_has_feature($storecfg, 'rename', $source_volid); + die "Cannot move a bindmount or device mount to another container\n" + if $drive->{type} ne "volume"; + die "Cannot move volume to another container while the source is running - detach first\n" + if PVE::LXC::check_running($vmid) && $mpkey !~ m/^unused\d+$/; + + my $repl_conf = PVE::ReplicationConfig->new(); + if ($repl_conf->check_for_existing_jobs($target_vmid, 1)) { + my ($storeid, undef) = PVE::Storage::parse_volume_id($source_volid); + my $format = (PVE::Storage::parse_volname($storecfg, $source_volid))[6]; + + die "Cannot move volume on storage '$storeid' to a replicated container - missing replication support\n" + if !PVE::Storage::storage_can_replicate($storecfg, $storeid, $format); + } + + return ($source_conf, $target_conf, $drive); }; - if (my $err = $@) { - eval { PVE::LXC::Config->remove_lock($vmid, $lockname) }; - warn $@ if $@; - die $err; + + my $logfunc = sub { + my ($msg) = @_; + print STDERR "$msg\n"; + }; + + my $volume_reassignfn = sub { + return PVE::LXC::Config->lock_config($vmid, sub { + return PVE::LXC::Config->lock_config($target_vmid, sub { + my ($source_conf, $target_conf, $drive) = &$load_and_check_reassign_configs(); + my $source_volid = $drive->{volume}; + + my $target_unused = $target_mpkey =~ m/^unused\d+$/; + + print "moving volume '$mpkey' from container '$vmid' to '$target_vmid'\n"; + + my ($storage, $source_volname) = PVE::Storage::parse_volume_id($source_volid); + + my $fmt = (PVE::Storage::parse_volname($storecfg, $source_volid))[6]; + + my $new_volid = PVE::Storage::rename_volume( + $storecfg, + $source_volid, + $target_vmid, + ); + + $drive->{volume} = $new_volid; + + delete $source_conf->{$mpkey}; + print "removing volume '${mpkey}' from container '${vmid}' config\n"; + PVE::LXC::Config->write_config($vmid, $source_conf); + + my $drive_string; + + if ($target_unused) { + $drive_string = $new_volid; + } else { + $drive_string = PVE::LXC::Config->print_volume($target_mpkey, $drive); + } + + if ($target_unused) { + $target_conf->{$target_mpkey} = $drive_string; + } else { + my $running = PVE::LXC::check_running($target_vmid); + my $param = { $target_mpkey => $drive_string }; + my $errors = PVE::LXC::Config->update_pct_config( + $target_vmid, + 
                            $target_conf,
+                            $running,
+                            $param
+                        );
+
+                        foreach my $key (keys %$errors) {
+                            $rpcenv->warn($errors->{$key});
+                        }
+                    }
+
+                    PVE::LXC::Config->write_config($target_vmid, $target_conf);
+                    $target_conf = PVE::LXC::Config->load_config($target_vmid);
+
+                    PVE::LXC::update_lxc_config($target_vmid, $target_conf) if !$target_unused;
+                    print "target container '$target_vmid' updated with '$target_mpkey'\n";
+
+                    # remove possible replication snapshots
+                    if (PVE::Storage::volume_has_feature($storecfg,'replicate', $source_volid)) {
+                        eval {
+                            PVE::Replication::prepare(
+                                $storecfg,
+                                [$new_volid],
+                                undef,
+                                1,
+                                undef,
+                                $logfunc,
+                            )
+                        };
+                        if (my $err = $@) {
+                            $rpcenv->warn("Failed to remove replication snapshots on volume ".
+                                "'${target_mpkey}'. Manual cleanup could be necessary. " .
+                                "Error: ${err}\n");
+                        }
+                    }
+                });
+            });
+        };
+
+        if ($target_vmid && $storage) {
+            my $msg = "either set 'storage' or 'target-vmid', but not both";
+            raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg });
+        } elsif ($target_vmid) {
+            $rpcenv->check_vm_perm($authuser, $target_vmid, undef, ['VM.Config.Disk'])
+                if $authuser ne 'root@pam';
+
+            if ($vmid eq $target_vmid) {
+                my $msg = "must be different than source VMID to move disk to another container";
+                raise_param_exc({ 'target-vmid' => $msg });
+            }
+
+            my (undef, undef, $drive) = &$load_and_check_reassign_configs();
+            my $storeid = PVE::Storage::parse_volume_id($drive->{volume});
+            $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+            return $rpcenv->fork_worker(
+                'move_volume',
+                "${vmid}-${mpkey}>${target_vmid}-${target_mpkey}",
+                $authuser,
+                $volume_reassignfn
+            );
+        } elsif ($storage) {
+            $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+            &$move_to_storage_checks();
+            my $task = eval {
+                $rpcenv->fork_worker('move_volume', $vmid, $authuser, $storage_realcmd);
+            };
+            if (my $err = $@) {
+                eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
+                warn $@ if $@;
+                die $err;
+            }
+            return $task;
+        } else {
+            my $msg = "both 'storage' and 'target-vmid' missing, either needs to be set";
+            raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg });
         }
-        return $task;
     }});
 
+__PACKAGE__->register_method({
+    name => 'vm_pending',
+    path => '{vmid}/pending',
+    method => 'GET',
+    proxyto => 'node',
+    description => 'Get container configuration, including pending changes.',
+    permissions => {
+        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+    },
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            node => get_standard_option('pve-node'),
+            vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+        },
+    },
+    returns => {
+        type => "array",
+        items => {
+            type => "object",
+            properties => {
+                key => {
+                    description => 'Configuration option name.',
+                    type => 'string',
+                },
+                value => {
+                    description => 'Current value.',
+                    type => 'string',
+                    optional => 1,
+                },
+                pending => {
+                    description => 'Pending value.',
+                    type => 'string',
+                    optional => 1,
+                },
+                delete => {
+                    description => "Indicates a pending delete request if present and not 0.",
+                    type => 'integer',
+                    minimum => 0,
+                    maximum => 2,
+                    optional => 1,
+                },
+            },
+        },
+    },
+    code => sub {
+        my ($param) = @_;
+
+        my $conf = PVE::LXC::Config->load_config($param->{vmid});
+
+        my $pending_delete_hash = PVE::LXC::Config->parse_pending_delete($conf->{pending}->{delete});
+
+        return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
+    }});
+
 1;
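
Example invocations of the handlers this diff adds or extends, via the standard pvesh CLI. This is only a usage sketch, not part of the patch: the node name 'pve1' and container IDs 100/101 are placeholders, and the /move_volume path is assumed from the pre-existing endpoint the diff modifies; parameter names come from the schemas above.

    # read a container's configuration including pending changes (new '{vmid}/pending' GET handler)
    pvesh get /nodes/pve1/lxc/100/pending

    # force-stop and destroy a container, purging it from backup, replication and HA configuration
    # (new 'force', 'purge' and 'destroy-unreferenced-disks' parameters on the DELETE handler)
    pvesh delete /nodes/pve1/lxc/100 --force 1 --purge 1 --destroy-unreferenced-disks 1

    # reassign a mount point volume to another container on the same node
    # (new 'target-vmid'/'target-volume' parameters on the move_volume POST handler)
    pvesh create /nodes/pve1/lxc/100/move_volume --volume mp0 --target-vmid 101 --target-volume mp0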