X-Git-Url: https://git.proxmox.com/?p=pve-container.git;a=blobdiff_plain;f=src%2FPVE%2FAPI2%2FLXC.pm;h=733826e0f23ecd04d906e02eaf2ec4ef4fa212c3;hp=4965f5d9169aa26a729c552c651d90e81b948e3a;hb=HEAD;hpb=59b0ce5516ed79346041acf67b8664eb0d3725df diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm index 4965f5d..138288c 100644 --- a/src/PVE/API2/LXC.pm +++ b/src/PVE/API2/LXC.pm @@ -3,6 +3,9 @@ package PVE::API2::LXC; use strict; use warnings; +use IO::Socket::UNIX; +use Socket qw(SOCK_STREAM); + use PVE::SafeSyslog; use PVE::Tools qw(extract_param run_command); use PVE::Exception qw(raise raise_param_exc raise_perm_exc); @@ -36,6 +39,18 @@ BEGIN { } } +my $check_storage_access_migrate = sub { + my ($rpcenv, $authuser, $storecfg, $storage, $node) = @_; + + PVE::Storage::storage_check_enabled($storecfg, $storage, $node); + + $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']); + + my $scfg = PVE::Storage::storage_config($storecfg, $storage); + die "storage '$storage' does not support CT rootdirs\n" + if !$scfg->{content}->{rootdir}; +}; + __PACKAGE__->register_method ({ subclass => "PVE::API2::LXC::Config", path => '{vmid}/config', @@ -209,6 +224,9 @@ __PACKAGE__->register_method({ my $restore = extract_param($param, 'restore'); my $unique = extract_param($param, 'unique'); + $param->{cpuunits} = PVE::CGroup::clamp_cpu_shares($param->{cpuunits}) + if defined($param->{cpuunits}); # clamp value depending on cgroup version + # used to skip firewall config restore if user lacks permission my $skip_fw_config_restore = 0; @@ -231,10 +249,7 @@ __PACKAGE__->register_method({ PVE::Tools::validate_ssh_public_keys($ssh_keys) if defined($ssh_keys); my $pool = extract_param($param, 'pool'); - if (defined($pool)) { - $rpcenv->check_pool_exist($pool); - $rpcenv->check_perm_modify($authuser, "/pool/$pool"); - } + $rpcenv->check_pool_exist($pool) if defined($pool); if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) { # OK @@ -247,6 +262,13 @@ __PACKAGE__->register_method({ # we don't want to restore a container-provided FW conf in this case # since the user is lacking permission to configure the container's FW $skip_fw_config_restore = 1; + + # error out if a user tries to change from unprivileged to privileged + # explicit change is checked here, implicit is checked down below or happening in root-only paths + my $conf = PVE::LXC::Config->load_config($vmid); + if ($conf->{unprivileged} && defined($unprivileged) && !$unprivileged) { + raise_perm_exc("cannot change from unprivileged to privileged without VM.Allocate"); + } } else { raise_perm_exc(); } @@ -254,7 +276,7 @@ __PACKAGE__->register_method({ my $ostemplate = extract_param($param, 'ostemplate'); my $storage = extract_param($param, 'storage') // 'local'; - PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []); + PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, undef, $param, [], $unprivileged); my $storage_cfg = cfs_read_file("storage.cfg"); @@ -267,7 +289,15 @@ __PACKAGE__->register_method({ $archive = '-'; die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs}); } else { - PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate); + my $content_type = $restore ? 
'backup' : 'vztmpl'; + PVE::Storage::check_volume_access( + $rpcenv, + $authuser, + $storage_cfg, + $vmid, + $ostemplate, + $content_type, + ); $archive = $ostemplate; } @@ -275,7 +305,7 @@ __PACKAGE__->register_method({ my $check_and_activate_storage = sub { my ($sid) = @_; - my $scfg = PVE::Storage::storage_check_node($storage_cfg, $sid, $node); + my $scfg = PVE::Storage::storage_check_enabled($storage_cfg, $sid, $node); raise_param_exc({ storage => "storage '$sid' does not support container directories"}) if !$scfg->{content}->{rootdir}; @@ -343,6 +373,8 @@ __PACKAGE__->register_method({ eval { PVE::LXC::Config->create_and_lock_config($vmid, $force) }; die "$emsg $@" if $@; + my $destroy_config_on_error = !$same_container_exists; + my $code = sub { my $old_conf = PVE::LXC::Config->load_config($vmid); my $was_template; @@ -357,6 +389,14 @@ __PACKAGE__->register_method({ print "recovering backed-up configuration from '$archive'\n"; ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid); + for my $opt (keys %$orig_conf) { + # early check before disks are created + # the "real" check is in later on when actually merging the configs + if ($opt =~ /^net\d+$/ && !defined($param->{$opt})) { + PVE::LXC::check_bridge_access($rpcenv, $authuser, $orig_conf->{$opt}); + } + } + $was_template = delete $orig_conf->{template}; # When we're root call 'restore_configuration' with restricted=0, @@ -367,6 +407,11 @@ __PACKAGE__->register_method({ $conf->{unprivileged} = $orig_conf->{unprivileged} if !defined($unprivileged) && defined($orig_conf->{unprivileged}); + + # implicit privileged change is checked here + if ($old_conf->{unprivileged} && !$conf->{unprivileged}) { + $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Allocate']); + } } } if ($storage_only_mode) { @@ -410,6 +455,9 @@ __PACKAGE__->register_method({ } } + # up until here we did not modify the container, besides the lock + $destroy_config_on_error = 1; + $vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf); # we always have the 'create' lock so check for more than 1 entry @@ -428,6 +476,7 @@ __PACKAGE__->register_method({ if ($restore) { print "merging backed-up and given configuration..\n"; PVE::LXC::Create::restore_configuration($vmid, $storage_cfg, $archive, $rootdir, $conf, !$is_root, $unique, $skip_fw_config_restore); + PVE::LXC::create_ifaces_ipams_ips($conf, $vmid) if $unique; my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); $lxc_setup->template_fixup($conf); } else { @@ -456,19 +505,38 @@ __PACKAGE__->register_method({ PVE::LXC::Config->write_config($vmid, $conf); }; if (my $err = $@) { - PVE::LXC::destroy_disks($storage_cfg, $vollist); - eval { PVE::LXC::Config->destroy_config($vmid) }; + eval { PVE::LXC::delete_ifaces_ipams_ips($conf, $vmid) }; warn $@ if $@; + PVE::LXC::destroy_disks($storage_cfg, $vollist); + if ($destroy_config_on_error) { + eval { PVE::LXC::Config->destroy_config($vmid) }; + warn $@ if $@; + + if (!$skip_fw_config_restore) { # Only if user has permission to change the fw + PVE::Firewall::remove_vmfw_conf($vmid); + warn $@ if $@; + } + } die "$emsg $err"; } PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool; - - PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node }) - if $start_after_create; }; my $workername = $restore ? 
'vzrestore' : 'vzcreate'; - my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); }; + my $realcmd = sub { + eval { + PVE::LXC::Config->lock_config($vmid, $code); + }; + if (my $err = $@) { + # if we aborted before changing the container, we must remove the create lock + if (!$destroy_config_on_error) { + PVE::LXC::Config->remove_lock($vmid, 'create'); + } + die $err; + } elsif ($start_after_create) { + PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node }); + } + }; return $rpcenv->fork_worker($workername, $vmid, $authuser, $realcmd); }}); @@ -1041,6 +1109,174 @@ __PACKAGE__->register_method ({ }}); +__PACKAGE__->register_method({ + name => 'remote_migrate_vm', + path => '{vmid}/remote_migrate', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Migrate the container to another cluster. Creates a new migration task. EXPERIMENTAL feature!", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }), + 'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }), + 'target-endpoint' => get_standard_option('proxmox-remote', { + description => "Remote target endpoint", + }), + online => { + type => 'boolean', + description => "Use online/live migration.", + optional => 1, + }, + restart => { + type => 'boolean', + description => "Use restart migration", + optional => 1, + }, + timeout => { + type => 'integer', + description => "Timeout in seconds for shutdown for restart migration", + optional => 1, + default => 180, + }, + delete => { + type => 'boolean', + description => "Delete the original CT and related data after successful migration. By default the original CT is kept on the source cluster in a stopped state.", + optional => 1, + default => 0, + }, + 'target-storage' => get_standard_option('pve-targetstorage', { + optional => 0, + }), + 'target-bridge' => { + type => 'string', + description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. 
Providing the special value '1' will map each source bridge to itself.", + format => 'bridge-pair-list', + }, + bwlimit => { + description => "Override I/O bandwidth limit (in KiB/s).", + optional => 1, + type => 'number', + minimum => '0', + default => 'migrate limit from datacenter or storage config', + }, + }, + }, + returns => { + type => 'string', + description => "the task ID.", + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); + + my $source_vmid = extract_param($param, 'vmid'); + my $target_endpoint = extract_param($param, 'target-endpoint'); + my $target_vmid = extract_param($param, 'target-vmid') // $source_vmid; + + my $delete = extract_param($param, 'delete') // 0; + + PVE::Cluster::check_cfs_quorum(); + + # test if CT exists + my $conf = PVE::LXC::Config->load_config($source_vmid); + PVE::LXC::Config->check_lock($conf); + + # try to detect errors early + if (PVE::LXC::check_running($source_vmid)) { + die "can't migrate running container without --online or --restart\n" + if !$param->{online} && !$param->{restart}; + } + + raise_param_exc({ vmid => "cannot migrate HA-managed CT to remote cluster" }) + if PVE::HA::Config::vm_is_ha_managed($source_vmid); + + my $remote = PVE::JSONSchema::parse_property_string('proxmox-remote', $target_endpoint); + + # TODO: move this as helper somewhere appropriate? + my $conn_args = { + protocol => 'https', + host => $remote->{host}, + port => $remote->{port} // 8006, + apitoken => $remote->{apitoken}, + }; + + my $fp; + if ($fp = $remote->{fingerprint}) { + $conn_args->{cached_fingerprints} = { uc($fp) => 1 }; + } + + print "Establishing API connection with remote at '$remote->{host}'\n"; + + my $api_client = PVE::APIClient::LWP->new(%$conn_args); + + if (!defined($fp)) { + my $cert_info = $api_client->get("/nodes/localhost/certificates/info"); + foreach my $cert (@$cert_info) { + my $filename = $cert->{filename}; + next if $filename ne 'pveproxy-ssl.pem' && $filename ne 'pve-ssl.pem'; + $fp = $cert->{fingerprint} if !$fp || $filename eq 'pveproxy-ssl.pem'; + } + $conn_args->{cached_fingerprints} = { uc($fp) => 1 } + if defined($fp); + } + + my $storecfg = PVE::Storage::config(); + my $target_storage = extract_param($param, 'target-storage'); + my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') }; + raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" }) + if $@; + + my $target_bridge = extract_param($param, 'target-bridge'); + my $bridgemap = eval { PVE::JSONSchema::parse_idmap($target_bridge, 'pve-bridge-id') }; + raise_param_exc({ 'target-bridge' => "failed to parse bridge map: $@" }) + if $@; + + die "remote migration requires explicit storage mapping!\n" + if $storagemap->{identity}; + + $param->{storagemap} = $storagemap; + $param->{bridgemap} = $bridgemap; + $param->{remote} = { + conn => $conn_args, # re-use fingerprint for tunnel + client => $api_client, + vmid => $target_vmid, + }; + $param->{migration_type} = 'websocket'; + $param->{delete} = $delete if $delete; + + my $cluster_status = $api_client->get("/cluster/status"); + my $target_node; + foreach my $entry (@$cluster_status) { + next if $entry->{type} ne 'node'; + if ($entry->{local}) { + $target_node = $entry->{name}; + last; + } + } + + die "couldn't determine endpoint's node name\n" + if !defined($target_node); + + my $realcmd = sub { + PVE::LXC::Migrate->migrate($target_node, $remote->{host}, $source_vmid, $param); + }; + + my $worker = sub { + 
return PVE::GuestHelpers::guest_migration_lock($source_vmid, 10, $realcmd); + }; + + return $rpcenv->fork_worker('vzmigrate', $source_vmid, $authuser, $worker); + }}); + + __PACKAGE__->register_method({ name => 'migrate_vm', path => '{vmid}/migrate', @@ -1060,6 +1296,7 @@ __PACKAGE__->register_method({ description => "Target node.", completion => \&PVE::Cluster::complete_migration_target, }), + 'target-storage' => get_standard_option('pve-targetstorage'), online => { type => 'boolean', description => "Use online/live migration.", @@ -1076,12 +1313,6 @@ __PACKAGE__->register_method({ optional => 1, default => 180, }, - force => { - type => 'boolean', - description => "Force migration despite local bind / device" . - " mounts. NOTE: deprecated, use 'shared' property of mount point instead.", - optional => 1, - }, bwlimit => { description => "Override I/O bandwidth limit (in KiB/s).", optional => 1, @@ -1124,6 +1355,25 @@ __PACKAGE__->register_method({ if !$param->{online} && !$param->{restart}; } + if (my $targetstorage = delete $param->{'target-storage'}) { + my $storecfg = PVE::Storage::config(); + my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') }; + raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" }) + if $@; + + $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']) + if !defined($storagemap->{identity}); + + foreach my $target_sid (values %{$storagemap->{entries}}) { + $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $target_sid, $target); + } + + $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storagemap->{default}, $target) + if $storagemap->{default}; + + $param->{storagemap} = $storagemap; + } + if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') { my $hacmd = sub { @@ -1293,7 +1543,7 @@ __PACKAGE__->register_method({ description => "You need 'VM.Clone' permissions on /vms/{vmid}, " . "and 'VM.Allocate' permissions " . "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " . 
- "'Datastore.AllocateSpace' on any used storage.", + "'Datastore.AllocateSpace' on any used storage, and 'SDN.Use' on any bridge.", check => [ 'and', ['perm', '/vms/{vmid}', [ 'VM.Clone' ]], @@ -1384,7 +1634,7 @@ __PACKAGE__->register_method({ PVE::Storage::storage_check_enabled($storecfg, $storage); if ($target) { # check if storage is available on target node - PVE::Storage::storage_check_node($storecfg, $storage, $target); + PVE::Storage::storage_check_enabled($storecfg, $storage, $target); # clone only works if target storage is shared my $scfg = PVE::Storage::storage_config($storecfg, $storage); die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared}; @@ -1393,117 +1643,143 @@ __PACKAGE__->register_method({ PVE::Cluster::check_cfs_quorum(); - my $conffile; my $newconf = {}; my $mountpoints = {}; my $fullclone = {}; my $vollist = []; my $running; - PVE::LXC::Config->lock_config($vmid, sub { - my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk'); + my $lock_and_reload = sub { + my ($vmid, $code) = @_; + return PVE::LXC::Config->lock_config($vmid, sub { + my $conf = PVE::LXC::Config->load_config($vmid); + die "Lost 'create' config lock, aborting.\n" + if !PVE::LXC::Config->has_lock($conf, 'create'); + + return $code->($conf); + }); + }; + + my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk'); + + eval { + PVE::LXC::Config->create_and_lock_config($newid, 0); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; + warn "Failed to remove source CT config lock - $@\n" if $@; + + die $err; + } + eval { $running = PVE::LXC::check_running($vmid) || 0; my $full = extract_param($param, 'full'); if (!defined($full)) { $full = !PVE::LXC::Config->is_template($src_conf); } - die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full; - - eval { - die "snapshot '$snapname' does not exist\n" - if $snapname && !defined($src_conf->{snapshots}->{$snapname}); - my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf; + PVE::Firewall::clone_vmfw_conf($vmid, $newid); - $conffile = PVE::LXC::Config->config_file($newid); - die "unable to create CT $newid: config file already exists\n" - if -f $conffile; + die "parameter 'storage' not allowed for linked clones\n" + if defined($storage) && !$full; - my $sharedvm = 1; - for my $opt (sort keys %$src_conf) { - next if $opt =~ m/^unused\d+$/; + die "snapshot '$snapname' does not exist\n" + if $snapname && !defined($src_conf->{snapshots}->{$snapname}); - my $value = $src_conf->{$opt}; + my $src_conf = $snapname ? 
$src_conf->{snapshots}->{$snapname} : $src_conf; - if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) { - my $mp = PVE::LXC::Config->parse_volume($opt, $value); + my $sharedvm = 1; + for my $opt (sort keys %$src_conf) { + next if $opt =~ m/^unused\d+$/; - if ($mp->{type} eq 'volume') { - my $volid = $mp->{volume}; + my $value = $src_conf->{$opt}; - my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); - $sid = $storage if defined($storage); - my $scfg = PVE::Storage::storage_config($storecfg, $sid); - if (!$scfg->{shared}) { - $sharedvm = 0; - warn "found non-shared volume: $volid\n" if $target; - } + if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) { + my $mp = PVE::LXC::Config->parse_volume($opt, $value); - $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); + if ($mp->{type} eq 'volume') { + my $volid = $mp->{volume}; - if ($full) { - die "Cannot do full clones on a running container without snapshots\n" - if $running && !defined($snapname); - $fullclone->{$opt} = 1; - } else { - # not full means clone instead of copy - die "Linked clone feature for '$volid' is not available\n" - if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running, {'valid_target_formats' => ['raw', 'subvol']}); - } + my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); + $sid = $storage if defined($storage); + my $scfg = PVE::Storage::storage_config($storecfg, $sid); + if (!$scfg->{shared}) { + $sharedvm = 0; + warn "found non-shared volume: $volid\n" if $target; + } - $mountpoints->{$opt} = $mp; - push @$vollist, $volid; + $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); + if ($full) { + die "Cannot do full clones on a running container without snapshots\n" + if $running && !defined($snapname); + $fullclone->{$opt} = 1; } else { - # TODO: allow bind mounts? - die "unable to clone mountpint '$opt' (type $mp->{type})\n"; + # not full means clone instead of copy + die "Linked clone feature for '$volid' is not available\n" + if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running, {'valid_target_formats' => ['raw', 'subvol']}); } - } elsif ($opt =~ m/^net(\d+)$/) { - # always change MAC! address - my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg'); - my $net = PVE::LXC::Config->parse_lxc_network($value); - $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix}); - $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net); + + $mountpoints->{$opt} = $mp; + push @$vollist, $volid; + } else { - # copy everything else - $newconf->{$opt} = $value; + # TODO: allow bind mounts? + die "unable to clone mountpoint '$opt' (type $mp->{type})\n"; } + } elsif ($opt =~ m/^net(\d+)$/) { + # always change MAC! address + my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg'); + my $net = PVE::LXC::Config->parse_lxc_network($value); + $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix}); + $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net); + + PVE::LXC::check_bridge_access($rpcenv, $authuser, $newconf->{$opt}); + } else { + # copy everything else + $newconf->{$opt} = $value; } - die "can't clone CT to node '$target' (CT uses local storage)\n" - if $target && !$sharedvm; + } + die "can't clone CT to node '$target' (CT uses local storage)\n" + if $target && !$sharedvm; - # Replace the 'disk' lock with a 'create' lock. - $newconf->{lock} = 'create'; + # Replace the 'disk' lock with a 'create' lock. 
+ $newconf->{lock} = 'create'; - delete $newconf->{snapshots}; - delete $newconf->{pending}; - delete $newconf->{template}; - if ($param->{hostname}) { - $newconf->{hostname} = $param->{hostname}; - } + # delete all snapshot related config options + delete $newconf->@{qw(snapshots parent snaptime snapstate)}; - if ($param->{description}) { - $newconf->{description} = $param->{description}; - } + delete $newconf->{pending}; + delete $newconf->{template}; - # create empty/temp config - this fails if CT already exists on other node + $newconf->{hostname} = $param->{hostname} if $param->{hostname}; + $newconf->{description} = $param->{description} if $param->{description}; + + $lock_and_reload->($newid, sub { PVE::LXC::Config->write_config($newid, $newconf); + }); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; + warn "Failed to remove source CT config lock - $@\n" if $@; + + eval { + $lock_and_reload->($newid, sub { + PVE::LXC::Config->destroy_config($newid); + PVE::Firewall::remove_vmfw_conf($newid); + }); }; - if (my $err = $@) { - eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; - warn $@ if $@; - die $err; - } - }); + warn "Failed to remove target CT config - $@\n" if $@; + + die $err; + } my $update_conf = sub { my ($key, $value) = @_; - return PVE::LXC::Config->lock_config($newid, sub { - my $conf = PVE::LXC::Config->load_config($newid); - die "Lost 'create' config lock, aborting.\n" - if !PVE::LXC::Config->has_lock($conf, 'create'); + return $lock_and_reload->($newid, sub { + my $conf = shift; $conf->{$key} = $value; PVE::LXC::Config->write_config($newid, $conf); }); @@ -1549,41 +1825,68 @@ __PACKAGE__->register_method({ } PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; - PVE::LXC::Config->remove_lock($newid, 'create'); - if ($target) { - # always deactivate volumes - avoid lvm LVs to be active on several nodes - PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running; - PVE::Storage::deactivate_volumes($storecfg, $newvollist); + $lock_and_reload->($newid, sub { + my $conf = shift; + my $rootdir = PVE::LXC::mount_all($newid, $storecfg, $conf, 1); - my $newconffile = PVE::LXC::Config->config_file($newid, $target); - die "Failed to move config to node '$target' - rename failed: $!\n" - if !rename($conffile, $newconffile); - } + eval { + PVE::LXC::create_ifaces_ipams_ips($conf, $vmid); + my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); + $lxc_setup->post_clone_hook($conf); + }; + my $err = $@; + eval { PVE::LXC::umount_all($newid, $storecfg, $conf, 1); }; + if ($err) { + warn "$@\n" if $@; + die $err; + } else { + die $@ if $@; + } + }); }; my $err = $@; - # Unlock the source config in any case: eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; warn $@ if $@; if ($err) { - # Now cleanup the config & disks: - unlink $conffile; - + # Now cleanup the config & disks & ipam: sleep 1; # some storages like rbd need to wait before release volume - really? 
foreach my $volid (@$newvollist) { eval { PVE::Storage::vdisk_free($storecfg, $volid); }; warn $@ if $@; } + + eval { + $lock_and_reload->($newid, sub { + my $conf = shift; + PVE::LXC::delete_ifaces_ipams_ips($conf, $newid); + PVE::LXC::Config->destroy_config($newid); + PVE::Firewall::remove_vmfw_conf($newid); + }); + }; + warn "Failed to remove target CT config - $@\n" if $@; + die "clone failed: $err"; } + $lock_and_reload->($newid, sub { + PVE::LXC::Config->remove_lock($newid, 'create'); + + if ($target) { + # always deactivate volumes - avoid lvm LVs to be active on several nodes + PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running; + PVE::Storage::deactivate_volumes($storecfg, $newvollist); + + PVE::LXC::Config->move_config_to_node($newid, $target); + } + }); + return; }; - PVE::Firewall::clone_vmfw_conf($vmid, $newid); return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd); }}); @@ -1640,23 +1943,20 @@ __PACKAGE__->register_method({ my $sizestr = extract_param($param, 'size'); my $ext = ($sizestr =~ s/^\+//); - my $newsize = PVE::JSONSchema::parse_size($sizestr); - die "invalid size string" if !defined($newsize); + my $request_size = PVE::JSONSchema::parse_size($sizestr); + die "invalid size string" if !defined($request_size); die "no options specified\n" if !scalar(keys %$param); - PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $param, []); - my $storage_cfg = cfs_read_file("storage.cfg"); - my $code = sub { - + my $load_and_check = sub { my $conf = PVE::LXC::Config->load_config($vmid); PVE::LXC::Config->check_lock($conf); - PVE::Tools::assert_if_modified($digest, $conf->{digest}); + PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $conf, $param, [], $conf->{unprivileged}); - my $running = PVE::LXC::check_running($vmid); + PVE::Tools::assert_if_modified($digest, $conf->{digest}); my $disk = $param->{disk}; my $mp = PVE::LXC::Config->parse_volume($disk, $conf->{$disk}); @@ -1669,9 +1969,6 @@ __PACKAGE__->register_method({ die "can't resize mount point owned by another container ($owner)" if $vmid != $owner; - die "can't resize volume: $disk if snapshot exists\n" - if %{$conf->{snapshots}} && $format eq 'qcow2'; - my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid); $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']); @@ -1682,66 +1979,77 @@ __PACKAGE__->register_method({ die "Could not determine current size of volume '$volid'\n" if !defined($size); - $newsize += $size if $ext; + my $newsize = $ext ? 
$size + $request_size : $request_size; $newsize = int($newsize); die "unable to shrink disk size\n" if $newsize < $size; die "disk is already at specified size\n" if $size == $newsize; + return ($conf, $disk, $mp, $volid, $format, $newsize); + }; + + my $code = sub { + my ($conf, $disk, $mp, $volid, $format, $newsize) = $load_and_check->(); + + my $running = PVE::LXC::check_running($vmid); + PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr"); - my $realcmd = sub { - # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so - # we pass 0 here (parameter only makes sense for qemu) - PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0); - $mp->{size} = $newsize; - $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs'); + # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so + # we pass 0 here (parameter only makes sense for qemu) + PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0); - PVE::LXC::Config->write_config($vmid, $conf); + $mp->{size} = $newsize; + $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs'); - if ($format eq 'raw') { - # we need to ensure that the volume is mapped, if not needed this is a NOP - my $path = PVE::Storage::map_volume($storage_cfg, $volid); - $path = PVE::Storage::path($storage_cfg, $volid) if !defined($path); - if ($running) { - - $mp->{mp} = '/'; - my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1]; - $path = PVE::LXC::query_loopdev($path) if $use_loopdev; - die "internal error: CT running but mount point not attached to a loop device" - if !$path; - PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev; - - # In order for resize2fs to know that we need online-resizing a mountpoint needs - # to be visible to it in its namespace. - # To not interfere with the rest of the system we unshare the current mount namespace, - # mount over /tmp and then run resize2fs. - - # interestingly we don't need to e2fsck on mounted systems... - my $quoted = PVE::Tools::shellquote($path); - my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted"; - eval { - PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]); - }; - warn "Failed to update the container's filesystem: $@\n" if $@; - } else { - eval { - PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]); - PVE::Tools::run_command(['resize2fs', $path]); - }; - warn "Failed to update the container's filesystem: $@\n" if $@; + PVE::LXC::Config->write_config($vmid, $conf); - # always un-map if not running, this is a NOP if not needed - PVE::Storage::unmap_volume($storage_cfg, $volid); - } + if ($format eq 'raw') { + # we need to ensure that the volume is mapped, if not needed this is a NOP + my $path = PVE::Storage::map_volume($storage_cfg, $volid); + $path = PVE::Storage::path($storage_cfg, $volid) if !defined($path); + if ($running) { + + $mp->{mp} = '/'; + my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1]; + $path = PVE::LXC::query_loopdev($path) if $use_loopdev; + die "internal error: CT running but mount point not attached to a loop device" + if !$path; + PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev; + + # In order for resize2fs to know that we need online-resizing a mountpoint needs + # to be visible to it in its namespace. 
+ # To not interfere with the rest of the system we unshare the current mount namespace, + # mount over /tmp and then run resize2fs. + + # interestingly we don't need to e2fsck on mounted systems... + my $quoted = PVE::Tools::shellquote($path); + my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted"; + eval { + PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]); + }; + warn "Failed to update the container's filesystem: $@\n" if $@; + } else { + eval { + PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]); + PVE::Tools::run_command(['resize2fs', $path]); + }; + warn "Failed to update the container's filesystem: $@\n" if $@; + + # always un-map if not running, this is a NOP if not needed + PVE::Storage::unmap_volume($storage_cfg, $volid); } - }; + } + }; - return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd); + my $worker = sub { + PVE::LXC::Config->lock_config($vmid, $code);; }; - return PVE::LXC::Config->lock_config($vmid, $code);; + $load_and_check->(); # early checks before forking+locking + + return $rpcenv->fork_worker('resize', $vmid, $authuser, $worker); }}); __PACKAGE__->register_method({ @@ -1750,39 +2058,45 @@ __PACKAGE__->register_method({ method => 'POST', protected => 1, proxyto => 'node', - description => "Move a rootfs-/mp-volume to a different storage", + description => "Move a rootfs-/mp-volume to a different storage or to a different container.", permissions => { description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " . - "and 'Datastore.AllocateSpace' permissions on the storage.", - check => - [ 'and', - ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], - ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]], - ], + "and 'Datastore.AllocateSpace' permissions on the storage. To move ". + "a volume to another container, you need the permissions on the ". + "target container as well.", + check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], }, parameters => { additionalProperties => 0, properties => { node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }), + 'target-vmid' => get_standard_option('pve-vmid', { + completion => \&PVE::LXC::complete_ctid, + optional => 1, + }), volume => { type => 'string', - enum => [ PVE::LXC::Config->valid_volume_keys() ], + #TODO: check how to handle unused mount points as the mp parameter is not configured + enum => [ PVE::LXC::Config->valid_volume_keys_with_unused() ], description => "Volume which will be moved.", }, storage => get_standard_option('pve-storage-id', { description => "Target Storage.", completion => \&PVE::Storage::complete_storage_enabled, + optional => 1, }), delete => { type => 'boolean', - description => "Delete the original volume after successful copy. By default the original is kept as an unused volume entry.", + description => "Delete the original volume after successful copy. By default the " . + "original is kept as an unused volume entry.", optional => 1, default => 0, }, digest => { type => 'string', - description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', + description => 'Prevent changes if current configuration file has different SHA1 " . + "digest. 
This can be used to prevent concurrent modifications.', maxLength => 40, optional => 1, }, @@ -1793,10 +2107,25 @@ __PACKAGE__->register_method({ minimum => '0', default => 'clone limit from datacenter or storage config', }, - }, - }, - returns => { - type => 'string', + 'target-volume' => { + type => 'string', + description => "The config key the volume will be moved to. Default is the " . + "source volume key.", + enum => [PVE::LXC::Config->valid_volume_keys_with_unused()], + optional => 1, + }, + 'target-digest' => { + type => 'string', + description => 'Prevent changes if current configuration file of the target " . + "container has a different SHA1 digest. This can be used to prevent " . + "concurrent modifications.', + maxLength => 40, + optional => 1, + }, + }, + }, + returns => { + type => 'string', }, code => sub { my ($param) = @_; @@ -1807,34 +2136,59 @@ __PACKAGE__->register_method({ my $vmid = extract_param($param, 'vmid'); + my $target_vmid = extract_param($param, 'target-vmid'); + my $storage = extract_param($param, 'storage'); my $mpkey = extract_param($param, 'volume'); + my $target_mpkey = extract_param($param, 'target-volume') // $mpkey; + + my $digest = extract_param($param, 'digest'); + + my $target_digest = extract_param($param, 'target-digest'); + my $lockname = 'disk'; my ($mpdata, $old_volid); - PVE::LXC::Config->lock_config($vmid, sub { - my $conf = PVE::LXC::Config->load_config($vmid); - PVE::LXC::Config->check_lock($conf); + die "either set storage or target-vmid, but not both\n" + if $storage && $target_vmid; - die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid); + my $storecfg = PVE::Storage::config(); - $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey}); - $old_volid = $mpdata->{volume}; + my $move_to_storage_checks = sub { + PVE::LXC::Config->lock_config($vmid, sub { + my $conf = PVE::LXC::Config->load_config($vmid); + PVE::LXC::Config->check_lock($conf); - die "you can't move a volume with snapshots and delete the source\n" - if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid); + die "cannot move volumes of a running container\n" + if PVE::LXC::check_running($vmid); - PVE::Tools::assert_if_modified($param->{digest}, $conf->{digest}); + if ($mpkey =~ m/^unused\d+$/) { + die "cannot move volume '$mpkey', only configured volumes can be moved to ". 
+ "another storage\n"; + } - PVE::LXC::Config->set_lock($vmid, $lockname); - }); + $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey}); + $old_volid = $mpdata->{volume}; - my $realcmd = sub { + die "you can't move a volume with snapshots and delete the source\n" + if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid); + + PVE::Tools::assert_if_modified($digest, $conf->{digest}); + + PVE::LXC::Config->set_lock($vmid, $lockname); + }); + }; + + my $storage_realcmd = sub { eval { - PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage"); + PVE::Cluster::log_msg( + 'info', + $authuser, + "move volume CT $vmid: move --volume $mpkey --storage $storage" + ); my $conf = PVE::LXC::Config->load_config($vmid); my $storage_cfg = PVE::Storage::config(); @@ -1845,8 +2199,20 @@ __PACKAGE__->register_method({ PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]); my $bwlimit = extract_param($param, 'bwlimit'); my $source_storage = PVE::Storage::parse_volume_id($old_volid); - my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$source_storage, $storage], $bwlimit); - $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf, undef, $movelimit); + my $movelimit = PVE::Storage::get_bandwidth_limit( + 'move', + [$source_storage, $storage], + $bwlimit + ); + $new_volid = PVE::LXC::copy_volume( + $mpdata, + $vmid, + $storage, + $storage_cfg, + $conf, + undef, + $movelimit + ); if (PVE::LXC::Config->is_template($conf)) { PVE::Storage::activate_volumes($storage_cfg, [ $new_volid ]); my $template_volid = PVE::Storage::vdisk_create_base($storage_cfg, $new_volid); @@ -1860,7 +2226,10 @@ __PACKAGE__->register_method({ $conf = PVE::LXC::Config->load_config($vmid); PVE::Tools::assert_if_modified($digest, $conf->{digest}); - $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint($mpdata, $mpkey eq 'rootfs'); + $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint( + $mpdata, + $mpkey eq 'rootfs' + ); PVE::LXC::Config->add_unused_volume($conf, $old_volid) if !$param->{delete}; @@ -1882,13 +2251,23 @@ __PACKAGE__->register_method({ die $err; } + my $deactivated = 0; + eval { + PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]); + $deactivated = 1; + }; + warn $@ if $@; + if ($param->{delete}) { - eval { - PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]); - PVE::Storage::vdisk_free($storage_cfg, $old_volid); - }; - if (my $err = $@) { - warn $err; + my $removed = 0; + if ($deactivated) { + eval { + PVE::Storage::vdisk_free($storage_cfg, $old_volid); + $removed = 1; + }; + warn $@ if $@; + } + if (!$removed) { PVE::LXC::Config->lock_config($vmid, sub { my $conf = PVE::LXC::Config->load_config($vmid); PVE::LXC::Config->add_unused_volume($conf, $old_volid); @@ -1902,15 +2281,185 @@ __PACKAGE__->register_method({ warn $@ if $@; die $err if $err; }; - my $task = eval { - $rpcenv->fork_worker('move_volume', $vmid, $authuser, $realcmd); + + my $load_and_check_reassign_configs = sub { + my $vmlist = PVE::Cluster::get_vmlist()->{ids}; + + die "Cannot move to/from 'rootfs'\n" if $mpkey eq "rootfs" || $target_mpkey eq "rootfs"; + + if ($mpkey =~ m/^unused\d+$/ && $target_mpkey !~ m/^unused\d+$/) { + die "Moving an unused volume to a used one is not possible\n"; + } + die "could not find CT ${vmid}\n" if !exists($vmlist->{$vmid}); + die "could not find CT ${target_vmid}\n" if !exists($vmlist->{$target_vmid}); + + my $source_node = $vmlist->{$vmid}->{node}; + my 
$target_node = $vmlist->{$target_vmid}->{node}; + + die "Both containers need to be on the same node ($source_node != $target_node)\n" + if $source_node ne $target_node; + + my $source_conf = PVE::LXC::Config->load_config($vmid); + PVE::LXC::Config->check_lock($source_conf); + my $target_conf; + if ($target_vmid eq $vmid) { + $target_conf = $source_conf; + } else { + $target_conf = PVE::LXC::Config->load_config($target_vmid); + PVE::LXC::Config->check_lock($target_conf); + } + + die "Can't move volumes from or to template CT\n" + if ($source_conf->{template} || $target_conf->{template}); + + if ($digest) { + eval { PVE::Tools::assert_if_modified($digest, $source_conf->{digest}) }; + die "Container ${vmid}: $@" if $@; + } + + if ($target_digest) { + eval { PVE::Tools::assert_if_modified($target_digest, $target_conf->{digest}) }; + die "Container ${target_vmid}: $@" if $@; + } + + die "volume '${mpkey}' for container '$vmid' does not exist\n" + if !defined($source_conf->{$mpkey}); + + die "Target volume key '${target_mpkey}' is already in use for container '$target_vmid'\n" + if exists $target_conf->{$target_mpkey}; + + my $drive = PVE::LXC::Config->parse_volume($mpkey, $source_conf->{$mpkey}); + my $source_volid = $drive->{volume} or die "Volume '${mpkey}' has no associated image\n"; + die "Cannot move volume used by a snapshot to another container\n" + if PVE::LXC::Config->is_volume_in_use_by_snapshots($source_conf, $source_volid); + die "Storage does not support moving of this disk to another container\n" + if !PVE::Storage::volume_has_feature($storecfg, 'rename', $source_volid); + die "Cannot move a bindmount or device mount to another container\n" + if $drive->{type} ne "volume"; + die "Cannot move in-use volume while the source CT is running - detach or shutdown first\n" + if PVE::LXC::check_running($vmid) && $mpkey !~ m/^unused\d+$/; + + my $repl_conf = PVE::ReplicationConfig->new(); + if ($repl_conf->check_for_existing_jobs($target_vmid, 1)) { + my ($storeid, undef) = PVE::Storage::parse_volume_id($source_volid); + my $format = (PVE::Storage::parse_volname($storecfg, $source_volid))[6]; + + die "Cannot move volume on storage '$storeid' to a replicated container - missing replication support\n" + if !PVE::Storage::storage_can_replicate($storecfg, $storeid, $format); + } + + return ($source_conf, $target_conf, $drive); }; - if (my $err = $@) { - eval { PVE::LXC::Config->remove_lock($vmid, $lockname) }; - warn $@ if $@; - die $err; + + my $logfunc = sub { print STDERR "$_[0]\n"; }; + + my $volume_reassignfn = sub { + return PVE::LXC::Config->lock_config($vmid, sub { + return PVE::LXC::Config->lock_config($target_vmid, sub { + my ($source_conf, $target_conf, $drive) = $load_and_check_reassign_configs->(); + my $source_volid = $drive->{volume}; + + my $target_unused = $target_mpkey =~ m/^unused\d+$/; + + print "moving volume '$mpkey' from container '$vmid' to '$target_vmid'\n"; + + my ($storage, $source_volname) = PVE::Storage::parse_volume_id($source_volid); + + my $fmt = (PVE::Storage::parse_volname($storecfg, $source_volid))[6]; + + my $new_volid = PVE::Storage::rename_volume( + $storecfg, + $source_volid, + $target_vmid, + ); + + $drive->{volume} = $new_volid; + + delete $source_conf->{$mpkey}; + print "removing volume '${mpkey}' from container '${vmid}' config\n"; + PVE::LXC::Config->write_config($vmid, $source_conf); + + my $drive_string; + if ($target_unused) { + $drive_string = $new_volid; + } else { + $drive_string = PVE::LXC::Config->print_volume($target_mpkey, $drive); + } + + 
if ($target_unused) { + $target_conf->{$target_mpkey} = $drive_string; + } else { + my $running = PVE::LXC::check_running($target_vmid); + my $param = { $target_mpkey => $drive_string }; + my $errors = PVE::LXC::Config->update_pct_config( + $target_vmid, + $target_conf, + $running, + $param + ); + $rpcenv->warn($errors->{$_}) for keys $errors->%*; + } + + PVE::LXC::Config->write_config($target_vmid, $target_conf); + $target_conf = PVE::LXC::Config->load_config($target_vmid); + + PVE::LXC::update_lxc_config($target_vmid, $target_conf) if !$target_unused; + print "target container '$target_vmid' updated with '$target_mpkey'\n"; + + # remove possible replication snapshots + if (PVE::Storage::volume_has_feature($storecfg,'replicate', $source_volid)) { + eval { + PVE::Replication::prepare( + $storecfg, + [$new_volid], + undef, + 1, + undef, + $logfunc, + ) + }; + if (my $err = $@) { + $rpcenv->warn("Failed to remove replication snapshots on volume ". + "'${target_mpkey}'. Manual cleanup could be necessary. " . + "Error: ${err}\n"); + } + } + }); + }); + }; + + if ($target_vmid && $storage) { + my $msg = "either set 'storage' or 'target-vmid', but not both"; + raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg }); + } elsif ($target_vmid) { + $rpcenv->check_vm_perm($authuser, $target_vmid, undef, ['VM.Config.Disk']) + if $authuser ne 'root@pam'; + + my (undef, undef, $drive) = $load_and_check_reassign_configs->(); + my $storeid = PVE::Storage::parse_volume_id($drive->{volume}); + $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']); + return $rpcenv->fork_worker( + 'move_volume', + "${vmid}-${mpkey}>${target_vmid}-${target_mpkey}", + $authuser, + $volume_reassignfn + ); + } elsif ($storage) { + $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']); + &$move_to_storage_checks(); + my $task = eval { + $rpcenv->fork_worker('move_volume', $vmid, $authuser, $storage_realcmd); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, $lockname) }; + warn $@ if $@; + die $err; + } + return $task; + } else { + my $msg = "both 'storage' and 'target-vmid' missing, either needs to be set"; + raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg }); } - return $task; }}); __PACKAGE__->register_method({ @@ -1968,4 +2517,523 @@ __PACKAGE__->register_method({ return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash); }}); +__PACKAGE__->register_method({ + name => 'ip', + path => '{vmid}/interfaces', + method => 'GET', + protected => 1, + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]], + }, + description => 'Get IP addresses of the specified container interface.', + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }), + }, + }, + returns => { + type => "array", + items => { + type => 'object', + properties => { + name => { + type => 'string', + description => 'The name of the interface', + optional => 0, + }, + hwaddr => { + type => 'string', + description => 'The MAC address of the interface', + optional => 0, + }, + inet => { + type => 'string', + description => 'The IPv4 address of the interface', + optional => 1, + }, + inet6 => { + type => 'string', + description => 'The IPv6 address of the interface', + optional => 1, + }, + } + }, + }, + code => sub { + my ($param) = @_; + + return PVE::LXC::get_interfaces($param->{vmid}); + }}); + 
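+# Usage sketch for the interfaces endpoint above (node and vmid are
+# illustrative, output shape abbreviated):
+#
+#   pvesh get /nodes/localhost/lxc/100/interfaces
+#
+# would return one entry per interface following the schema above, e.g.
+#   [ { name => 'eth0', hwaddr => 'bc:24:11:xx:xx:xx', inet => '192.0.2.10/24' } ]
+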
+__PACKAGE__->register_method({ + name => 'mtunnel', + path => '{vmid}/mtunnel', + method => 'POST', + protected => 1, + description => 'Migration tunnel endpoint - only for internal use by CT migration.', + permissions => { + check => + [ 'and', + ['perm', '/vms/{vmid}', [ 'VM.Allocate' ]], + ['perm', '/', [ 'Sys.Incoming' ]], + ], + description => "You need 'VM.Allocate' permissions on '/vms/{vmid}' and Sys.Incoming" . + " on '/'. Further permission checks happen during the actual migration.", + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + storages => { + type => 'string', + format => 'pve-storage-id-list', + optional => 1, + description => 'List of storages to check permission and availability. Will be checked again for all actually used storages during migration.', + }, + bridges => { + type => 'string', + format => 'pve-bridge-id-list', + optional => 1, + description => 'List of network bridges to check availability. Will be checked again for actually used bridges during migration.', + }, + }, + }, + returns => { + additionalProperties => 0, + properties => { + upid => { type => 'string' }, + ticket => { type => 'string' }, + socket => { type => 'string' }, + }, + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + my $vmid = extract_param($param, 'vmid'); + + my $storages = extract_param($param, 'storages'); + my $bridges = extract_param($param, 'bridges'); + + my $nodename = PVE::INotify::nodename(); + + raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" }) + if $node ne 'localhost' && $node ne $nodename; + + $node = $nodename; + + my $storecfg = PVE::Storage::config(); + foreach my $storeid (PVE::Tools::split_list($storages)) { + $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storeid, $node); + } + + foreach my $bridge (PVE::Tools::split_list($bridges)) { + PVE::Network::read_bridge_mtu($bridge); + } + + PVE::Cluster::check_cfs_quorum(); + + my $socket_addr = "/run/pve/ct-$vmid.mtunnel"; + + my $lock = 'create'; + eval { PVE::LXC::Config->create_and_lock_config($vmid, 0, $lock); }; + + raise_param_exc({ vmid => "unable to create empty CT config - $@"}) + if $@; + + my $realcmd = sub { + my $state = { + storecfg => PVE::Storage::config(), + lock => $lock, + vmid => $vmid, + }; + + my $run_locked = sub { + my ($code, $params) = @_; + return PVE::LXC::Config->lock_config($state->{vmid}, sub { + my $conf = PVE::LXC::Config->load_config($state->{vmid}); + + $state->{conf} = $conf; + + die "Encountered wrong lock - aborting mtunnel command handling.\n" + if $state->{lock} && !PVE::LXC::Config->has_lock($conf, $state->{lock}); + + return $code->($params); + }); + }; + + my $cmd_desc = { + config => { + conf => { + type => 'string', + description => 'Full CT config, adapted for target cluster/node', + }, + 'firewall-config' => { + type => 'string', + description => 'CT firewall config', + optional => 1, + }, + }, + ticket => { + path => { + type => 'string', + description => 'socket path for which the ticket should be valid. 
must be known to current mtunnel instance.', + }, + }, + quit => { + cleanup => { + type => 'boolean', + description => 'remove CT config and volumes, aborting migration', + default => 0, + }, + }, + 'disk-import' => $PVE::StorageTunnel::cmd_schema->{'disk-import'}, + 'query-disk-import' => $PVE::StorageTunnel::cmd_schema->{'query-disk-import'}, + bwlimit => $PVE::StorageTunnel::cmd_schema->{bwlimit}, + }; + + my $cmd_handlers = { + 'version' => sub { + # compared against other end's version + # bump/reset for breaking changes + # bump/bump for opt-in changes + return { + api => $PVE::LXC::Migrate::WS_TUNNEL_VERSION, + age => 0, + }; + }, + 'config' => sub { + my ($params) = @_; + + # parse and write out VM FW config if given + if (my $fw_conf = $params->{'firewall-config'}) { + my ($path, $fh) = PVE::Tools::tempfile_contents($fw_conf, 700); + + my $empty_conf = { + rules => [], + options => {}, + aliases => {}, + ipset => {} , + ipset_comments => {}, + }; + my $cluster_fw_conf = PVE::Firewall::load_clusterfw_conf(); + + # TODO: add flag for strict parsing? + # TODO: add import sub that does all this given raw content? + my $vmfw_conf = PVE::Firewall::generic_fw_config_parser($path, $cluster_fw_conf, $empty_conf, 'vm'); + $vmfw_conf->{vmid} = $state->{vmid}; + PVE::Firewall::save_vmfw_conf($state->{vmid}, $vmfw_conf); + + $state->{cleanup}->{fw} = 1; + } + + my $conf_fn = "incoming/lxc/$state->{vmid}.conf"; + my $new_conf = PVE::LXC::Config::parse_pct_config($conf_fn, $params->{conf}, 1); + delete $new_conf->{lock}; + delete $new_conf->{digest}; + + my $unprivileged = delete $new_conf->{unprivileged}; + my $arch = delete $new_conf->{arch}; + + # TODO handle properly? + delete $new_conf->{snapshots}; + delete $new_conf->{parent}; + delete $new_conf->{pending}; + delete $new_conf->{lxc}; + + PVE::LXC::Config->remove_lock($state->{vmid}, 'create'); + + eval { + my $conf = { + unprivileged => $unprivileged, + arch => $arch, + }; + PVE::LXC::check_ct_modify_config_perm( + $rpcenv, + $authuser, + $state->{vmid}, + undef, + $conf, + $new_conf, + undef, + $unprivileged, + ); + my $errors = PVE::LXC::Config->update_pct_config( + $state->{vmid}, + $conf, + 0, + $new_conf, + [], + [], + ); + raise_param_exc($errors) if scalar(keys %$errors); + PVE::LXC::Config->write_config($state->{vmid}, $conf); + PVE::LXC::update_lxc_config($vmid, $conf); + }; + if (my $err = $@) { + # revert to locked previous config + my $conf = PVE::LXC::Config->load_config($state->{vmid}); + $conf->{lock} = 'create'; + PVE::LXC::Config->write_config($state->{vmid}, $conf); + + die $err; + } + + my $conf = PVE::LXC::Config->load_config($state->{vmid}); + $conf->{lock} = 'migrate'; + PVE::LXC::Config->write_config($state->{vmid}, $conf); + + $state->{lock} = 'migrate'; + + return; + }, + 'bwlimit' => sub { + my ($params) = @_; + return PVE::StorageTunnel::handle_bwlimit($params); + }, + 'disk-import' => sub { + my ($params) = @_; + + $check_storage_access_migrate->( + $rpcenv, + $authuser, + $state->{storecfg}, + $params->{storage}, + $node + ); + + $params->{unix} = "/run/pve/ct-$state->{vmid}.storage"; + + return PVE::StorageTunnel::handle_disk_import($state, $params); + }, + 'query-disk-import' => sub { + my ($params) = @_; + + return PVE::StorageTunnel::handle_query_disk_import($state, $params); + }, + 'unlock' => sub { + PVE::LXC::Config->remove_lock($state->{vmid}, $state->{lock}); + delete $state->{lock}; + return; + }, + 'start' => sub { + PVE::LXC::vm_start( + $state->{vmid}, + $state->{conf}, + 0 + ); + + return; + }, 
+ 'stop' => sub { + PVE::LXC::vm_stop($state->{vmid}, 1, 10, 1); + return; + }, + 'ticket' => sub { + my ($params) = @_; + + my $path = $params->{path}; + + die "Not allowed to generate ticket for unknown socket '$path'\n" + if !defined($state->{sockets}->{$path}); + + return { ticket => PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$path") }; + }, + 'quit' => sub { + my ($params) = @_; + + if ($params->{cleanup}) { + if ($state->{cleanup}->{fw}) { + PVE::Firewall::remove_vmfw_conf($state->{vmid}); + } + + for my $volid (keys $state->{cleanup}->{volumes}->%*) { + print "freeing volume '$volid' as part of cleanup\n"; + eval { PVE::Storage::vdisk_free($state->{storecfg}, $volid) }; + warn $@ if $@; + } + + PVE::LXC::destroy_lxc_container( + $state->{storecfg}, + $state->{vmid}, + $state->{conf}, + undef, + 0, + ); + } + + print "switching to exit-mode, waiting for client to disconnect\n"; + $state->{exit} = 1; + return; + }, + }; + + $run_locked->(sub { + my $socket_addr = "/run/pve/ct-$state->{vmid}.mtunnel"; + unlink $socket_addr; + + $state->{socket} = IO::Socket::UNIX->new( + Type => SOCK_STREAM(), + Local => $socket_addr, + Listen => 1, + ); + + $state->{socket_uid} = getpwnam('www-data') + or die "Failed to resolve user 'www-data' to numeric UID\n"; + chown $state->{socket_uid}, -1, $socket_addr; + }); + + print "mtunnel started\n"; + + my $conn = eval { PVE::Tools::run_with_timeout(300, sub { $state->{socket}->accept() }) }; + if ($@) { + warn "Failed to accept tunnel connection - $@\n"; + + warn "Removing tunnel socket..\n"; + unlink $state->{socket}; + + warn "Removing temporary VM config..\n"; + $run_locked->(sub { + PVE::LXC::destroy_config($state->{vmid}); + }); + + die "Exiting mtunnel\n"; + } + + $state->{conn} = $conn; + + my $reply_err = sub { + my ($msg) = @_; + + my $reply = JSON::encode_json({ + success => JSON::false, + msg => $msg, + }); + $conn->print("$reply\n"); + $conn->flush(); + }; + + my $reply_ok = sub { + my ($res) = @_; + + $res->{success} = JSON::true; + my $reply = JSON::encode_json($res); + $conn->print("$reply\n"); + $conn->flush(); + }; + + while (my $line = <$conn>) { + chomp $line; + + # untaint, we validate below if needed + ($line) = $line =~ /^(.*)$/; + my $parsed = eval { JSON::decode_json($line) }; + if ($@) { + $reply_err->("failed to parse command - $@"); + next; + } + + my $cmd = delete $parsed->{cmd}; + if (!defined($cmd)) { + $reply_err->("'cmd' missing"); + } elsif ($state->{exit}) { + $reply_err->("tunnel is in exit-mode, processing '$cmd' cmd not possible"); + next; + } elsif (my $handler = $cmd_handlers->{$cmd}) { + print "received command '$cmd'\n"; + eval { + if (my $props = $cmd_desc->{$cmd}) { + my $schema = { + type => 'object', + properties => $props, + }; + PVE::JSONSchema::validate($parsed, $schema); + } else { + $parsed = {}; + } + my $res = $run_locked->($handler, $parsed); + $reply_ok->($res); + }; + $reply_err->("failed to handle '$cmd' command - $@") + if $@; + } else { + $reply_err->("unknown command '$cmd' given"); + } + } + + if ($state->{exit}) { + print "mtunnel exited\n"; + } else { + die "mtunnel exited unexpectedly\n"; + } + }; + + my $ticket = PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$socket_addr"); + my $upid = $rpcenv->fork_worker('vzmtunnel', $vmid, $authuser, $realcmd); + + return { + ticket => $ticket, + upid => $upid, + socket => $socket_addr, + }; + }}); + +__PACKAGE__->register_method({ + name => 'mtunnelwebsocket', + path => '{vmid}/mtunnelwebsocket', + method => 'GET', + 
permissions => {
+	description => "You need to pass a ticket valid for the selected socket. Tickets can be created via the mtunnel API call, which will check permissions accordingly.",
+	user => 'all', # check inside
+    },
+    description => 'Migration tunnel endpoint for websocket upgrade - only for internal use by CT migration.',
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    socket => {
+		type => "string",
+		description => "unix socket to forward to",
+	    },
+	    ticket => {
+		type => "string",
+		description => "ticket returned by the initial 'mtunnel' API call, or retrieved via the 'ticket' tunnel command",
+	    },
+	},
+    },
+    returns => {
+	type => "object",
+	properties => {
+	    port => { type => 'string', optional => 1 },
+	    socket => { type => 'string', optional => 1 },
+	},
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	my $nodename = PVE::INotify::nodename();
+	my $node = extract_param($param, 'node');
+
+	raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+	    if $node ne 'localhost' && $node ne $nodename;
+
+	my $vmid = $param->{vmid};
+	# check that the CT exists
+	PVE::LXC::Config->load_config($vmid);
+
+	my $socket = $param->{socket};
+	PVE::AccessControl::verify_tunnel_ticket($param->{ticket}, $authuser, "/socket/$socket");
+
+	return { socket => $socket };
+    }});
 1;
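
Note: the mtunnel endpoint above speaks a line-based protocol - every command
and reply is a single JSON object terminated by a newline, with a mandatory
'cmd' field on requests and a 'success' flag (plus 'msg' on failure) on
replies. Below is a minimal client sketch under stated assumptions: the
socket path and vmid are hypothetical, and the standalone-script framing is
for illustration only - the real consumer is PVE::LXC::Migrate, which reaches
the socket through the websocket tunnel with a ticket.

    #!/usr/bin/perl
    use strict;
    use warnings;

    use IO::Socket::UNIX;
    use Socket qw(SOCK_STREAM);
    use JSON qw(encode_json decode_json);

    # hypothetical socket path, following the "/run/pve/ct-$vmid.mtunnel"
    # pattern used by the endpoint above
    my $sock = IO::Socket::UNIX->new(
        Type => SOCK_STREAM(),
        Peer => '/run/pve/ct-100.mtunnel',
    ) or die "connect failed - $!\n";

    sub tunnel_cmd {
        my ($cmd, $params) = @_;

        # one JSON object per line, 'cmd' selects the handler on the far end
        $sock->print(encode_json({ cmd => $cmd, %{ $params // {} } }) . "\n");
        $sock->flush();

        my $line = <$sock>;
        die "tunnel closed unexpectedly\n" if !defined($line);

        my $res = decode_json($line);
        die "command '$cmd' failed - $res->{msg}\n" if !$res->{success};
        return $res;
    }

    # negotiate the tunnel version first, as the migration code does
    my $version = tunnel_cmd('version');
    print "remote tunnel API version $version->{api}, age $version->{age}\n";

    # switch the tunnel to exit-mode without removing the target CT
    tunnel_cmd('quit', { cleanup => 0 });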