X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=src%2FPVE%2FAPI2%2FLXC.pm;h=1cbd3d83b82fe365c3cf03598bbcffff5a54941d;hb=80440faa0ce47df75fab407f9f87dccf200965b2;hp=a259eec06c5a46f7c47e2c9bf7450f9445c0f2e1;hpb=db18c1e4c50470e74912175f66b41f14ca8fda82;p=pve-container.git diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm index a259eec..1cbd3d8 100644 --- a/src/PVE/API2/LXC.pm +++ b/src/PVE/API2/LXC.pm @@ -5,7 +5,7 @@ use warnings; use PVE::SafeSyslog; use PVE::Tools qw(extract_param run_command); -use PVE::Exception qw(raise raise_param_exc); +use PVE::Exception qw(raise raise_param_exc raise_perm_exc); use PVE::INotify; use PVE::Cluster qw(cfs_read_file); use PVE::AccessControl; @@ -13,18 +13,25 @@ use PVE::Firewall; use PVE::Storage; use PVE::RESTHandler; use PVE::RPCEnvironment; +use PVE::ReplicationConfig; use PVE::LXC; use PVE::LXC::Create; use PVE::LXC::Migrate; +use PVE::GuestHelpers; use PVE::API2::LXC::Config; use PVE::API2::LXC::Status; use PVE::API2::LXC::Snapshot; -use PVE::HA::Env::PVE2; -use PVE::HA::Config; use PVE::JSONSchema qw(get_standard_option); use base qw(PVE::RESTHandler); -use Data::Dumper; # fixme: remove +BEGIN { + if (!$ENV{PVE_GENERATING_DOCS}) { + require PVE::HA::Env::PVE2; + import PVE::HA::Env::PVE2; + require PVE::HA::Config; + import PVE::HA::Config; + } +} __PACKAGE__->register_method ({ subclass => "PVE::API2::LXC::Config", @@ -67,7 +74,7 @@ __PACKAGE__->register_method({ type => 'array', items => { type => "object", - properties => {}, + properties => $PVE::LXC::vmstatus_return_properties, }, links => [ { rel => 'child', href => "{vmid}" } ], }, @@ -84,7 +91,6 @@ __PACKAGE__->register_method({ next if !$rpcenv->check($authuser, "/vms/$vmid", [ 'VM.Audit' ], 1); my $data = $vmstatus->{$vmid}; - $data->{vmid} = $vmid; push @$res, $data; } @@ -154,6 +160,18 @@ __PACKAGE__->register_method({ description => "Setup public SSH keys (one key per line, " . 
"OpenSSH format).", }, + bwlimit => { + description => "Override i/o bandwidth limit (in KiB/s).", + optional => 1, + type => 'number', + minimum => '0', + }, + start => { + optional => 1, + type => 'boolean', + default => 0, + description => "Start the CT after its creation finished successfully.", + }, }), }, returns => { @@ -162,28 +180,26 @@ __PACKAGE__->register_method({ code => sub { my ($param) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); + PVE::Cluster::check_cfs_quorum(); + my $rpcenv = PVE::RPCEnvironment::get(); my $authuser = $rpcenv->get_user(); my $node = extract_param($param, 'node'); - my $vmid = extract_param($param, 'vmid'); - my $ignore_unpack_errors = extract_param($param, 'ignore-unpack-errors'); + my $bwlimit = extract_param($param, 'bwlimit'); + my $start_after_create = extract_param($param, 'start'); my $basecfg_fn = PVE::LXC::Config->config_file($vmid); - my $same_container_exists = -f $basecfg_fn; # 'unprivileged' is read-only, so we can't pass it to update_pct_config my $unprivileged = extract_param($param, 'unprivileged'); - my $restore = extract_param($param, 'restore'); if ($restore) { # fixme: limit allowed parameters - } my $force = extract_param($param, 'force'); @@ -191,17 +207,16 @@ __PACKAGE__->register_method({ if (!($same_container_exists && $restore && $force)) { PVE::Cluster::check_vmid_unused($vmid); } else { + die "can't overwrite running container\n" if PVE::LXC::check_running($vmid); my $conf = PVE::LXC::Config->load_config($vmid); PVE::LXC::Config->check_protection($conf, "unable to restore CT $vmid"); } my $password = extract_param($param, 'password'); - my $ssh_keys = extract_param($param, 'ssh-public-keys'); PVE::Tools::validate_ssh_public_keys($ssh_keys) if defined($ssh_keys); my $pool = extract_param($param, 'pool'); - if (defined($pool)) { $rpcenv->check_pool_exist($pool); $rpcenv->check_perm_modify($authuser, "/pool/$pool"); @@ -218,16 +233,14 @@ __PACKAGE__->register_method({ raise_perm_exc(); } - PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []); - + my $ostemplate = extract_param($param, 'ostemplate'); my $storage = extract_param($param, 'storage') // 'local'; - my $storage_cfg = cfs_read_file("storage.cfg"); + PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []); - my $ostemplate = extract_param($param, 'ostemplate'); + my $storage_cfg = cfs_read_file("storage.cfg"); my $archive; - if ($ostemplate eq '-') { die "pipe requires cli environment\n" if $rpcenv->{type} ne 'cli'; @@ -236,10 +249,11 @@ __PACKAGE__->register_method({ $archive = '-'; die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs}); } else { - $rpcenv->check_volume_access($authuser, $storage_cfg, $vmid, $ostemplate); + PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate); $archive = PVE::Storage::abs_filesystem_path($storage_cfg, $ostemplate); } + my %used_storages; my $check_and_activate_storage = sub { my ($sid) = @_; @@ -251,10 +265,13 @@ __PACKAGE__->register_method({ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); PVE::Storage::activate_storage($storage_cfg, $sid); + $used_storages{$sid} = 1; }; my $conf = {}; + my $is_root = $authuser eq 'root@pam'; + my $no_disk_param = {}; my $mp_param = {}; my $storage_only_mode = 1; @@ -276,7 +293,7 @@ __PACKAGE__->register_method({ } } - die "mountpoints configured, but 'rootfs' not set - aborting\n" + die "mount points configured, but 'rootfs' not set - aborting\n" if 
!$storage_only_mode && !defined($mp_param->{rootfs}); # check storage access, activate storage @@ -289,7 +306,7 @@ __PACKAGE__->register_method({ if ($mountpoint->{type} ne 'volume') { # bind or device die "Only root can pass arbitrary filesystem paths.\n" - if $authuser ne 'root@pam'; + if !$is_root; } else { my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); &$check_and_activate_storage($sid); @@ -303,25 +320,35 @@ __PACKAGE__->register_method({ $conf->{unprivileged} = 1 if $unprivileged; - my $check_vmid_usage = sub { - if ($force) { - die "can't overwrite running container\n" - if PVE::LXC::check_running($vmid); - } else { - PVE::Cluster::check_vmid_unused($vmid); - } - }; + my $emsg = $restore ? "unable to restore CT $vmid -" : "unable to create CT $vmid -"; + + eval { PVE::LXC::Config->create_and_lock_config($vmid, $force) }; + die "$emsg $@" if $@; my $code = sub { - &$check_vmid_usage(); # final check after locking - - PVE::Cluster::check_cfs_quorum(); - my $vollist = []; + my $old_conf = PVE::LXC::Config->load_config($vmid); + my $vollist = []; eval { + my $orig_mp_param; # only used if $restore + if ($restore) { + die "can't overwrite running container\n" if PVE::LXC::check_running($vmid); + if ($is_root && $archive ne '-') { + my $orig_conf; + ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($archive); + # When we're root call 'restore_configuration' with ristricted=0, + # causing it to restore the raw lxc entries, among which there may be + # 'lxc.idmap' entries. We need to make sure that the extracted contents + # of the container match up with the restored configuration afterwards: + $conf->{lxc} = [grep { $_->[0] eq 'lxc.idmap' } @{$orig_conf->{lxc}}]; + } + } if ($storage_only_mode) { if ($restore) { - (undef, $mp_param) = PVE::LXC::Create::recover_config($archive); + if (!defined($orig_mp_param)) { + (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($archive); + } + $mp_param = $orig_mp_param; die "rootfs configuration could not be recovered, please check and specify manually!\n" if !defined($mp_param->{rootfs}); PVE::LXC::Config->foreach_mountpoint($mp_param, sub { @@ -335,6 +362,19 @@ __PACKAGE__->register_method({ $mountpoint->{volume} = "$storage:$disksize"; $mp_param->{$ms} = PVE::LXC::Config->print_ct_mountpoint($mountpoint, $ms eq 'rootfs'); } else { + my $type = $mountpoint->{type}; + die "restoring rootfs to $type mount is only possible by specifying -rootfs manually!\n" + if ($ms eq 'rootfs'); + die "restoring '$ms' to $type mount is only possible for root\n" + if !$is_root; + + if ($mountpoint->{backup}) { + warn "WARNING - unsupported configuration!\n"; + warn "backup was enabled for $type mount point $ms ('$mountpoint->{mp}')\n"; + warn "mount point configuration will be restored after archive extraction!\n"; + warn "contained files will be restored to wrong directory!\n"; + } + delete $mp_param->{$ms}; # actually delay bind/dev mps $delayed_mp_param->{$ms} = PVE::LXC::Config->print_ct_mountpoint($mountpoint, $ms eq 'rootfs'); } }); @@ -345,10 +385,29 @@ __PACKAGE__->register_method({ $vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf); + # we always have the 'create' lock so check for more than 1 entry + if (scalar(keys %$old_conf) > 1) { + # destroy old container volumes + PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $old_conf, { lock => 'create' }); + } - PVE::LXC::Create::create_rootfs($storage_cfg, $vmid, $conf, - $archive, $password, $restore, - $ignore_unpack_errors, $ssh_keys); + eval { + 
my $rootdir = PVE::LXC::mount_all($vmid, $storage_cfg, $conf, 1); + $bwlimit = PVE::Storage::get_bandwidth_limit('restore', [keys %used_storages], $bwlimit); + PVE::LXC::Create::restore_archive($archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit); + + if ($restore) { + PVE::LXC::Create::restore_configuration($vmid, $rootdir, $conf, !$is_root); + } else { + my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); # detect OS + PVE::LXC::Config->write_config($vmid, $conf); # safe config (after OS detection) + $lxc_setup->post_create_hook($password, $ssh_keys); + } + }; + my $err = $@; + PVE::LXC::umount_all($vmid, $storage_cfg, $conf, $err ? 1 : 0); + PVE::Storage::deactivate_volumes($storage_cfg, PVE::LXC::Config->get_vm_volumes($conf)); + die $err if $err; # set some defaults $conf->{hostname} ||= "CT$vmid"; $conf->{memory} ||= 512; @@ -360,19 +419,20 @@ __PACKAGE__->register_method({ }; if (my $err = $@) { PVE::LXC::destroy_disks($storage_cfg, $vollist); - PVE::LXC::destroy_config($vmid); - die $err; + eval { PVE::LXC::destroy_config($vmid) }; + warn $@ if $@; + die "$emsg $err"; } PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool; + + PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node }) + if $start_after_create; }; + my $workername = $restore ? 'vzrestore' : 'vzcreate'; my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); }; - &$check_vmid_usage(); # first check before locking - - return $rpcenv->fork_worker($restore ? 'vzrestore' : 'vzcreate', - $vmid, $authuser, $realcmd); - + return $rpcenv->fork_worker($workername, $vmid, $authuser, $realcmd); }}); __PACKAGE__->register_method({ @@ -411,6 +471,7 @@ __PACKAGE__->register_method({ { subdir => 'config' }, { subdir => 'status' }, { subdir => 'vncproxy' }, + { subdir => 'termproxy' }, { subdir => 'vncwebsocket' }, { subdir => 'spiceproxy' }, { subdir => 'migrate' }, @@ -553,6 +614,10 @@ __PACKAGE__->register_method({ die "unable to remove CT $vmid - used in HA resources\n" if PVE::HA::Config::vm_is_ha_managed($vmid); + # do not allow destroy if there are replication jobs + my $repl_conf = PVE::ReplicationConfig->new(); + $repl_conf->check_for_existing_jobs($vmid); + my $running_error_msg = "unable to destroy CT $vmid - container is running\n"; die $running_error_msg if PVE::LXC::check_running($vmid); # check early @@ -595,6 +660,20 @@ __PACKAGE__->register_method ({ type => 'boolean', description => "use websocket instead of standard VNC.", }, + width => { + optional => 1, + description => "sets the width of the console in pixels.", + type => 'integer', + minimum => 16, + maximum => 4096, + }, + height => { + optional => 1, + description => "sets the height of the console in pixels.", + type => 'integer', + minimum => 16, + maximum => 2160, + }, }, }, returns => { @@ -637,10 +716,10 @@ __PACKAGE__->register_method ({ # NOTE: vncterm VNC traffic is already TLS encrypted, # so we select the fastest chipher here (or 'none'?) my $remcmd = $remip ? 
- ['/usr/bin/ssh', '-t', $remip] : []; + ['/usr/bin/ssh', '-e', 'none', '-t', $remip] : []; my $conf = PVE::LXC::Config->load_config($vmid, $node); - my $concmd = PVE::LXC::get_console_command($vmid, $conf); + my $concmd = PVE::LXC::get_console_command($vmid, $conf, -1); my $shcmd = [ '/usr/bin/dtach', '-A', "/var/run/dtach/vzctlconsole$vmid", @@ -657,6 +736,14 @@ __PACKAGE__->register_method ({ '-timeout', $timeout, '-authpath', $authpath, '-perm', 'VM.Console']; + if ($param->{width}) { + push @$cmd, '-width', $param->{width}; + } + + if ($param->{height}) { + push @$cmd, '-height', $param->{height}; + } + if ($param->{websocket}) { $ENV{PVE_VNC_TICKET} = $ticket; # pass ticket to vncterm push @$cmd, '-notls', '-listen', 'localhost'; @@ -664,7 +751,7 @@ __PACKAGE__->register_method ({ push @$cmd, '-c', @$remcmd, @$shcmd; - run_command($cmd); + run_command($cmd, keeplocale => 1); return; }; @@ -682,6 +769,89 @@ __PACKAGE__->register_method ({ }; }}); +__PACKAGE__->register_method ({ + name => 'termproxy', + path => '{vmid}/termproxy', + method => 'POST', + protected => 1, + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]], + }, + description => "Creates a TCP proxy connection.", + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + }, + }, + returns => { + additionalProperties => 0, + properties => { + user => { type => 'string' }, + ticket => { type => 'string' }, + port => { type => 'integer' }, + upid => { type => 'string' }, + }, + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = $param->{vmid}; + my $node = $param->{node}; + + my $authpath = "/vms/$vmid"; + + my $ticket = PVE::AccessControl::assemble_vnc_ticket($authuser, $authpath); + + my ($remip, $family); + + if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) { + ($remip, $family) = PVE::Cluster::remote_node_ip($node); + } else { + $family = PVE::Tools::get_host_address_family($node); + } + + my $port = PVE::Tools::next_vnc_port($family); + + my $remcmd = $remip ? + ['/usr/bin/ssh', '-e', 'none', '-t', $remip, '--'] : []; + + my $conf = PVE::LXC::Config->load_config($vmid, $node); + my $concmd = PVE::LXC::get_console_command($vmid, $conf, -1); + + my $shcmd = [ '/usr/bin/dtach', '-A', + "/var/run/dtach/vzctlconsole$vmid", + '-r', 'winch', '-z', @$concmd]; + + my $realcmd = sub { + my $upid = shift; + + syslog ('info', "starting lxc termproxy $upid\n"); + + my $cmd = ['/usr/bin/termproxy', $port, '--path', $authpath, + '--perm', 'VM.Console', '--']; + push @$cmd, @$remcmd, @$shcmd; + + PVE::Tools::run_command($cmd); + }; + + my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1); + + PVE::Tools::wait_for_vnc_port($port); + + return { + user => $authuser, + ticket => $ticket, + port => $port, + upid => $upid, + }; + }}); + __PACKAGE__->register_method({ name => 'vncwebsocket', path => '{vmid}/vncwebsocket', @@ -800,11 +970,21 @@ __PACKAGE__->register_method({ description => "Use online/live migration.", optional => 1, }, + restart => { + type => 'boolean', + description => "Use restart migration", + optional => 1, + }, + timeout => { + type => 'integer', + description => "Timeout in seconds for shutdown for restart migration", + optional => 1, + default => 180, + }, force => { type => 'boolean', description => "Force migration despite local bind / device" . - " mounts. 
WARNING: identical bind / device mounts need to ". - " be available on the target node.", + " mounts. NOTE: deprecated, use 'shared' property of mount point instead.", optional => 1, }, }, @@ -838,8 +1018,8 @@ __PACKAGE__->register_method({ # try to detect errors early if (PVE::LXC::check_running($vmid)) { - die "can't migrate running container without --online\n" - if !$param->{online}; + die "can't migrate running container without --online or --restart\n" + if !$param->{online} && !$param->{restart}; } if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') { @@ -851,7 +1031,7 @@ __PACKAGE__->register_method({ my $cmd = ['ha-manager', 'migrate', $service, $target]; - print "Executing HA migrate for CT $vmid to node $target\n"; + print "Requesting HA migration for CT $vmid to node $target\n"; PVE::Tools::run_command($cmd); @@ -863,14 +1043,14 @@ __PACKAGE__->register_method({ } else { my $realcmd = sub { - my $upid = shift; - PVE::LXC::Migrate->migrate($target, $targetip, $vmid, $param); + }; - return; + my $worker = sub { + return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd); }; - return $rpcenv->fork_worker('vzmigrate', $vmid, $authuser, $realcmd); + return $rpcenv->fork_worker('vzmigrate', $vmid, $authuser, $worker); } }}); @@ -892,7 +1072,7 @@ __PACKAGE__->register_method({ feature => { description => "Feature to check.", type => 'string', - enum => [ 'snapshot' ], + enum => [ 'snapshot', 'clone', 'copy' ], }, snapname => get_standard_option('pve-lxc-snapshot-name', { optional => 1, @@ -954,12 +1134,6 @@ __PACKAGE__->register_method({ properties => { node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }), - experimental => { - type => 'boolean', - description => "The template feature is experimental, set this " . - "flag if you know what you are doing.", - default => 0, - }, }, }, returns => { type => 'null'}, @@ -988,15 +1162,24 @@ __PACKAGE__->register_method({ die "you can't convert a CT to template if the CT is running\n" if PVE::LXC::check_running($vmid); + my $scfg = PVE::Storage::config(); + PVE::LXC::Config->foreach_mountpoint($conf, sub { + my ($ms, $mp) = @_; + + my ($sid) =PVE::Storage::parse_volume_id($mp->{volume}, 0); + die "Directory storage '$sid' does not support container templates!\n" + if $scfg->{ids}->{$sid}->{path}; + }); + my $realcmd = sub { PVE::LXC::template_create($vmid, $conf); - }; - $conf->{template} = 1; + $conf->{template} = 1; - PVE::LXC::Config->write_config($vmid, $conf); - # and remove lxc config - PVE::LXC::update_lxc_config(undef, $vmid, $conf); + PVE::LXC::Config->write_config($vmid, $conf); + # and remove lxc config + PVE::LXC::update_lxc_config($vmid, $conf); + }; return $rpcenv->fork_worker('vztemplate', $vmid, $authuser, $realcmd); }; @@ -1055,26 +1238,18 @@ __PACKAGE__->register_method({ }), storage => get_standard_option('pve-storage-id', { description => "Target storage for full clone.", - requires => 'full', optional => 1, }), full => { optional => 1, type => 'boolean', - description => "Create a full copy of all disk. This is always done when " . + description => "Create a full copy of all disks. This is always done when " . "you clone a normal CT. For CT templates, we try to create a linked clone by default.", - default => 0, }, - experimental => { - type => 'boolean', - description => "The clone feature is experimental, set this " . 
- "flag if you know what you are doing.", - default => 0, - }, -# target => get_standard_option('pve-node', { -# description => "Target node. Only allowed if the original VM is on shared storage.", -# optional => 1, -# }), + target => get_standard_option('pve-node', { + description => "Target node. Only allowed if the original VM is on shared storage.", + optional => 1, + }), }, }, returns => { @@ -1103,150 +1278,220 @@ __PACKAGE__->register_method({ my $storage = extract_param($param, 'storage'); + my $target = extract_param($param, 'target'); + my $localnode = PVE::INotify::nodename(); + undef $target if $target && ($target eq $localnode || $target eq 'localhost'); + + PVE::Cluster::check_node_exists($target) if $target; + my $storecfg = PVE::Storage::config(); if ($storage) { # check if storage is enabled on local node PVE::Storage::storage_check_enabled($storecfg, $storage); + if ($target) { + # check if storage is available on target node + PVE::Storage::storage_check_node($storecfg, $storage, $target); + # clone only works if target storage is shared + my $scfg = PVE::Storage::storage_config($storecfg, $storage); + die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared}; + } } PVE::Cluster::check_cfs_quorum(); - my $running = PVE::LXC::check_running($vmid) || 0; + my $conffile; + my $newconf = {}; + my $mountpoints = {}; + my $fullclone = {}; + my $vollist = []; + my $running; - my $clonefn = sub { + PVE::LXC::Config->lock_config($vmid, sub { + my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk'); - # do all tests after lock - # we also try to do all tests before we fork the worker - my $conf = PVE::LXC::Config->load_config($vmid); + $running = PVE::LXC::check_running($vmid) || 0; - PVE::LXC::Config->check_lock($conf); + my $full = extract_param($param, 'full'); + if (!defined($full)) { + $full = !PVE::LXC::Config->is_template($src_conf); + } + die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full; - my $verify_running = PVE::LXC::check_running($vmid) || 0; + eval { + die "snapshot '$snapname' does not exist\n" + if $snapname && !defined($src_conf->{snapshots}->{$snapname}); - die "unexpected state change\n" if $verify_running != $running; - die "snapshot '$snapname' does not exist\n" - if $snapname && !defined( $conf->{snapshots}->{$snapname}); + my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf; - my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf; + $conffile = PVE::LXC::Config->config_file($newid); + die "unable to create CT $newid: config file already exists\n" + if -f $conffile; - my $conffile = PVE::LXC::Config->config_file($newid); - die "unable to create CT $newid: config file already exists\n" - if -f $conffile; + my $sharedvm = 1; + foreach my $opt (keys %$src_conf) { + next if $opt =~ m/^unused\d+$/; - my $newconf = { lock => 'clone' }; - my $mountpoints = {}; - my $fullclone = {}; - my $vollist = []; + my $value = $src_conf->{$opt}; - foreach my $opt (keys %$oldconf) { - my $value = $oldconf->{$opt}; + if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) { + my $mp = $opt eq 'rootfs' ? + PVE::LXC::Config->parse_ct_rootfs($value) : + PVE::LXC::Config->parse_ct_mountpoint($value); - # no need to copy unused images, because VMID(owner) changes anyways - next if $opt =~ m/^unused\d+$/; + if ($mp->{type} eq 'volume') { + my $volid = $mp->{volume}; - if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) { - my $mp = $opt eq 'rootfs' ? 
- PVE::LXC::Config->parse_ct_rootfs($value) : - PVE::LXC::Config->parse_ct_mountpoint($value); + my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); + $sid = $storage if defined($storage); + my $scfg = PVE::Storage::storage_config($storecfg, $sid); + if (!$scfg->{shared}) { + $sharedvm = 0; + warn "found non-shared volume: $volid\n" if $target; + } - if ($mp->{type} eq 'volume') { - my $volid = $mp->{volume}; - if ($param->{full}) { - die "fixme: full clone not implemented"; + $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); - die "Full clone feature for '$volid' is not available\n" - if !PVE::Storage::volume_has_feature($storecfg, 'copy', $volid, $snapname, $running); - $fullclone->{$opt} = 1; - } else { - # not full means clone instead of copy - die "Linked clone feature for '$volid' is not available\n" - if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running); - } + if ($full) { + die "Cannot do full clones on a running container without snapshots\n" + if $running && !defined($snapname); + $fullclone->{$opt} = 1; + } else { + # not full means clone instead of copy + die "Linked clone feature for '$volid' is not available\n" + if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running); + } - $mountpoints->{$opt} = $mp; - push @$vollist, $volid; + $mountpoints->{$opt} = $mp; + push @$vollist, $volid; + } else { + # TODO: allow bind mounts? + die "unable to clone mountpint '$opt' (type $mp->{type})\n"; + } + } elsif ($opt =~ m/^net(\d+)$/) { + # always change MAC! address + my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg'); + my $net = PVE::LXC::Config->parse_lxc_network($value); + $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix}); + $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net); } else { - # TODO: allow bind mounts? - die "unable to clone mountpint '$opt' (type $mp->{type})\n"; + # copy everything else + $newconf->{$opt} = $value; } - - } else { - # copy everything else - $newconf->{$opt} = $value; } - } + die "can't clone CT to node '$target' (CT uses local storage)\n" + if $target && !$sharedvm; - delete $newconf->{template}; - if ($param->{hostname}) { - $newconf->{hostname} = $param->{hostname}; - } + # Replace the 'disk' lock with a 'create' lock. 
+ $newconf->{lock} = 'create'; - if ($param->{description}) { - $newconf->{description} = $param->{description}; - } - - # create empty/temp config - this fails if CT already exists on other node - PVE::Tools::file_set_contents($conffile, "# ctclone temporary file\nlock: clone\n"); + delete $newconf->{template}; + if ($param->{hostname}) { + $newconf->{hostname} = $param->{hostname}; + } - my $realcmd = sub { - my $upid = shift; + if ($param->{description}) { + $newconf->{description} = $param->{description}; + } - my $newvollist = []; + # create empty/temp config - this fails if CT already exists on other node + PVE::LXC::Config->write_config($newid, $newconf); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; + warn $@ if $@; + die $err; + } + }); - eval { - local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; }; + my $update_conf = sub { + my ($key, $value) = @_; + return PVE::LXC::Config->lock_config($newid, sub { + my $conf = PVE::LXC::Config->load_config($newid); + die "Lost 'create' config lock, aborting.\n" + if !PVE::LXC::Config->has_lock($conf, 'create'); + $conf->{$key} = $value; + PVE::LXC::Config->write_config($newid, $conf); + }); + }; - PVE::Storage::activate_volumes($storecfg, $vollist, $snapname); + my $realcmd = sub { + my ($upid) = @_; - foreach my $opt (keys %$mountpoints) { - my $mp = $mountpoints->{$opt}; - my $volid = $mp->{volume}; + my $newvollist = []; - if ($fullclone->{$opt}) { - die "fixme: full clone not implemented\n"; - } else { - print "create linked clone of mountpoint $opt ($volid)\n"; - my $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname); - push @$newvollist, $newvolid; - $mp->{volume} = $newvolid; + my $verify_running = PVE::LXC::check_running($vmid) || 0; + die "unexpected state change\n" if $verify_running != $running; - $newconf->{$opt} = PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs'); - PVE::LXC::Config->write_config($newid, $newconf); - } + eval { + local $SIG{INT} = + local $SIG{TERM} = + local $SIG{QUIT} = + local $SIG{HUP} = sub { die "interrupted by signal\n"; }; + + PVE::Storage::activate_volumes($storecfg, $vollist, $snapname); + + foreach my $opt (keys %$mountpoints) { + my $mp = $mountpoints->{$opt}; + my $volid = $mp->{volume}; + + my $newvolid; + if ($fullclone->{$opt}) { + print "create full clone of mountpoint $opt ($volid)\n"; + my $target_storage = $storage // PVE::Storage::parse_volume_id($volid); + $newvolid = PVE::LXC::copy_volume($mp, $newid, $target_storage, $storecfg, $newconf, $snapname); + } else { + print "create linked clone of mount point $opt ($volid)\n"; + $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname); } - delete $newconf->{lock}; - PVE::LXC::Config->write_config($newid, $newconf); + push @$newvollist, $newvolid; + $mp->{volume} = $newvolid; - PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; - }; - if (my $err = $@) { - unlink $conffile; + $update_conf->($opt, PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs')); + } - sleep 1; # some storage like rbd need to wait before release volume - really? 
+ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; + PVE::LXC::Config->remove_lock($newid, 'create'); - foreach my $volid (@$newvollist) { - eval { PVE::Storage::vdisk_free($storecfg, $volid); }; - warn $@ if $@; - } - die "clone failed: $err"; - } + if ($target) { + # always deactivate volumes - avoid lvm LVs to be active on several nodes + PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running; + PVE::Storage::deactivate_volumes($storecfg, $newvollist); - return; + my $newconffile = PVE::LXC::Config->config_file($newid, $target); + die "Failed to move config to node '$target' - rename failed: $!\n" + if !rename($conffile, $newconffile); + } }; + my $err = $@; - PVE::Firewall::clone_vmfw_conf($vmid, $newid); + # Unlock the source config in any case: + eval { PVE::LXC::Config->remove_lock($vmid, 'disk') }; + warn $@ if $@; - return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd); + if ($err) { + # Now cleanup the config & disks: + unlink $conffile; + + sleep 1; # some storages like rbd need to wait before release volume - really? + + foreach my $volid (@$newvollist) { + eval { PVE::Storage::vdisk_free($storecfg, $volid); }; + warn $@ if $@; + } + die "clone failed: $err"; + } + return; }; - return PVE::LXC::Config->lock_config($vmid, $clonefn); + PVE::Firewall::clone_vmfw_conf($vmid, $newid); + return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd); }}); @@ -1256,7 +1501,7 @@ __PACKAGE__->register_method({ method => 'PUT', protected => 1, proxyto => 'node', - description => "Resize a container mountpoint.", + description => "Resize a container mount point.", permissions => { check => ['perm', '/vms/{vmid}', ['VM.Config.Disk'], any => 1], }, @@ -1329,7 +1574,7 @@ __PACKAGE__->register_method({ my (undef, undef, $owner, undef, undef, undef, $format) = PVE::Storage::parse_volname($storage_cfg, $volid); - die "can't resize mountpoint owned by another container ($owner)" + die "can't resize mount point owned by another container ($owner)" if $vmid != $owner; die "can't resize volume: $disk if snapshot exists\n" @@ -1361,13 +1606,13 @@ __PACKAGE__->register_method({ PVE::LXC::Config->write_config($vmid, $conf); if ($format eq 'raw') { - my $path = PVE::Storage::path($storage_cfg, $volid, undef); + my $path = PVE::Storage::map_volume($storage_cfg, $volid) // PVE::Storage::path($storage_cfg, $volid); if ($running) { $mp->{mp} = '/'; my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1]; $path = PVE::LXC::query_loopdev($path) if $use_loopdev; - die "internal error: CT running but mountpoint not attached to a loop device" + die "internal error: CT running but mount point not attached to a loop device" if !$path; PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev; @@ -1389,6 +1634,8 @@ __PACKAGE__->register_method({ PVE::Tools::run_command(['resize2fs', $path]); }; warn "Failed to update the container's filesystem: $@\n" if $@; + + PVE::Storage::unmap_volume($storage_cfg, $volid); } } }; @@ -1399,4 +1646,156 @@ __PACKAGE__->register_method({ return PVE::LXC::Config->lock_config($vmid, $code);; }}); +__PACKAGE__->register_method({ + name => 'move_volume', + path => '{vmid}/move_volume', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Move a rootfs-/mp-volume to a different storage", + permissions => { + description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " . 
+ "and 'Datastore.AllocateSpace' permissions on the storage.", + check => + [ 'and', + ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], + ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]], + ], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }), + volume => { + type => 'string', + enum => [ PVE::LXC::Config->mountpoint_names() ], + description => "Volume which will be moved.", + }, + storage => get_standard_option('pve-storage-id', { + description => "Target Storage.", + completion => \&PVE::Storage::complete_storage_enabled, + }), + delete => { + type => 'boolean', + description => "Delete the original volume after successful copy. By default the original is kept as an unused volume entry.", + optional => 1, + default => 0, + }, + digest => { + type => 'string', + description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', + maxLength => 40, + optional => 1, + } + }, + }, + returns => { + type => 'string', + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = extract_param($param, 'vmid'); + + my $storage = extract_param($param, 'storage'); + + my $mpkey = extract_param($param, 'volume'); + + my $lockname = 'disk'; + + my ($mpdata, $old_volid); + + PVE::LXC::Config->lock_config($vmid, sub { + my $conf = PVE::LXC::Config->load_config($vmid); + PVE::LXC::Config->check_lock($conf); + + die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid); + + if ($mpkey eq 'rootfs') { + $mpdata = PVE::LXC::Config->parse_ct_rootfs($conf->{$mpkey}); + } elsif ($mpkey =~ m/mp\d+/) { + $mpdata = PVE::LXC::Config->parse_ct_mountpoint($conf->{$mpkey}); + } else { + die "Can't parse $mpkey\n"; + } + $old_volid = $mpdata->{volume}; + + die "you can't move a volume with snapshots and delete the source\n" + if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid); + + PVE::Tools::assert_if_modified($param->{digest}, $conf->{digest}); + + PVE::LXC::Config->set_lock($vmid, $lockname); + }); + + my $realcmd = sub { + eval { + PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage"); + + my $conf = PVE::LXC::Config->load_config($vmid); + my $storage_cfg = PVE::Storage::config(); + + my $new_volid; + + eval { + PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]); + $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf); + $mpdata->{volume} = $new_volid; + + PVE::LXC::Config->lock_config($vmid, sub { + my $digest = $conf->{digest}; + $conf = PVE::LXC::Config->load_config($vmid); + PVE::Tools::assert_if_modified($digest, $conf->{digest}); + + $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint($mpdata, $mpkey eq 'rootfs'); + + PVE::LXC::Config->add_unused_volume($conf, $old_volid) if !$param->{delete}; + + PVE::LXC::Config->write_config($vmid, $conf); + }); + + eval { + # try to deactivate volumes - avoid lvm LVs to be active on several nodes + PVE::Storage::deactivate_volumes($storage_cfg, [ $new_volid ]) + }; + warn $@ if $@; + }; + if (my $err = $@) { + eval { + PVE::Storage::vdisk_free($storage_cfg, $new_volid) + if defined($new_volid); + }; + warn $@ if $@; + die $err; + } + + if ($param->{delete}) { + eval { + 
PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]); + PVE::Storage::vdisk_free($storage_cfg, $old_volid); + }; + warn $@ if $@; + } + }; + my $err = $@; + eval { PVE::LXC::Config->remove_lock($vmid, $lockname) }; + warn $@ if $@; + die $err if $err; + }; + my $task = eval { + $rpcenv->fork_worker('move_volume', $vmid, $authuser, $realcmd); + }; + if (my $err = $@) { + eval { PVE::LXC::Config->remove_lock($vmid, $lockname) }; + warn $@ if $@; + die $err; + } + return $task; + }}); + 1;
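
Usage sketch (not part of the patch): the hunks above add 'bwlimit' and 'start' parameters to container creation and a new 'termproxy' endpoint returning user/ticket/port/upid. The minimal Perl client below is only an illustration of how those additions could be exercised over the REST API; the PVE::APIClient::LXC module and its constructor arguments, the hostname, credentials, node name, VMID and template name are assumptions/placeholders, not something this patch provides.

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Assumption: libpve-apiclient-perl is installed (provides PVE::APIClient::LXC).
    use PVE::APIClient::LXC;

    # Placeholder connection details; TLS fingerprint handling is omitted here.
    my $conn = PVE::APIClient::LXC->new(
        host     => 'pve.example.com',
        username => 'root@pam',
        password => 'secret',
    );

    # POST /nodes/{node}/lxc - create a CT, capping restore/extract I/O at
    # 51200 KiB/s ('bwlimit') and starting it after successful creation ('start'),
    # as introduced by the create_vm changes above.
    my $create_upid = $conn->post('/nodes/mynode/lxc', {
        vmid       => 200,
        ostemplate => 'local:vztmpl/debian-9.0-standard_9.3-1_amd64.tar.gz', # placeholder
        storage    => 'local-lvm',
        bwlimit    => 51200,
        start      => 1,
    });
    print "create task: $create_upid\n";

    # POST /nodes/{node}/lxc/{vmid}/termproxy - allocate a terminal proxy for the CT;
    # per the new handler, the result contains user, ticket, port and upid.
    my $term = $conn->post('/nodes/mynode/lxc/200/termproxy', {});
    print "termproxy listening on port $term->{port}\n";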