use strict;
use warnings;
+use IO::Socket::UNIX;
+use Socket qw(SOCK_STREAM);
+
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
}
}
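+# helper shared by the migration endpoints below: checks that the target
+# storage is enabled on $node, that the user may allocate space on it, and
+# that it supports container root directories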
+my $check_storage_access_migrate = sub {
+ my ($rpcenv, $authuser, $storecfg, $storage, $node) = @_;
+
+ PVE::Storage::storage_check_enabled($storecfg, $storage, $node);
+
+ $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "storage '$storage' does not support CT rootdirs\n"
+ if !$scfg->{content}->{rootdir};
+};
+
__PACKAGE__->register_method ({
subclass => "PVE::API2::LXC::Config",
path => '{vmid}/config',
my $restore = extract_param($param, 'restore');
my $unique = extract_param($param, 'unique');
+ $param->{cpuunits} = PVE::CGroup::clamp_cpu_shares($param->{cpuunits})
+ if defined($param->{cpuunits}); # clamp value depending on cgroup version
+
# used to skip firewall config restore if user lacks permission
my $skip_fw_config_restore = 0;
PVE::Tools::validate_ssh_public_keys($ssh_keys) if defined($ssh_keys);
my $pool = extract_param($param, 'pool');
- if (defined($pool)) {
- $rpcenv->check_pool_exist($pool);
- $rpcenv->check_perm_modify($authuser, "/pool/$pool");
- }
+ $rpcenv->check_pool_exist($pool) if defined($pool);
if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
# OK
$archive = '-';
die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs});
} else {
- PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate);
+ my $content_type = $restore ? 'backup' : 'vztmpl';
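+ # restores read from a backup volume, fresh creates from a container template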
+ PVE::Storage::check_volume_access(
+ $rpcenv,
+ $authuser,
+ $storage_cfg,
+ $vmid,
+ $ostemplate,
+ $content_type,
+ );
$archive = $ostemplate;
}
eval { PVE::LXC::Config->create_and_lock_config($vmid, $force) };
die "$emsg $@" if $@;
- my $remove_lock = 1;
+ my $destroy_config_on_error = !$same_container_exists;
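+ # if a config already existed, only the 'create' lock is ours to clean up;
+ # this gets flipped to 1 below once we actually start modifying the CT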
my $code = sub {
my $old_conf = PVE::LXC::Config->load_config($vmid);
my $was_template;
+ my $vollist = [];
eval {
my $orig_mp_param; # only used if $restore
if ($restore) {
print "recovering backed-up configuration from '$archive'\n";
($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid);
+ for my $opt (keys %$orig_conf) {
+ # early check before disks are created
+ # the "real" check is in later on when actually merging the configs
+ if ($opt =~ /^net\d+$/ && !defined($param->{$opt})) {
+ PVE::LXC::check_bridge_access($rpcenv, $authuser, $orig_conf->{$opt});
+ }
+ }
+
$was_template = delete $orig_conf->{template};
# When we're root call 'restore_configuration' with restricted=0,
$mp_param->{rootfs} = "$storage:4"; # defaults to 4GB
}
}
- };
- die "$emsg $@" if $@;
- # up until here we did not modify the container, besides the lock
- $remove_lock = 0;
+ # up until here we did not modify the container, besides the lock
+ $destroy_config_on_error = 1;
- my $vollist = [];
- eval {
$vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf);
# we always have the 'create' lock so check for more than 1 entry
if ($restore) {
print "merging backed-up and given configuration..\n";
PVE::LXC::Create::restore_configuration($vmid, $storage_cfg, $archive, $rootdir, $conf, !$is_root, $unique, $skip_fw_config_restore);
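+ # with --unique the restored interfaces get new identities, so register fresh IPAM entries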
+ PVE::LXC::create_ifaces_ipams_ips($conf, $vmid) if $unique;
my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
$lxc_setup->template_fixup($conf);
} else {
PVE::LXC::Config->write_config($vmid, $conf);
};
if (my $err = $@) {
- PVE::LXC::destroy_disks($storage_cfg, $vollist);
- eval { PVE::LXC::Config->destroy_config($vmid) };
+ eval { PVE::LXC::delete_ifaces_ipams_ips($conf, $vmid) };
warn $@ if $@;
+ PVE::LXC::destroy_disks($storage_cfg, $vollist);
+ if ($destroy_config_on_error) {
+ eval { PVE::LXC::Config->destroy_config($vmid) };
+ warn $@ if $@;
+
+ if (!$skip_fw_config_restore) { # only if the user has permission to change the fw config
+ eval { PVE::Firewall::remove_vmfw_conf($vmid) };
+ warn $@ if $@;
+ }
+ }
die "$emsg $err";
}
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
-
- PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node })
- if $start_after_create;
};
my $workername = $restore ? 'vzrestore' : 'vzcreate';
};
if (my $err = $@) {
# if we aborted before changing the container, we must remove the create lock
- if ($remove_lock) {
+ if (!$destroy_config_on_error) {
PVE::LXC::Config->remove_lock($vmid, 'create');
}
die $err;
+ } elsif ($start_after_create) {
+ PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node });
}
};
}});
+__PACKAGE__->register_method({
+ name => 'remote_migrate_vm',
+ path => '{vmid}/remote_migrate',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Migrate the container to another cluster. Creates a new migration task. EXPERIMENTAL feature!",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ 'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }),
+ 'target-endpoint' => get_standard_option('proxmox-remote', {
+ description => "Remote target endpoint",
+ }),
+ online => {
+ type => 'boolean',
+ description => "Use online/live migration.",
+ optional => 1,
+ },
+ restart => {
+ type => 'boolean',
+ description => "Use restart migration",
+ optional => 1,
+ },
+ timeout => {
+ type => 'integer',
+ description => "Timeout in seconds for shutdown for restart migration",
+ optional => 1,
+ default => 180,
+ },
+ delete => {
+ type => 'boolean',
+ description => "Delete the original CT and related data after successful migration. By default the original CT is kept on the source cluster in a stopped state.",
+ optional => 1,
+ default => 0,
+ },
+ 'target-storage' => get_standard_option('pve-targetstorage', {
+ optional => 0,
+ }),
+ 'target-bridge' => {
+ type => 'string',
+ description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. Providing the special value '1' will map each source bridge to itself.",
+ format => 'bridge-pair-list',
+ },
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'number',
+ minimum => 0,
+ default => 'migrate limit from datacenter or storage config',
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $source_vmid = extract_param($param, 'vmid');
+ my $target_endpoint = extract_param($param, 'target-endpoint');
+ my $target_vmid = extract_param($param, 'target-vmid') // $source_vmid;
+
+ my $delete = extract_param($param, 'delete') // 0;
+
+ PVE::Cluster::check_cfs_quorum();
+
+ # test if CT exists
+ my $conf = PVE::LXC::Config->load_config($source_vmid);
+ PVE::LXC::Config->check_lock($conf);
+
+ # try to detect errors early
+ if (PVE::LXC::check_running($source_vmid)) {
+ die "can't migrate running container without --online or --restart\n"
+ if !$param->{online} && !$param->{restart};
+ }
+
+ raise_param_exc({ vmid => "cannot migrate HA-managed CT to remote cluster" })
+ if PVE::HA::Config::vm_is_ha_managed($source_vmid);
+
+ my $remote = PVE::JSONSchema::parse_property_string('proxmox-remote', $target_endpoint);
+
+ # TODO: move this into a helper somewhere appropriate?
+ my $conn_args = {
+ protocol => 'https',
+ host => $remote->{host},
+ port => $remote->{port} // 8006,
+ apitoken => $remote->{apitoken},
+ };
+
+ my $fp;
+ if ($fp = $remote->{fingerprint}) {
+ $conn_args->{cached_fingerprints} = { uc($fp) => 1 };
+ }
+
+ print "Establishing API connection with remote at '$remote->{host}'\n";
+
+ my $api_client = PVE::APIClient::LWP->new(%$conn_args);
+
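+ # no fingerprint pinned yet: query the remote's certificate info over the
+ # token-authenticated connection and pin it ourselves, preferring the custom
+ # pveproxy-ssl.pem certificate over the default pve-ssl.pem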
+ if (!defined($fp)) {
+ my $cert_info = $api_client->get("/nodes/localhost/certificates/info");
+ foreach my $cert (@$cert_info) {
+ my $filename = $cert->{filename};
+ next if $filename ne 'pveproxy-ssl.pem' && $filename ne 'pve-ssl.pem';
+ $fp = $cert->{fingerprint} if !$fp || $filename eq 'pveproxy-ssl.pem';
+ }
+ $conn_args->{cached_fingerprints} = { uc($fp) => 1 }
+ if defined($fp);
+ }
+
+ my $storecfg = PVE::Storage::config();
+ my $target_storage = extract_param($param, 'target-storage');
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') };
+ raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+ if $@;
+
+ my $target_bridge = extract_param($param, 'target-bridge');
+ my $bridgemap = eval { PVE::JSONSchema::parse_idmap($target_bridge, 'pve-bridge-id') };
+ raise_param_exc({ 'target-bridge' => "failed to parse bridge map: $@" })
+ if $@;
+
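+ # an identity mapping would blindly re-use local storage IDs on the remote
+ # cluster, so require an explicit mapping instead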
+ die "remote migration requires explicit storage mapping!\n"
+ if $storagemap->{identity};
+
+ $param->{storagemap} = $storagemap;
+ $param->{bridgemap} = $bridgemap;
+ $param->{remote} = {
+ conn => $conn_args, # re-use fingerprint for tunnel
+ client => $api_client,
+ vmid => $target_vmid,
+ };
+ $param->{migration_type} = 'websocket';
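+ # cross-cluster migration always tunnels over the API's websocket endpoint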
+ $param->{delete} = $delete if $delete;
+
+ my $cluster_status = $api_client->get("/cluster/status");
+ my $target_node;
+ foreach my $entry (@$cluster_status) {
+ next if $entry->{type} ne 'node';
+ if ($entry->{local}) {
+ $target_node = $entry->{name};
+ last;
+ }
+ }
+
+ die "couldn't determine endpoint's node name\n"
+ if !defined($target_node);
+
+ my $realcmd = sub {
+ PVE::LXC::Migrate->migrate($target_node, $remote->{host}, $source_vmid, $param);
+ };
+
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($source_vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('vzmigrate', $source_vmid, $authuser, $worker);
+ }});
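+
+# Usage sketch (assuming the matching 'pct remote-migrate' CLI wrapper exists;
+# the endpoint values below are placeholders):
+#   pct remote-migrate <vmid> <target-vmid> \
+#     'host=<target>,apitoken=PVEAPIToken=<user>@<realm>!<tokenid>=<secret>,fingerprint=<fp>' \
+#     --target-bridge vmbr0 --target-storage <storage> --restart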
+
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
description => "Target node.",
completion => \&PVE::Cluster::complete_migration_target,
}),
+ 'target-storage' => get_standard_option('pve-targetstorage'),
online => {
type => 'boolean',
description => "Use online/live migration.",
if !$param->{online} && !$param->{restart};
}
+ if (my $targetstorage = delete $param->{'target-storage'}) {
+ my $storecfg = PVE::Storage::config();
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+ if $@;
+
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk'])
+ if !defined($storagemap->{identity});
+
+ foreach my $target_sid (values %{$storagemap->{entries}}) {
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $target_sid, $target);
+ }
+
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storagemap->{default}, $target)
+ if $storagemap->{default};
+
+ $param->{storagemap} = $storagemap;
+ }
+
if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
description => "You need 'VM.Clone' permissions on /vms/{vmid}, " .
"and 'VM.Allocate' permissions " .
"on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
- "'Datastore.AllocateSpace' on any used storage.",
+ "'Datastore.AllocateSpace' on any used storage, and 'SDN.Use' on any bridge.",
check =>
[ 'and',
['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
my $vollist = [];
my $running;
- PVE::LXC::Config->create_and_lock_config($newid, 0);
- PVE::Firewall::clone_vmfw_conf($vmid, $newid);
-
my $lock_and_reload = sub {
my ($vmid, $code) = @_;
return PVE::LXC::Config->lock_config($vmid, sub {
my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
- $running = PVE::LXC::check_running($vmid) || 0;
+ eval {
+ PVE::LXC::Config->create_and_lock_config($newid, 0);
+ };
+ if (my $err = $@) {
+ eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+ warn "Failed to remove source CT config lock - $@\n" if $@;
- my $full = extract_param($param, 'full');
- if (!defined($full)) {
- $full = !PVE::LXC::Config->is_template($src_conf);
+ die $err;
}
eval {
+ $running = PVE::LXC::check_running($vmid) || 0;
+
+ my $full = extract_param($param, 'full');
+ if (!defined($full)) {
+ $full = !PVE::LXC::Config->is_template($src_conf);
+ }
+
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+
die "parameter 'storage' not allowed for linked clones\n"
if defined($storage) && !$full;
my $net = PVE::LXC::Config->parse_lxc_network($value);
$net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
$newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net);
+
+ PVE::LXC::check_bridge_access($rpcenv, $authuser, $newconf->{$opt});
} else {
# copy everything else
$newconf->{$opt} = $value;
$lock_and_reload->($newid, sub {
my $conf = shift;
my $rootdir = PVE::LXC::mount_all($newid, $storecfg, $conf, 1);
+
eval {
+ PVE::LXC::create_ifaces_ipams_ips($conf, $newid); # register the clone's interfaces, matching the $newid cleanup below
my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
$lxc_setup->post_clone_hook($conf);
};
warn $@ if $@;
if ($err) {
- # Now cleanup the config & disks:
+ # Now cleanup the config & disks & ipam:
sleep 1; # some storages like rbd need to wait before releasing the volume - really?
foreach my $volid (@$newvollist) {
eval {
$lock_and_reload->($newid, sub {
+ my $conf = shift;
+ PVE::LXC::delete_ifaces_ipams_ips($conf, $newid);
PVE::LXC::Config->destroy_config($newid);
PVE::Firewall::remove_vmfw_conf($newid);
});
my $sizestr = extract_param($param, 'size');
my $ext = ($sizestr =~ s/^\+//);
- my $newsize = PVE::JSONSchema::parse_size($sizestr);
- die "invalid size string" if !defined($newsize);
+ my $request_size = PVE::JSONSchema::parse_size($sizestr);
+ die "invalid size string" if !defined($request_size);
die "no options specified\n" if !scalar(keys %$param);
my $storage_cfg = cfs_read_file("storage.cfg");
- my $code = sub {
-
+ my $load_and_check = sub {
my $conf = PVE::LXC::Config->load_config($vmid);
PVE::LXC::Config->check_lock($conf);
PVE::Tools::assert_if_modified($digest, $conf->{digest});
- my $running = PVE::LXC::check_running($vmid);
-
my $disk = $param->{disk};
my $mp = PVE::LXC::Config->parse_volume($disk, $conf->{$disk});
die "can't resize mount point owned by another container ($owner)"
if $vmid != $owner;
- die "can't resize volume: $disk if snapshot exists\n"
- if %{$conf->{snapshots}} && $format eq 'qcow2';
-
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
die "Could not determine current size of volume '$volid'\n" if !defined($size);
- $newsize += $size if $ext;
+ my $newsize = $ext ? $size + $request_size : $request_size;
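+ # a leading '+' (stripped above into $ext) grows the volume by the given
+ # amount; otherwise the size string is an absolute target size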
$newsize = int($newsize);
die "unable to shrink disk size\n" if $newsize < $size;
die "disk is already at specified size\n" if $size == $newsize;
+ return ($conf, $disk, $mp, $volid, $format, $newsize);
+ };
+
+ my $code = sub {
+ my ($conf, $disk, $mp, $volid, $format, $newsize) = $load_and_check->();
+
+ my $running = PVE::LXC::check_running($vmid);
+
PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr");
- my $realcmd = sub {
- # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
- # we pass 0 here (parameter only makes sense for qemu)
- PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);
- $mp->{size} = $newsize;
- $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');
+ # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
+ # we pass 0 here (parameter only makes sense for qemu)
+ PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);
- PVE::LXC::Config->write_config($vmid, $conf);
+ $mp->{size} = $newsize;
+ $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');
- if ($format eq 'raw') {
- # we need to ensure that the volume is mapped, if not needed this is a NOP
- my $path = PVE::Storage::map_volume($storage_cfg, $volid);
- $path = PVE::Storage::path($storage_cfg, $volid) if !defined($path);
- if ($running) {
-
- $mp->{mp} = '/';
- my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
- $path = PVE::LXC::query_loopdev($path) if $use_loopdev;
- die "internal error: CT running but mount point not attached to a loop device"
- if !$path;
- PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;
-
- # In order for resize2fs to know that we need online-resizing a mountpoint needs
- # to be visible to it in its namespace.
- # To not interfere with the rest of the system we unshare the current mount namespace,
- # mount over /tmp and then run resize2fs.
-
- # interestingly we don't need to e2fsck on mounted systems...
- my $quoted = PVE::Tools::shellquote($path);
- my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
- eval {
- PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
- };
- warn "Failed to update the container's filesystem: $@\n" if $@;
- } else {
- eval {
- PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
- PVE::Tools::run_command(['resize2fs', $path]);
- };
- warn "Failed to update the container's filesystem: $@\n" if $@;
+ PVE::LXC::Config->write_config($vmid, $conf);
- # always un-map if not running, this is a NOP if not needed
- PVE::Storage::unmap_volume($storage_cfg, $volid);
- }
+ if ($format eq 'raw') {
+ # we need to ensure that the volume is mapped; if not needed, this is a NOP
+ my $path = PVE::Storage::map_volume($storage_cfg, $volid);
+ $path = PVE::Storage::path($storage_cfg, $volid) if !defined($path);
+ if ($running) {
+
+ $mp->{mp} = '/';
+ my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
+ $path = PVE::LXC::query_loopdev($path) if $use_loopdev;
+ die "internal error: CT running but mount point not attached to a loop device"
+ if !$path;
+ PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;
+
+ # For resize2fs to do online resizing, the mount point needs to be
+ # visible to it in its namespace.
+ # To not interfere with the rest of the system, we unshare the current
+ # mount namespace, mount over /tmp and then run resize2fs there.
+
+ # interestingly we don't need to e2fsck on mounted systems...
+ my $quoted = PVE::Tools::shellquote($path);
+ my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
+ eval {
+ PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
+ } else {
+ eval {
+ PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
+ PVE::Tools::run_command(['resize2fs', $path]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
+
+ # always un-map if not running, this is a NOP if not needed
+ PVE::Storage::unmap_volume($storage_cfg, $volid);
}
- };
+ }
+ };
- return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd);
+ my $worker = sub {
+ PVE::LXC::Config->lock_config($vmid, $code);
};
- return PVE::LXC::Config->lock_config($vmid, $code);;
+ $load_and_check->(); # early checks before forking+locking
+
+ return $rpcenv->fork_worker('resize', $vmid, $authuser, $worker);
}});
__PACKAGE__->register_method({
"and 'Datastore.AllocateSpace' permissions on the storage. To move ".
"a volume to another container, you need the permissions on the ".
"target container as well.",
- check =>
- [ 'and',
- ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
- ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
- ],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
},
parameters => {
additionalProperties => 0,
die $err;
}
+ my $deactivated = 0;
+ eval {
+ PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]);
+ $deactivated = 1;
+ };
+ warn $@ if $@;
+
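+ # only free the old volume if it was deactivated cleanly; otherwise keep it
+ # around as an unused volume (fallback below)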
if ($param->{delete}) {
- eval {
- PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]);
- PVE::Storage::vdisk_free($storage_cfg, $old_volid);
- };
- if (my $err = $@) {
- warn $err;
+ my $removed = 0;
+ if ($deactivated) {
+ eval {
+ PVE::Storage::vdisk_free($storage_cfg, $old_volid);
+ $removed = 1;
+ };
+ warn $@ if $@;
+ }
+ if (!$removed) {
PVE::LXC::Config->lock_config($vmid, sub {
my $conf = PVE::LXC::Config->load_config($vmid);
PVE::LXC::Config->add_unused_volume($conf, $old_volid);
my $source_conf = PVE::LXC::Config->load_config($vmid);
PVE::LXC::Config->check_lock($source_conf);
- my $target_conf = PVE::LXC::Config->load_config($target_vmid);
- PVE::LXC::Config->check_lock($target_conf);
+ my $target_conf;
+ if ($target_vmid eq $vmid) {
+ $target_conf = $source_conf;
+ } else {
+ $target_conf = PVE::LXC::Config->load_config($target_vmid);
+ PVE::LXC::Config->check_lock($target_conf);
+ }
die "Can't move volumes from or to template CT\n"
if ($source_conf->{template} || $target_conf->{template});
die "Target volume key '${target_mpkey}' is already in use for container '$target_vmid'\n"
if exists $target_conf->{$target_mpkey};
- my $drive = PVE::LXC::Config->parse_volume(
- $mpkey,
- $source_conf->{$mpkey},
- );
-
- my $source_volid = $drive->{volume};
-
- die "Volume '${mpkey}' has no associated image\n"
- if !$source_volid;
+ my $drive = PVE::LXC::Config->parse_volume($mpkey, $source_conf->{$mpkey});
+ my $source_volid = $drive->{volume} or die "Volume '${mpkey}' has no associated image\n";
die "Cannot move volume used by a snapshot to another container\n"
if PVE::LXC::Config->is_volume_in_use_by_snapshots($source_conf, $source_volid);
die "Storage does not support moving of this disk to another container\n"
if !PVE::Storage::volume_has_feature($storecfg, 'rename', $source_volid);
die "Cannot move a bindmount or device mount to another container\n"
if $drive->{type} ne "volume";
- die "Cannot move volume to another container while the source is running - detach first\n"
+ die "Cannot move in-use volume while the source CT is running - detach or shutdown first\n"
if PVE::LXC::check_running($vmid) && $mpkey !~ m/^unused\d+$/;
my $repl_conf = PVE::ReplicationConfig->new();
return ($source_conf, $target_conf, $drive);
};
- my $logfunc = sub {
- my ($msg) = @_;
- print STDERR "$msg\n";
- };
+ my $logfunc = sub { print STDERR "$_[0]\n"; };
my $volume_reassignfn = sub {
return PVE::LXC::Config->lock_config($vmid, sub {
return PVE::LXC::Config->lock_config($target_vmid, sub {
- my ($source_conf, $target_conf, $drive) = &$load_and_check_reassign_configs();
+ my ($source_conf, $target_conf, $drive) = $load_and_check_reassign_configs->();
my $source_volid = $drive->{volume};
my $target_unused = $target_mpkey =~ m/^unused\d+$/;
PVE::LXC::Config->write_config($vmid, $source_conf);
my $drive_string;
-
if ($target_unused) {
$drive_string = $new_volid;
} else {
$running,
$param
);
-
- foreach my $key (keys %$errors) {
- $rpcenv->warn($errors->{$key});
- }
+ $rpcenv->warn($errors->{$_}) for keys $errors->%*;
}
PVE::LXC::Config->write_config($target_vmid, $target_conf);
$rpcenv->check_vm_perm($authuser, $target_vmid, undef, ['VM.Config.Disk'])
if $authuser ne 'root@pam';
- if ($vmid eq $target_vmid) {
- my $msg = "must be different than source VMID to move disk to another container";
- raise_param_exc({ 'target-vmid' => $msg });
- }
-
- &$load_and_check_reassign_configs();
+ my (undef, undef, $drive) = $load_and_check_reassign_configs->();
+ my $storeid = PVE::Storage::parse_volume_id($drive->{volume});
+ $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
return $rpcenv->fork_worker(
'move_volume',
"${vmid}-${mpkey}>${target_vmid}-${target_mpkey}",
$volume_reassignfn
);
} elsif ($storage) {
+ $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
&$move_to_storage_checks();
my $task = eval {
$rpcenv->fork_worker('move_volume', $vmid, $authuser, $storage_realcmd);
return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
}});
+__PACKAGE__->register_method({
+ name => 'ip',
+ path => '{vmid}/interfaces',
+ method => 'GET',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ description => 'Get the IP addresses of all interfaces of the specified container.',
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ },
+ },
+ returns => {
+ type => "array",
+ items => {
+ type => 'object',
+ properties => {
+ name => {
+ type => 'string',
+ description => 'The name of the interface',
+ optional => 0,
+ },
+ hwaddr => {
+ type => 'string',
+ description => 'The MAC address of the interface',
+ optional => 0,
+ },
+ inet => {
+ type => 'string',
+ description => 'The IPv4 address of the interface',
+ optional => 1,
+ },
+ inet6 => {
+ type => 'string',
+ description => 'The IPv6 address of the interface',
+ optional => 1,
+ },
+ }
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ return PVE::LXC::get_interfaces($param->{vmid});
+ }});
+
+__PACKAGE__->register_method({
+ name => 'mtunnel',
+ path => '{vmid}/mtunnel',
+ method => 'POST',
+ protected => 1,
+ description => 'Migration tunnel endpoint - only for internal use by CT migration.',
+ permissions => {
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Allocate' ]],
+ ['perm', '/', [ 'Sys.Incoming' ]],
+ ],
+ description => "You need 'VM.Allocate' permissions on '/vms/{vmid}' and Sys.Incoming" .
+ " on '/'. Further permission checks happen during the actual migration.",
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ storages => {
+ type => 'string',
+ format => 'pve-storage-id-list',
+ optional => 1,
+ description => 'List of storages to check permission and availability. Will be checked again for all actually used storages during migration.',
+ },
+ bridges => {
+ type => 'string',
+ format => 'pve-bridge-id-list',
+ optional => 1,
+ description => 'List of network bridges to check availability. Will be checked again for actually used bridges during migration.',
+ },
+ },
+ },
+ returns => {
+ additionalProperties => 0,
+ properties => {
+ upid => { type => 'string' },
+ ticket => { type => 'string' },
+ socket => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ my $storages = extract_param($param, 'storages');
+ my $bridges = extract_param($param, 'bridges');
+
+ my $nodename = PVE::INotify::nodename();
+
+ raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+ if $node ne 'localhost' && $node ne $nodename;
+
+ $node = $nodename;
+
+ my $storecfg = PVE::Storage::config();
+ foreach my $storeid (PVE::Tools::split_list($storages)) {
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storeid, $node);
+ }
+
+ foreach my $bridge (PVE::Tools::split_list($bridges)) {
+ PVE::Network::read_bridge_mtu($bridge);
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $socket_addr = "/run/pve/ct-$vmid.mtunnel";
+
+ my $lock = 'create';
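+ # reserve the CT ID on this cluster by creating an empty config locked with 'create'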
+ eval { PVE::LXC::Config->create_and_lock_config($vmid, 0, $lock); };
+
+ raise_param_exc({ vmid => "unable to create empty CT config - $@"})
+ if $@;
+
+ my $realcmd = sub {
+ my $state = {
+ storecfg => PVE::Storage::config(),
+ lock => $lock,
+ vmid => $vmid,
+ };
+
+ my $run_locked = sub {
+ my ($code, $params) = @_;
+ return PVE::LXC::Config->lock_config($state->{vmid}, sub {
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+
+ $state->{conf} = $conf;
+
+ die "Encountered wrong lock - aborting mtunnel command handling.\n"
+ if $state->{lock} && !PVE::LXC::Config->has_lock($conf, $state->{lock});
+
+ return $code->($params);
+ });
+ };
+
+ my $cmd_desc = {
+ config => {
+ conf => {
+ type => 'string',
+ description => 'Full CT config, adapted for target cluster/node',
+ },
+ 'firewall-config' => {
+ type => 'string',
+ description => 'CT firewall config',
+ optional => 1,
+ },
+ },
+ ticket => {
+ path => {
+ type => 'string',
+ description => 'Socket path for which the ticket should be valid. Must be known to the current mtunnel instance.',
+ },
+ },
+ quit => {
+ cleanup => {
+ type => 'boolean',
+ description => 'remove CT config and volumes, aborting migration',
+ default => 0,
+ },
+ },
+ 'disk-import' => $PVE::StorageTunnel::cmd_schema->{'disk-import'},
+ 'query-disk-import' => $PVE::StorageTunnel::cmd_schema->{'query-disk-import'},
+ bwlimit => $PVE::StorageTunnel::cmd_schema->{bwlimit},
+ };
+
+ my $cmd_handlers = {
+ 'version' => sub {
+ # compared against other end's version
+ # bump/reset for breaking changes
+ # bump/bump for opt-in changes
+ return {
+ api => $PVE::LXC::Migrate::WS_TUNNEL_VERSION,
+ age => 0,
+ };
+ },
+ 'config' => sub {
+ my ($params) = @_;
+
+ # parse and write out VM FW config if given
+ if (my $fw_conf = $params->{'firewall-config'}) {
+ my ($path, $fh) = PVE::Tools::tempfile_contents($fw_conf, 700);
+
+ my $empty_conf = {
+ rules => [],
+ options => {},
+ aliases => {},
+ ipset => {},
+ ipset_comments => {},
+ };
+ my $cluster_fw_conf = PVE::Firewall::load_clusterfw_conf();
+
+ # TODO: add flag for strict parsing?
+ # TODO: add import sub that does all this given raw content?
+ my $vmfw_conf = PVE::Firewall::generic_fw_config_parser($path, $cluster_fw_conf, $empty_conf, 'vm');
+ $vmfw_conf->{vmid} = $state->{vmid};
+ PVE::Firewall::save_vmfw_conf($state->{vmid}, $vmfw_conf);
+
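+ # remember to remove the firewall config again if the migration gets aborted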
+ $state->{cleanup}->{fw} = 1;
+ }
+
+ my $conf_fn = "incoming/lxc/$state->{vmid}.conf";
+ my $new_conf = PVE::LXC::Config::parse_pct_config($conf_fn, $params->{conf}, 1);
+ delete $new_conf->{lock};
+ delete $new_conf->{digest};
+
+ my $unprivileged = delete $new_conf->{unprivileged};
+ my $arch = delete $new_conf->{arch};
+
+ # TODO handle properly?
+ delete $new_conf->{snapshots};
+ delete $new_conf->{parent};
+ delete $new_conf->{pending};
+ delete $new_conf->{lxc};
+
+ PVE::LXC::Config->remove_lock($state->{vmid}, 'create');
+
+ eval {
+ my $conf = {
+ unprivileged => $unprivileged,
+ arch => $arch,
+ };
+ PVE::LXC::check_ct_modify_config_perm(
+ $rpcenv,
+ $authuser,
+ $state->{vmid},
+ undef,
+ $conf,
+ $new_conf,
+ undef,
+ $unprivileged,
+ );
+ my $errors = PVE::LXC::Config->update_pct_config(
+ $state->{vmid},
+ $conf,
+ 0,
+ $new_conf,
+ [],
+ [],
+ );
+ raise_param_exc($errors) if scalar(keys %$errors);
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+ PVE::LXC::update_lxc_config($vmid, $conf);
+ };
+ if (my $err = $@) {
+ # revert to locked previous config
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+ $conf->{lock} = 'create';
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+
+ die $err;
+ }
+
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+ $conf->{lock} = 'migrate';
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+
+ $state->{lock} = 'migrate';
+
+ return;
+ },
+ 'bwlimit' => sub {
+ my ($params) = @_;
+ return PVE::StorageTunnel::handle_bwlimit($params);
+ },
+ 'disk-import' => sub {
+ my ($params) = @_;
+
+ $check_storage_access_migrate->(
+ $rpcenv,
+ $authuser,
+ $state->{storecfg},
+ $params->{storage},
+ $node
+ );
+
+ $params->{unix} = "/run/pve/ct-$state->{vmid}.storage";
+
+ return PVE::StorageTunnel::handle_disk_import($state, $params);
+ },
+ 'query-disk-import' => sub {
+ my ($params) = @_;
+
+ return PVE::StorageTunnel::handle_query_disk_import($state, $params);
+ },
+ 'unlock' => sub {
+ PVE::LXC::Config->remove_lock($state->{vmid}, $state->{lock});
+ delete $state->{lock};
+ return;
+ },
+ 'start' => sub {
+ PVE::LXC::vm_start(
+ $state->{vmid},
+ $state->{conf},
+ 0
+ );
+
+ return;
+ },
+ 'stop' => sub {
+ PVE::LXC::vm_stop($state->{vmid}, 1, 10, 1);
+ return;
+ },
+ 'ticket' => sub {
+ my ($params) = @_;
+
+ my $path = $params->{path};
+
+ die "Not allowed to generate ticket for unknown socket '$path'\n"
+ if !defined($state->{sockets}->{$path});
+
+ return { ticket => PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$path") };
+ },
+ 'quit' => sub {
+ my ($params) = @_;
+
+ if ($params->{cleanup}) {
+ if ($state->{cleanup}->{fw}) {
+ PVE::Firewall::remove_vmfw_conf($state->{vmid});
+ }
+
+ for my $volid (keys %{ $state->{cleanup}->{volumes} // {} }) { # may be unset if no disk was imported
+ print "freeing volume '$volid' as part of cleanup\n";
+ eval { PVE::Storage::vdisk_free($state->{storecfg}, $volid) };
+ warn $@ if $@;
+ }
+
+ PVE::LXC::destroy_lxc_container(
+ $state->{storecfg},
+ $state->{vmid},
+ $state->{conf},
+ undef,
+ 0,
+ );
+ }
+
+ print "switching to exit-mode, waiting for client to disconnect\n";
+ $state->{exit} = 1;
+ return;
+ },
+ };
+
+ $run_locked->(sub {
+ my $socket_addr = "/run/pve/ct-$state->{vmid}.mtunnel";
+ unlink $socket_addr;
+
+ $state->{socket} = IO::Socket::UNIX->new(
+ Type => SOCK_STREAM(),
+ Local => $socket_addr,
+ Listen => 1,
+ );
+
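+ # pveproxy runs as www-data and needs to connect to this socket for the
+ # later websocket upgrade, so hand ownership to it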
+ $state->{socket_uid} = getpwnam('www-data')
+ or die "Failed to resolve user 'www-data' to numeric UID\n";
+ chown $state->{socket_uid}, -1, $socket_addr;
+ });
+
+ print "mtunnel started\n";
+
+ my $conn = eval { PVE::Tools::run_with_timeout(300, sub { $state->{socket}->accept() }) };
+ if ($@) {
+ warn "Failed to accept tunnel connection - $@\n";
+
+ warn "Removing tunnel socket..\n";
+ unlink $state->{socket};
+
+ warn "Removing temporary VM config..\n";
+ $run_locked->(sub {
+ PVE::LXC::destroy_config($state->{vmid});
+ });
+
+ die "Exiting mtunnel\n";
+ }
+
+ $state->{conn} = $conn;
+
+ my $reply_err = sub {
+ my ($msg) = @_;
+
+ my $reply = JSON::encode_json({
+ success => JSON::false,
+ msg => $msg,
+ });
+ $conn->print("$reply\n");
+ $conn->flush();
+ };
+
+ my $reply_ok = sub {
+ my ($res) = @_;
+
+ $res->{success} = JSON::true;
+ my $reply = JSON::encode_json($res);
+ $conn->print("$reply\n");
+ $conn->flush();
+ };
+
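+ # line-based protocol: each request is a single JSON object with a 'cmd'
+ # field plus command-specific parameters, answered with one JSON reply line,
+ # e.g. '{"cmd":"version"}' -> '{"success":true,"api":...,"age":0}'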
+ while (my $line = <$conn>) {
+ chomp $line;
+
+ # untaint, we validate below if needed
+ ($line) = $line =~ /^(.*)$/;
+ my $parsed = eval { JSON::decode_json($line) };
+ if ($@) {
+ $reply_err->("failed to parse command - $@");
+ next;
+ }
+
+ my $cmd = delete $parsed->{cmd};
+ if (!defined($cmd)) {
+ $reply_err->("'cmd' missing");
+ } elsif ($state->{exit}) {
+ $reply_err->("tunnel is in exit-mode, processing '$cmd' cmd not possible");
+ next;
+ } elsif (my $handler = $cmd_handlers->{$cmd}) {
+ print "received command '$cmd'\n";
+ eval {
+ if ($cmd_desc->{$cmd}) {
+ PVE::JSONSchema::validate($parsed, $cmd_desc->{$cmd});
+ } else {
+ $parsed = {};
+ }
+ my $res = $run_locked->($handler, $parsed);
+ $reply_ok->($res);
+ };
+ $reply_err->("failed to handle '$cmd' command - $@")
+ if $@;
+ } else {
+ $reply_err->("unknown command '$cmd' given");
+ }
+ }
+
+ if ($state->{exit}) {
+ print "mtunnel exited\n";
+ } else {
+ die "mtunnel exited unexpectedly\n";
+ }
+ };
+
+ my $ticket = PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$socket_addr");
+ my $upid = $rpcenv->fork_worker('vzmtunnel', $vmid, $authuser, $realcmd);
+
+ return {
+ ticket => $ticket,
+ upid => $upid,
+ socket => $socket_addr,
+ };
+ }});
+
+__PACKAGE__->register_method({
+ name => 'mtunnelwebsocket',
+ path => '{vmid}/mtunnelwebsocket',
+ method => 'GET',
+ permissions => {
+ description => "You need to pass a ticket valid for the selected socket. Tickets can be created via the mtunnel API call, which will check permissions accordingly.",
+ user => 'all', # check inside
+ },
+ description => 'Migration tunnel endpoint for websocket upgrade - only for internal use by CT migration.',
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ socket => {
+ type => "string",
+ description => "unix socket to forward to",
+ },
+ ticket => {
+ type => "string",
+ description => "ticket return by initial 'mtunnel' API call, or retrieved via 'ticket' tunnel command",
+ },
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ port => { type => 'string', optional => 1 },
+ socket => { type => 'string', optional => 1 },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $nodename = PVE::INotify::nodename();
+ my $node = extract_param($param, 'node');
+
+ raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+ if $node ne 'localhost' && $node ne $nodename;
+
+ my $vmid = $param->{vmid};
+ # check VM exists
+ PVE::LXC::Config->load_config($vmid);
+
+ my $socket = $param->{socket};
+ PVE::AccessControl::verify_tunnel_ticket($param->{ticket}, $authuser, "/socket/$socket");
+
+ return { socket => $socket };
+ }});
1;