use strict;
use warnings;
+use IO::Socket::UNIX;
+use Socket qw(SOCK_STREAM);
+
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::INotify;
use PVE::Cluster qw(cfs_read_file);
+use PVE::RRD;
+use PVE::DataCenterConfig;
use PVE::AccessControl;
use PVE::Firewall;
use PVE::Storage;
use PVE::LXC::Create;
use PVE::LXC::Migrate;
use PVE::GuestHelpers;
+use PVE::VZDump::Plugin;
use PVE::API2::LXC::Config;
use PVE::API2::LXC::Status;
use PVE::API2::LXC::Snapshot;
}
}
+# Check that a given storage is usable as migration target for a container:
+# - the storage must be available/enabled on the target $node,
+# - $authuser needs Datastore.AllocateSpace on /storage/$storage,
+# - the storage must be configured for 'rootdir' content (CT disks).
+# Dies (or raises a permission exception) on any failed check.
+my $check_storage_access_migrate = sub {
+ my ($rpcenv, $authuser, $storecfg, $storage, $node) = @_;
+
+ # verifies the storage exists and is enabled on the target node
+ PVE::Storage::storage_check_enabled($storecfg, $storage, $node);
+
+ $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+
+ # container root filesystems require the 'rootdir' content type
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "storage '$storage' does not support CT rootdirs\n"
+ if !$scfg->{content}->{rootdir};
+};
+
__PACKAGE__->register_method ({
subclass => "PVE::API2::LXC::Config",
path => '{vmid}/config',
my $restore = extract_param($param, 'restore');
my $unique = extract_param($param, 'unique');
+ $param->{cpuunits} = PVE::CGroup::clamp_cpu_shares($param->{cpuunits})
+ if defined($param->{cpuunits}); # clamp value depending on cgroup version
+
# used to skip firewall config restore if user lacks permission
my $skip_fw_config_restore = 0;
if ($restore) {
# fixme: limit allowed parameters
}
-
+
my $force = extract_param($param, 'force');
if (!($same_container_exists && $restore && $force)) {
PVE::Tools::validate_ssh_public_keys($ssh_keys) if defined($ssh_keys);
my $pool = extract_param($param, 'pool');
- if (defined($pool)) {
- $rpcenv->check_pool_exist($pool);
- $rpcenv->check_perm_modify($authuser, "/pool/$pool");
- }
+ $rpcenv->check_pool_exist($pool) if defined($pool);
if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
# OK
# we don't want to restore a container-provided FW conf in this case
# since the user is lacking permission to configure the container's FW
$skip_fw_config_restore = 1;
+
+ # error out if a user tries to change from unprivileged to privileged
+ # explicit change is checked here, implicit is checked down below or happening in root-only paths
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ if ($conf->{unprivileged} && defined($unprivileged) && !$unprivileged) {
+ raise_perm_exc("cannot change from unprivileged to privileged without VM.Allocate");
+ }
} else {
raise_perm_exc();
}
my $ostemplate = extract_param($param, 'ostemplate');
my $storage = extract_param($param, 'storage') // 'local';
- PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []);
+ PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, undef, $param, [], $unprivileged);
my $storage_cfg = cfs_read_file("storage.cfg");
my $archive;
if ($ostemplate eq '-') {
- die "pipe requires cli environment\n"
- if $rpcenv->{type} ne 'cli';
- die "pipe can only be used with restore tasks\n"
+ die "pipe requires cli environment\n"
+ if $rpcenv->{type} ne 'cli';
+ die "pipe can only be used with restore tasks\n"
if !$restore;
$archive = '-';
die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs});
} else {
- PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate);
- $archive = PVE::Storage::abs_filesystem_path($storage_cfg, $ostemplate);
+ my $content_type = $restore ? 'backup' : 'vztmpl';
+ PVE::Storage::check_volume_access(
+ $rpcenv,
+ $authuser,
+ $storage_cfg,
+ $vmid,
+ $ostemplate,
+ $content_type,
+ );
+ $archive = $ostemplate;
}
my %used_storages;
my $check_and_activate_storage = sub {
my ($sid) = @_;
- my $scfg = PVE::Storage::storage_check_node($storage_cfg, $sid, $node);
+ my $scfg = PVE::Storage::storage_check_enabled($storage_cfg, $sid, $node);
raise_param_exc({ storage => "storage '$sid' does not support container directories"})
if !$scfg->{content}->{rootdir};
# check storage access, activate storage
my $delayed_mp_param = {};
- PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
+ PVE::LXC::Config->foreach_volume($mp_param, sub {
my ($ms, $mountpoint) = @_;
my $volid = $mountpoint->{volume};
eval { PVE::LXC::Config->create_and_lock_config($vmid, $force) };
die "$emsg $@" if $@;
+ my $destroy_config_on_error = !$same_container_exists;
+
my $code = sub {
my $old_conf = PVE::LXC::Config->load_config($vmid);
my $was_template;
my $orig_mp_param; # only used if $restore
if ($restore) {
die "can't overwrite running container\n" if PVE::LXC::check_running($vmid);
- if ($is_root && $archive ne '-') {
+ if ($archive ne '-') {
my $orig_conf;
- ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($archive);
+ print "recovering backed-up configuration from '$archive'\n";
+ ($orig_conf, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid);
+
+ for my $opt (keys %$orig_conf) {
+ # early check before disks are created
+ # the "real" check is in later on when actually merging the configs
+ if ($opt =~ /^net\d+$/ && !defined($param->{$opt})) {
+ PVE::LXC::check_bridge_access($rpcenv, $authuser, $orig_conf->{$opt});
+ }
+ }
+
$was_template = delete $orig_conf->{template};
+
# When we're root call 'restore_configuration' with restricted=0,
# causing it to restore the raw lxc entries, among which there may be
# 'lxc.idmap' entries. We need to make sure that the extracted contents
# of the container match up with the restored configuration afterwards:
- $conf->{lxc} = $orig_conf->{lxc};
+ $conf->{lxc} = $orig_conf->{lxc} if $is_root;
+
+ $conf->{unprivileged} = $orig_conf->{unprivileged}
+ if !defined($unprivileged) && defined($orig_conf->{unprivileged});
+
+ # implicit privileged change is checked here
+ if ($old_conf->{unprivileged} && !$conf->{unprivileged}) {
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Allocate']);
+ }
}
}
if ($storage_only_mode) {
if ($restore) {
if (!defined($orig_mp_param)) {
- (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($archive);
+ print "recovering backed-up configuration from '$archive'\n";
+ (undef, $orig_mp_param) = PVE::LXC::Create::recover_config($storage_cfg, $archive, $vmid);
}
$mp_param = $orig_mp_param;
die "rootfs configuration could not be recovered, please check and specify manually!\n"
if !defined($mp_param->{rootfs});
- PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
+ PVE::LXC::Config->foreach_volume($mp_param, sub {
my ($ms, $mountpoint) = @_;
my $type = $mountpoint->{type};
if ($type eq 'volume') {
}
}
+ # up until here we did not modify the container, besides the lock
+ $destroy_config_on_error = 1;
+
$vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf);
# we always have the 'create' lock so check for more than 1 entry
eval {
my $rootdir = PVE::LXC::mount_all($vmid, $storage_cfg, $conf, 1);
$bwlimit = PVE::Storage::get_bandwidth_limit('restore', [keys %used_storages], $bwlimit);
- PVE::LXC::Create::restore_archive($archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit);
+ print "restoring '$archive' now..\n"
+ if $restore && $archive ne '-';
+ PVE::LXC::Create::restore_archive($storage_cfg, $archive, $rootdir, $conf, $ignore_unpack_errors, $bwlimit);
if ($restore) {
- PVE::LXC::Create::restore_configuration($vmid, $rootdir, $conf, !$is_root, $unique, $skip_fw_config_restore);
+ print "merging backed-up and given configuration..\n";
+ PVE::LXC::Create::restore_configuration($vmid, $storage_cfg, $archive, $rootdir, $conf, !$is_root, $unique, $skip_fw_config_restore);
+ PVE::LXC::create_ifaces_ipams_ips($conf, $vmid) if $unique;
my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
$lxc_setup->template_fixup($conf);
} else {
# If the template flag was set, we try to convert again to template after restore
if ($was_template) {
print STDERR "Convert restored container to template...\n";
- if (my $err = check_storage_supports_templates($conf)) {
- warn $err;
- warn "Leave restored backup as container instead of converting to template.\n"
- } else {
- PVE::LXC::template_create($vmid, $conf);
- $conf->{template} = 1;
- }
+ PVE::LXC::template_create($vmid, $conf);
+ $conf->{template} = 1;
}
PVE::LXC::Config->write_config($vmid, $conf);
};
if (my $err = $@) {
- PVE::LXC::destroy_disks($storage_cfg, $vollist);
- eval { PVE::LXC::Config->destroy_config($vmid) };
+ eval { PVE::LXC::delete_ifaces_ipams_ips($conf, $vmid) };
warn $@ if $@;
+ PVE::LXC::destroy_disks($storage_cfg, $vollist);
+ if ($destroy_config_on_error) {
+ eval { PVE::LXC::Config->destroy_config($vmid) };
+ warn $@ if $@;
+
+ if (!$skip_fw_config_restore) { # Only if user has permission to change the fw
+ PVE::Firewall::remove_vmfw_conf($vmid);
+ warn $@ if $@;
+ }
+ }
die "$emsg $err";
}
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
-
- PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node })
- if $start_after_create;
};
my $workername = $restore ? 'vzrestore' : 'vzcreate';
- my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
+ my $realcmd = sub {
+ eval {
+ PVE::LXC::Config->lock_config($vmid, $code);
+ };
+ if (my $err = $@) {
+ # if we aborted before changing the container, we must remove the create lock
+ if (!$destroy_config_on_error) {
+ PVE::LXC::Config->remove_lock($vmid, 'create');
+ }
+ die $err;
+ } elsif ($start_after_create) {
+ PVE::API2::LXC::Status->vm_start({ vmid => $vmid, node => $node });
+ }
+ };
return $rpcenv->fork_worker($workername, $vmid, $authuser, $realcmd);
}});
-sub check_storage_supports_templates {
- my ($conf) = @_;
-
- my $scfg = PVE::Storage::config();
- eval {
- PVE::LXC::Config->foreach_mountpoint($conf, sub {
- my ($ms, $mp) = @_;
-
- my ($sid) = PVE::Storage::parse_volume_id($mp->{volume}, 0);
- die "Warning: Directory storage '$sid' does not support container templates!\n"
- if $scfg->{ids}->{$sid}->{path};
- });
- };
- return $@
-}
-
__PACKAGE__->register_method({
name => 'vmdiridx',
path => '{vmid}',
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_graph(
+ return PVE::RRD::create_rrd_graph(
"pve2-vm/$param->{vmid}", $param->{timeframe},
$param->{ds}, $param->{cf});
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_data(
+ return PVE::RRD::create_rrd_data(
"pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
}});
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }),
+ force => {
+ type => 'boolean',
+ description => "Force destroy, even if running.",
+ default => 0,
+ optional => 1,
+ },
+ purge => {
+ type => 'boolean',
+ description => "Remove container from all related configurations."
+ ." For example, backup jobs, replication jobs or HA."
+ ." Related ACLs and Firewall entries will *always* be removed.",
+ default => 0,
+ optional => 1,
+ },
+ 'destroy-unreferenced-disks' => {
+ type => 'boolean',
+ description => "If set, destroy additionally all disks with the VMID from all"
+ ." enabled storages which are not referenced in the config.",
+ optional => 1,
+ },
},
},
returns => {
my $vmid = $param->{vmid};
# test if container exists
+
my $conf = PVE::LXC::Config->load_config($vmid);
- my $storage_cfg = cfs_read_file("storage.cfg");
- PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
+ my $early_checks = sub {
+ my ($conf) = @_;
+ PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
+ PVE::LXC::Config->check_lock($conf);
- die "unable to remove CT $vmid - used in HA resources\n"
- if PVE::HA::Config::vm_is_ha_managed($vmid);
+ my $ha_managed = PVE::HA::Config::service_is_configured("ct:$vmid");
- # do not allow destroy if there are replication jobs
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
+ if (!$param->{purge}) {
+ die "unable to remove CT $vmid - used in HA resources and purge parameter not set.\n"
+ if $ha_managed;
- my $running_error_msg = "unable to destroy CT $vmid - container is running\n";
+ # do not allow destroy if there are replication jobs without purge
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
+
+ return $ha_managed;
+ };
- die $running_error_msg if PVE::LXC::check_running($vmid); # check early
+ $early_checks->($conf);
+
+ my $running_error_msg = "unable to destroy CT $vmid - container is running\n";
+ die $running_error_msg if !$param->{force} && PVE::LXC::check_running($vmid); # check early
my $code = sub {
# reload config after lock
$conf = PVE::LXC::Config->load_config($vmid);
- PVE::LXC::Config->check_lock($conf);
+ my $ha_managed = $early_checks->($conf);
- die $running_error_msg if PVE::LXC::check_running($vmid);
+ if (PVE::LXC::check_running($vmid)) {
+ die $running_error_msg if !$param->{force};
+ warn "forced to stop CT $vmid before destroying!\n";
+ if (!$ha_managed) {
+ PVE::LXC::vm_stop($vmid, 1);
+ } else {
+ run_command(['ha-manager', 'crm-command', 'stop', "ct:$vmid", '120']);
+ }
+ }
- PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $conf, { lock => 'destroyed' });
+ my $storage_cfg = cfs_read_file("storage.cfg");
+ PVE::LXC::destroy_lxc_container(
+ $storage_cfg,
+ $vmid,
+ $conf,
+ { lock => 'destroyed' },
+ $param->{'destroy-unreferenced-disks'},
+ );
PVE::AccessControl::remove_vm_access($vmid);
PVE::Firewall::remove_vmfw_conf($vmid);
+ if ($param->{purge}) {
+ print "purging CT $vmid from related configurations..\n";
+ PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+ PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+
+ if ($ha_managed) {
+ PVE::HA::Config::delete_service_from_config("ct:$vmid");
+ print "NOTE: removed CT $vmid from HA resource configuration.\n";
+ }
+ }
# only now remove the zombie config, else we can have reuse race
PVE::LXC::Config->destroy_config($vmid);
};
my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
-
+
return $rpcenv->fork_worker('vzdestroy', $vmid, $authuser, $realcmd);
}});
}});
+# Migrate a container to a *different* Proxmox VE cluster over an API
+# connection ("remote migration"). Validates local state, establishes an
+# authenticated API client to the remote endpoint (pinning the TLS
+# fingerprint), parses the required storage/bridge mappings, resolves the
+# remote node name and forks a 'vzmigrate' worker running
+# PVE::LXC::Migrate under the guest migration lock.
+__PACKAGE__->register_method({
+ name => 'remote_migrate_vm',
+ path => '{vmid}/remote_migrate',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Migrate the container to another cluster. Creates a new migration task. EXPERIMENTAL feature!",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ 'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }),
+ 'target-endpoint' => get_standard_option('proxmox-remote', {
+ description => "Remote target endpoint",
+ }),
+ online => {
+ type => 'boolean',
+ description => "Use online/live migration.",
+ optional => 1,
+ },
+ restart => {
+ type => 'boolean',
+ description => "Use restart migration",
+ optional => 1,
+ },
+ timeout => {
+ type => 'integer',
+ description => "Timeout in seconds for shutdown for restart migration",
+ optional => 1,
+ default => 180,
+ },
+ delete => {
+ type => 'boolean',
+ description => "Delete the original CT and related data after successful migration. By default the original CT is kept on the source cluster in a stopped state.",
+ optional => 1,
+ default => 0,
+ },
+ 'target-storage' => get_standard_option('pve-targetstorage', {
+ optional => 0,
+ }),
+ 'target-bridge' => {
+ type => 'string',
+ description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. Providing the special value '1' will map each source bridge to itself.",
+ format => 'bridge-pair-list',
+ },
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'number',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $source_vmid = extract_param($param, 'vmid');
+ my $target_endpoint = extract_param($param, 'target-endpoint');
+ # without an explicit target VMID, keep the source VMID on the remote side
+ my $target_vmid = extract_param($param, 'target-vmid') // $source_vmid;
+
+ my $delete = extract_param($param, 'delete') // 0;
+
+ PVE::Cluster::check_cfs_quorum();
+
+ # test if CT exists
+ my $conf = PVE::LXC::Config->load_config($source_vmid);
+ PVE::LXC::Config->check_lock($conf);
+
+ # try to detect errors early
+ if (PVE::LXC::check_running($source_vmid)) {
+ die "can't migrate running container without --online or --restart\n"
+ if !$param->{online} && !$param->{restart};
+ }
+
+ raise_param_exc({ vmid => "cannot migrate HA-managed CT to remote cluster" })
+ if PVE::HA::Config::vm_is_ha_managed($source_vmid);
+
+ my $remote = PVE::JSONSchema::parse_property_string('proxmox-remote', $target_endpoint);
+
+ # TODO: move this as helper somewhere appropriate?
+ my $conn_args = {
+ protocol => 'https',
+ host => $remote->{host},
+ port => $remote->{port} // 8006,
+ apitoken => $remote->{apitoken},
+ };
+
+ my $fp;
+ if ($fp = $remote->{fingerprint}) {
+ $conn_args->{cached_fingerprints} = { uc($fp) => 1 };
+ }
+
+ print "Establishing API connection with remote at '$remote->{host}'\n";
+
+ my $api_client = PVE::APIClient::LWP->new(%$conn_args);
+
+ if (!defined($fp)) {
+ # no fingerprint given - fetch the remote's certificate info and pin
+ # its fingerprint, preferring the custom 'pveproxy-ssl.pem' over the
+ # self-signed 'pve-ssl.pem'
+ my $cert_info = $api_client->get("/nodes/localhost/certificates/info");
+ foreach my $cert (@$cert_info) {
+ my $filename = $cert->{filename};
+ next if $filename ne 'pveproxy-ssl.pem' && $filename ne 'pve-ssl.pem';
+ $fp = $cert->{fingerprint} if !$fp || $filename eq 'pveproxy-ssl.pem';
+ }
+ $conn_args->{cached_fingerprints} = { uc($fp) => 1 }
+ if defined($fp);
+ }
+
+ my $storecfg = PVE::Storage::config();
+ my $target_storage = extract_param($param, 'target-storage');
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') };
+ raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+ if $@;
+
+ my $target_bridge = extract_param($param, 'target-bridge');
+ my $bridgemap = eval { PVE::JSONSchema::parse_idmap($target_bridge, 'pve-bridge-id') };
+ raise_param_exc({ 'target-bridge' => "failed to parse bridge map: $@" })
+ if $@;
+
+ # an identity mapping cannot be verified against the remote cluster here
+ die "remote migration requires explicit storage mapping!\n"
+ if $storagemap->{identity};
+
+ $param->{storagemap} = $storagemap;
+ $param->{bridgemap} = $bridgemap;
+ $param->{remote} = {
+ conn => $conn_args, # re-use fingerprint for tunnel
+ client => $api_client,
+ vmid => $target_vmid,
+ };
+ $param->{migration_type} = 'websocket';
+ $param->{delete} = $delete if $delete;
+
+ # the API endpoint answers as 'localhost'; find its real node name via
+ # the cluster status ('local' marks the node we are connected to)
+ my $cluster_status = $api_client->get("/cluster/status");
+ my $target_node;
+ foreach my $entry (@$cluster_status) {
+ next if $entry->{type} ne 'node';
+ if ($entry->{local}) {
+ $target_node = $entry->{name};
+ last;
+ }
+ }
+
+ die "couldn't determine endpoint's node name\n"
+ if !defined($target_node);
+
+ my $realcmd = sub {
+ PVE::LXC::Migrate->migrate($target_node, $remote->{host}, $source_vmid, $param);
+ };
+
+ # serialize with other migrations/operations on this guest
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($source_vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('vzmigrate', $source_vmid, $authuser, $worker);
+ }});
+
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
description => "Target node.",
completion => \&PVE::Cluster::complete_migration_target,
}),
+ 'target-storage' => get_standard_option('pve-targetstorage'),
online => {
type => 'boolean',
description => "Use online/live migration.",
optional => 1,
default => 180,
},
- force => {
- type => 'boolean',
- description => "Force migration despite local bind / device" .
- " mounts. NOTE: deprecated, use 'shared' property of mount point instead.",
- optional => 1,
- },
bwlimit => {
description => "Override I/O bandwidth limit (in KiB/s).",
optional => 1,
if !$param->{online} && !$param->{restart};
}
+ if (my $targetstorage = delete $param->{'target-storage'}) {
+ my $storecfg = PVE::Storage::config();
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+ if $@;
+
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk'])
+ if !defined($storagemap->{identity});
+
+ foreach my $target_sid (values %{$storagemap->{entries}}) {
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $target_sid, $target);
+ }
+
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storagemap->{default}, $target)
+ if $storagemap->{default};
+
+ $param->{storagemap} = $storagemap;
+ }
+
if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
die "you can't convert a CT to template if the CT is running\n"
if PVE::LXC::check_running($vmid);
- if (my $err = check_storage_supports_templates($conf)) {
- die $err;
- }
-
my $realcmd = sub {
PVE::LXC::template_create($vmid, $conf);
description => "You need 'VM.Clone' permissions on /vms/{vmid}, " .
"and 'VM.Allocate' permissions " .
"on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
- "'Datastore.AllocateSpace' on any used storage.",
+ "'Datastore.AllocateSpace' on any used storage, and 'SDN.Use' on any bridge.",
check =>
[ 'and',
['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
+ my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
-
my $newid = extract_param($param, 'newid');
-
my $pool = extract_param($param, 'pool');
-
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
}
-
my $snapname = extract_param($param, 'snapname');
-
my $storage = extract_param($param, 'storage');
-
my $target = extract_param($param, 'target');
-
my $localnode = PVE::INotify::nodename();
- undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+ undef $target if $target && ($target eq $localnode || $target eq 'localhost');
PVE::Cluster::check_node_exists($target) if $target;
PVE::Storage::storage_check_enabled($storecfg, $storage);
if ($target) {
# check if storage is available on target node
- PVE::Storage::storage_check_node($storecfg, $storage, $target);
+ PVE::Storage::storage_check_enabled($storecfg, $storage, $target);
# clone only works if target storage is shared
my $scfg = PVE::Storage::storage_config($storecfg, $storage);
die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
PVE::Cluster::check_cfs_quorum();
- my $conffile;
my $newconf = {};
my $mountpoints = {};
my $fullclone = {};
my $vollist = [];
my $running;
- PVE::LXC::Config->lock_config($vmid, sub {
- my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
+ my $lock_and_reload = sub {
+ my ($vmid, $code) = @_;
+ return PVE::LXC::Config->lock_config($vmid, sub {
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ die "Lost 'create' config lock, aborting.\n"
+ if !PVE::LXC::Config->has_lock($conf, 'create');
+
+ return $code->($conf);
+ });
+ };
+
+ my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
+
+ eval {
+ PVE::LXC::Config->create_and_lock_config($newid, 0);
+ };
+ if (my $err = $@) {
+ eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+ warn "Failed to remove source CT config lock - $@\n" if $@;
+
+ die $err;
+ }
+ eval {
$running = PVE::LXC::check_running($vmid) || 0;
my $full = extract_param($param, 'full');
if (!defined($full)) {
$full = !PVE::LXC::Config->is_template($src_conf);
}
- die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full;
-
- eval {
- die "snapshot '$snapname' does not exist\n"
- if $snapname && !defined($src_conf->{snapshots}->{$snapname});
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
- my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf;
+ die "parameter 'storage' not allowed for linked clones\n"
+ if defined($storage) && !$full;
- $conffile = PVE::LXC::Config->config_file($newid);
- die "unable to create CT $newid: config file already exists\n"
- if -f $conffile;
+ die "snapshot '$snapname' does not exist\n"
+ if $snapname && !defined($src_conf->{snapshots}->{$snapname});
- my $sharedvm = 1;
- foreach my $opt (keys %$src_conf) {
- next if $opt =~ m/^unused\d+$/;
+ my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf;
- my $value = $src_conf->{$opt};
+ my $sharedvm = 1;
+ for my $opt (sort keys %$src_conf) {
+ next if $opt =~ m/^unused\d+$/;
- if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
- my $mp = $opt eq 'rootfs' ?
- PVE::LXC::Config->parse_ct_rootfs($value) :
- PVE::LXC::Config->parse_ct_mountpoint($value);
+ my $value = $src_conf->{$opt};
- if ($mp->{type} eq 'volume') {
- my $volid = $mp->{volume};
+ if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
+ my $mp = PVE::LXC::Config->parse_volume($opt, $value);
- my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
- $sid = $storage if defined($storage);
- my $scfg = PVE::Storage::storage_config($storecfg, $sid);
- if (!$scfg->{shared}) {
- $sharedvm = 0;
- warn "found non-shared volume: $volid\n" if $target;
- }
-
- $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+ if ($mp->{type} eq 'volume') {
+ my $volid = $mp->{volume};
- if ($full) {
- die "Cannot do full clones on a running container without snapshots\n"
- if $running && !defined($snapname);
- $fullclone->{$opt} = 1;
- } else {
- # not full means clone instead of copy
- die "Linked clone feature for '$volid' is not available\n"
- if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
- }
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ $sid = $storage if defined($storage);
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+ if (!$scfg->{shared}) {
+ $sharedvm = 0;
+ warn "found non-shared volume: $volid\n" if $target;
+ }
- $mountpoints->{$opt} = $mp;
- push @$vollist, $volid;
+ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+ if ($full) {
+ die "Cannot do full clones on a running container without snapshots\n"
+ if $running && !defined($snapname);
+ $fullclone->{$opt} = 1;
} else {
- # TODO: allow bind mounts?
- die "unable to clone mountpint '$opt' (type $mp->{type})\n";
+ # not full means clone instead of copy
+ die "Linked clone feature for '$volid' is not available\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running, {'valid_target_formats' => ['raw', 'subvol']});
}
- } elsif ($opt =~ m/^net(\d+)$/) {
- # always change MAC! address
- my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
- my $net = PVE::LXC::Config->parse_lxc_network($value);
- $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
- $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net);
+
+ $mountpoints->{$opt} = $mp;
+ push @$vollist, $volid;
+
} else {
- # copy everything else
- $newconf->{$opt} = $value;
+ # TODO: allow bind mounts?
+ die "unable to clone mountpoint '$opt' (type $mp->{type})\n";
}
+ } elsif ($opt =~ m/^net(\d+)$/) {
+ # always change MAC! address
+ my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
+ my $net = PVE::LXC::Config->parse_lxc_network($value);
+ $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
+ $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net);
+
+ PVE::LXC::check_bridge_access($rpcenv, $authuser, $newconf->{$opt});
+ } else {
+ # copy everything else
+ $newconf->{$opt} = $value;
}
- die "can't clone CT to node '$target' (CT uses local storage)\n"
- if $target && !$sharedvm;
+ }
+ die "can't clone CT to node '$target' (CT uses local storage)\n"
+ if $target && !$sharedvm;
- # Replace the 'disk' lock with a 'create' lock.
- $newconf->{lock} = 'create';
+ # Replace the 'disk' lock with a 'create' lock.
+ $newconf->{lock} = 'create';
- delete $newconf->{template};
- if ($param->{hostname}) {
- $newconf->{hostname} = $param->{hostname};
- }
+ # delete all snapshot related config options
+ delete $newconf->@{qw(snapshots parent snaptime snapstate)};
- if ($param->{description}) {
- $newconf->{description} = $param->{description};
- }
+ delete $newconf->{pending};
+ delete $newconf->{template};
+
+ $newconf->{hostname} = $param->{hostname} if $param->{hostname};
+ $newconf->{description} = $param->{description} if $param->{description};
- # create empty/temp config - this fails if CT already exists on other node
+ $lock_and_reload->($newid, sub {
PVE::LXC::Config->write_config($newid, $newconf);
+ });
+ };
+ if (my $err = $@) {
+ eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+ warn "Failed to remove source CT config lock - $@\n" if $@;
+
+ eval {
+ $lock_and_reload->($newid, sub {
+ PVE::LXC::Config->destroy_config($newid);
+ PVE::Firewall::remove_vmfw_conf($newid);
+ });
};
- if (my $err = $@) {
- eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
- warn $@ if $@;
- die $err;
- }
- });
+ warn "Failed to remove target CT config - $@\n" if $@;
+
+ die $err;
+ }
my $update_conf = sub {
my ($key, $value) = @_;
- return PVE::LXC::Config->lock_config($newid, sub {
- my $conf = PVE::LXC::Config->load_config($newid);
- die "Lost 'create' config lock, aborting.\n"
- if !PVE::LXC::Config->has_lock($conf, 'create');
+ return $lock_and_reload->($newid, sub {
+ my $conf = shift;
$conf->{$key} = $value;
PVE::LXC::Config->write_config($newid, $conf);
});
}
PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
- PVE::LXC::Config->remove_lock($newid, 'create');
- if ($target) {
- # always deactivate volumes - avoid lvm LVs to be active on several nodes
- PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
- PVE::Storage::deactivate_volumes($storecfg, $newvollist);
+ $lock_and_reload->($newid, sub {
+ my $conf = shift;
+ my $rootdir = PVE::LXC::mount_all($newid, $storecfg, $conf, 1);
- my $newconffile = PVE::LXC::Config->config_file($newid, $target);
- die "Failed to move config to node '$target' - rename failed: $!\n"
- if !rename($conffile, $newconffile);
- }
+ eval {
+ PVE::LXC::create_ifaces_ipams_ips($conf, $vmid);
+ my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
+ $lxc_setup->post_clone_hook($conf);
+ };
+ my $err = $@;
+ eval { PVE::LXC::umount_all($newid, $storecfg, $conf, 1); };
+ if ($err) {
+ warn "$@\n" if $@;
+ die $err;
+ } else {
+ die $@ if $@;
+ }
+ });
};
my $err = $@;
-
# Unlock the source config in any case:
eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
warn $@ if $@;
if ($err) {
- # Now cleanup the config & disks:
- unlink $conffile;
-
+ # Now cleanup the config & disks & ipam:
sleep 1; # some storages like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
eval { PVE::Storage::vdisk_free($storecfg, $volid); };
warn $@ if $@;
}
+
+ eval {
+ $lock_and_reload->($newid, sub {
+ my $conf = shift;
+ PVE::LXC::delete_ifaces_ipams_ips($conf, $newid);
+ PVE::LXC::Config->destroy_config($newid);
+ PVE::Firewall::remove_vmfw_conf($newid);
+ });
+ };
+ warn "Failed to remove target CT config - $@\n" if $@;
+
die "clone failed: $err";
}
- return;
- };
-
- PVE::Firewall::clone_vmfw_conf($vmid, $newid);
- return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
- }});
+ $lock_and_reload->($newid, sub {
+ PVE::LXC::Config->remove_lock($newid, 'create');
+
+ if ($target) {
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
+ PVE::Storage::deactivate_volumes($storecfg, $newvollist);
+
+ PVE::LXC::Config->move_config_to_node($newid, $target);
+ }
+ });
+
+ return;
+ };
+
+ return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
+ }});
__PACKAGE__->register_method({
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::LXC::Config->mountpoint_names()],
+ enum => [PVE::LXC::Config->valid_volume_keys()],
},
size => {
type => 'string',
my $sizestr = extract_param($param, 'size');
my $ext = ($sizestr =~ s/^\+//);
- my $newsize = PVE::JSONSchema::parse_size($sizestr);
- die "invalid size string" if !defined($newsize);
+ my $request_size = PVE::JSONSchema::parse_size($sizestr);
+ die "invalid size string" if !defined($request_size);
die "no options specified\n" if !scalar(keys %$param);
- PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $param, []);
-
my $storage_cfg = cfs_read_file("storage.cfg");
- my $code = sub {
-
+ my $load_and_check = sub {
my $conf = PVE::LXC::Config->load_config($vmid);
PVE::LXC::Config->check_lock($conf);
- PVE::Tools::assert_if_modified($digest, $conf->{digest});
+ PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $conf, $param, [], $conf->{unprivileged});
- my $running = PVE::LXC::check_running($vmid);
+ PVE::Tools::assert_if_modified($digest, $conf->{digest});
my $disk = $param->{disk};
- my $mp = $disk eq 'rootfs' ? PVE::LXC::Config->parse_ct_rootfs($conf->{$disk}) :
- PVE::LXC::Config->parse_ct_mountpoint($conf->{$disk});
+ my $mp = PVE::LXC::Config->parse_volume($disk, $conf->{$disk});
my $volid = $mp->{volume};
die "can't resize mount point owned by another container ($owner)"
if $vmid != $owner;
- die "can't resize volume: $disk if snapshot exists\n"
- if %{$conf->{snapshots}} && $format eq 'qcow2';
-
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
PVE::Storage::activate_volumes($storage_cfg, [$volid]);
my $size = PVE::Storage::volume_size_info($storage_cfg, $volid, 5);
- $newsize += $size if $ext;
+
+ die "Could not determine current size of volume '$volid'\n" if !defined($size);
+
+ my $newsize = $ext ? $size + $request_size : $request_size;
$newsize = int($newsize);
die "unable to shrink disk size\n" if $newsize < $size;
- return if $size == $newsize;
+ die "disk is already at specified size\n" if $size == $newsize;
+
+ return ($conf, $disk, $mp, $volid, $format, $newsize);
+ };
+
+ my $code = sub {
+ my ($conf, $disk, $mp, $volid, $format, $newsize) = $load_and_check->();
+
+ my $running = PVE::LXC::check_running($vmid);
PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr");
- my $realcmd = sub {
- # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
- # we pass 0 here (parameter only makes sense for qemu)
- PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);
- $mp->{size} = $newsize;
- $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');
+ # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
+ # we pass 0 here (parameter only makes sense for qemu)
+ PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);
- PVE::LXC::Config->write_config($vmid, $conf);
+ $mp->{size} = $newsize;
+ $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');
- if ($format eq 'raw') {
- # we need to ensure that the volume is mapped, if not needed this is a NOP
- my $path = PVE::Storage::map_volume($storage_cfg, $volid);
- $path = PVE::Storage::path($storage_cfg, $volid) if !defined($path);
- if ($running) {
-
- $mp->{mp} = '/';
- my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
- $path = PVE::LXC::query_loopdev($path) if $use_loopdev;
- die "internal error: CT running but mount point not attached to a loop device"
- if !$path;
- PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;
-
- # In order for resize2fs to know that we need online-resizing a mountpoint needs
- # to be visible to it in its namespace.
- # To not interfere with the rest of the system we unshare the current mount namespace,
- # mount over /tmp and then run resize2fs.
-
- # interestingly we don't need to e2fsck on mounted systems...
- my $quoted = PVE::Tools::shellquote($path);
- my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
- eval {
- PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
- };
- warn "Failed to update the container's filesystem: $@\n" if $@;
- } else {
- eval {
- PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
- PVE::Tools::run_command(['resize2fs', $path]);
- };
- warn "Failed to update the container's filesystem: $@\n" if $@;
+ PVE::LXC::Config->write_config($vmid, $conf);
- # always un-map if not running, this is a NOP if not needed
- PVE::Storage::unmap_volume($storage_cfg, $volid);
- }
+ if ($format eq 'raw') {
+ # we need to ensure that the volume is mapped, if not needed this is a NOP
+ my $path = PVE::Storage::map_volume($storage_cfg, $volid);
+ $path = PVE::Storage::path($storage_cfg, $volid) if !defined($path);
+ if ($running) {
+
+ $mp->{mp} = '/';
+ my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
+ $path = PVE::LXC::query_loopdev($path) if $use_loopdev;
+ die "internal error: CT running but mount point not attached to a loop device"
+ if !$path;
+ PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;
+
+ # In order for resize2fs to know that we need online-resizing a mountpoint needs
+ # to be visible to it in its namespace.
+ # To not interfere with the rest of the system we unshare the current mount namespace,
+ # mount over /tmp and then run resize2fs.
+
+ # interestingly we don't need to e2fsck on mounted systems...
+ my $quoted = PVE::Tools::shellquote($path);
+ my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
+ eval {
+ PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
+ } else {
+ eval {
+ PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
+ PVE::Tools::run_command(['resize2fs', $path]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
+
+ # always un-map if not running, this is a NOP if not needed
+ PVE::Storage::unmap_volume($storage_cfg, $volid);
}
- };
+ }
+ };
- return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd);
+ my $worker = sub {
+	PVE::LXC::Config->lock_config($vmid, $code);
};
- return PVE::LXC::Config->lock_config($vmid, $code);;
+ $load_and_check->(); # early checks before forking+locking
+
+ return $rpcenv->fork_worker('resize', $vmid, $authuser, $worker);
}});
__PACKAGE__->register_method({
method => 'POST',
protected => 1,
proxyto => 'node',
- description => "Move a rootfs-/mp-volume to a different storage",
+ description => "Move a rootfs-/mp-volume to a different storage or to a different container.",
permissions => {
description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
- "and 'Datastore.AllocateSpace' permissions on the storage.",
- check =>
- [ 'and',
- ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
- ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
- ],
+ "and 'Datastore.AllocateSpace' permissions on the storage. To move ".
+ "a volume to another container, you need the permissions on the ".
+ "target container as well.",
+ check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
},
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ 'target-vmid' => get_standard_option('pve-vmid', {
+ completion => \&PVE::LXC::complete_ctid,
+ optional => 1,
+ }),
volume => {
type => 'string',
- enum => [ PVE::LXC::Config->mountpoint_names() ],
+ #TODO: check how to handle unused mount points as the mp parameter is not configured
+ enum => [ PVE::LXC::Config->valid_volume_keys_with_unused() ],
description => "Volume which will be moved.",
},
storage => get_standard_option('pve-storage-id', {
description => "Target Storage.",
completion => \&PVE::Storage::complete_storage_enabled,
+ optional => 1,
}),
delete => {
type => 'boolean',
- description => "Delete the original volume after successful copy. By default the original is kept as an unused volume entry.",
+ description => "Delete the original volume after successful copy. By default the " .
+ "original is kept as an unused volume entry.",
optional => 1,
default => 0,
},
digest => {
type => 'string',
- description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+	    description => "Prevent changes if current configuration file has different SHA1 " .
+		"digest. This can be used to prevent concurrent modifications.",
maxLength => 40,
optional => 1,
},
minimum => '0',
default => 'clone limit from datacenter or storage config',
},
+ 'target-volume' => {
+ type => 'string',
+ description => "The config key the volume will be moved to. Default is the " .
+ "source volume key.",
+ enum => [PVE::LXC::Config->valid_volume_keys_with_unused()],
+ optional => 1,
+ },
+ 'target-digest' => {
+ type => 'string',
+		description => "Prevent changes if current configuration file of the target " .
+		    "container has a different SHA1 digest. This can be used to prevent " .
+		    "concurrent modifications.",
+ maxLength => 40,
+ optional => 1,
+ },
},
},
returns => {
my $vmid = extract_param($param, 'vmid');
+ my $target_vmid = extract_param($param, 'target-vmid');
+
my $storage = extract_param($param, 'storage');
my $mpkey = extract_param($param, 'volume');
+ my $target_mpkey = extract_param($param, 'target-volume') // $mpkey;
+
+ my $digest = extract_param($param, 'digest');
+
+ my $target_digest = extract_param($param, 'target-digest');
+
my $lockname = 'disk';
my ($mpdata, $old_volid);
- PVE::LXC::Config->lock_config($vmid, sub {
- my $conf = PVE::LXC::Config->load_config($vmid);
- PVE::LXC::Config->check_lock($conf);
+ die "either set storage or target-vmid, but not both\n"
+ if $storage && $target_vmid;
- die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid);
+ my $storecfg = PVE::Storage::config();
- if ($mpkey eq 'rootfs') {
- $mpdata = PVE::LXC::Config->parse_ct_rootfs($conf->{$mpkey});
- } elsif ($mpkey =~ m/mp\d+/) {
- $mpdata = PVE::LXC::Config->parse_ct_mountpoint($conf->{$mpkey});
- } else {
- die "Can't parse $mpkey\n";
- }
- $old_volid = $mpdata->{volume};
+ my $move_to_storage_checks = sub {
+ PVE::LXC::Config->lock_config($vmid, sub {
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($conf);
- die "you can't move a volume with snapshots and delete the source\n"
- if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid);
+ die "cannot move volumes of a running container\n"
+ if PVE::LXC::check_running($vmid);
- PVE::Tools::assert_if_modified($param->{digest}, $conf->{digest});
+ if ($mpkey =~ m/^unused\d+$/) {
+ die "cannot move volume '$mpkey', only configured volumes can be moved to ".
+ "another storage\n";
+ }
- PVE::LXC::Config->set_lock($vmid, $lockname);
- });
+ $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey});
+ $old_volid = $mpdata->{volume};
- my $realcmd = sub {
+ die "you can't move a volume with snapshots and delete the source\n"
+ if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid);
+
+ PVE::Tools::assert_if_modified($digest, $conf->{digest});
+
+ PVE::LXC::Config->set_lock($vmid, $lockname);
+ });
+ };
+
+ my $storage_realcmd = sub {
eval {
- PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage");
+ PVE::Cluster::log_msg(
+ 'info',
+ $authuser,
+ "move volume CT $vmid: move --volume $mpkey --storage $storage"
+ );
my $conf = PVE::LXC::Config->load_config($vmid);
my $storage_cfg = PVE::Storage::config();
PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]);
my $bwlimit = extract_param($param, 'bwlimit');
my $source_storage = PVE::Storage::parse_volume_id($old_volid);
- my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$source_storage, $storage], $bwlimit);
- $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf, undef, $movelimit);
- $mpdata->{volume} = $new_volid;
+ my $movelimit = PVE::Storage::get_bandwidth_limit(
+ 'move',
+ [$source_storage, $storage],
+ $bwlimit
+ );
+ $new_volid = PVE::LXC::copy_volume(
+ $mpdata,
+ $vmid,
+ $storage,
+ $storage_cfg,
+ $conf,
+ undef,
+ $movelimit
+ );
+ if (PVE::LXC::Config->is_template($conf)) {
+ PVE::Storage::activate_volumes($storage_cfg, [ $new_volid ]);
+ my $template_volid = PVE::Storage::vdisk_create_base($storage_cfg, $new_volid);
+ $mpdata->{volume} = $template_volid;
+ } else {
+ $mpdata->{volume} = $new_volid;
+ }
PVE::LXC::Config->lock_config($vmid, sub {
my $digest = $conf->{digest};
$conf = PVE::LXC::Config->load_config($vmid);
PVE::Tools::assert_if_modified($digest, $conf->{digest});
- $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint($mpdata, $mpkey eq 'rootfs');
+ $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint(
+ $mpdata,
+ $mpkey eq 'rootfs'
+ );
PVE::LXC::Config->add_unused_volume($conf, $old_volid) if !$param->{delete};
die $err;
}
+ my $deactivated = 0;
+ eval {
+ PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]);
+ $deactivated = 1;
+ };
+ warn $@ if $@;
+
if ($param->{delete}) {
- eval {
- PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]);
- PVE::Storage::vdisk_free($storage_cfg, $old_volid);
- };
- warn $@ if $@;
+ my $removed = 0;
+ if ($deactivated) {
+ eval {
+ PVE::Storage::vdisk_free($storage_cfg, $old_volid);
+ $removed = 1;
+ };
+ warn $@ if $@;
+ }
+ if (!$removed) {
+ PVE::LXC::Config->lock_config($vmid, sub {
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->add_unused_volume($conf, $old_volid);
+ PVE::LXC::Config->write_config($vmid, $conf);
+ });
+ }
}
};
my $err = $@;
warn $@ if $@;
die $err if $err;
};
- my $task = eval {
- $rpcenv->fork_worker('move_volume', $vmid, $authuser, $realcmd);
+
+ my $load_and_check_reassign_configs = sub {
+ my $vmlist = PVE::Cluster::get_vmlist()->{ids};
+
+ die "Cannot move to/from 'rootfs'\n" if $mpkey eq "rootfs" || $target_mpkey eq "rootfs";
+
+ if ($mpkey =~ m/^unused\d+$/ && $target_mpkey !~ m/^unused\d+$/) {
+ die "Moving an unused volume to a used one is not possible\n";
+ }
+ die "could not find CT ${vmid}\n" if !exists($vmlist->{$vmid});
+ die "could not find CT ${target_vmid}\n" if !exists($vmlist->{$target_vmid});
+
+ my $source_node = $vmlist->{$vmid}->{node};
+ my $target_node = $vmlist->{$target_vmid}->{node};
+
+ die "Both containers need to be on the same node ($source_node != $target_node)\n"
+ if $source_node ne $target_node;
+
+ my $source_conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($source_conf);
+ my $target_conf;
+ if ($target_vmid eq $vmid) {
+ $target_conf = $source_conf;
+ } else {
+ $target_conf = PVE::LXC::Config->load_config($target_vmid);
+ PVE::LXC::Config->check_lock($target_conf);
+ }
+
+ die "Can't move volumes from or to template CT\n"
+ if ($source_conf->{template} || $target_conf->{template});
+
+ if ($digest) {
+ eval { PVE::Tools::assert_if_modified($digest, $source_conf->{digest}) };
+ die "Container ${vmid}: $@" if $@;
+ }
+
+ if ($target_digest) {
+ eval { PVE::Tools::assert_if_modified($target_digest, $target_conf->{digest}) };
+ die "Container ${target_vmid}: $@" if $@;
+ }
+
+ die "volume '${mpkey}' for container '$vmid' does not exist\n"
+ if !defined($source_conf->{$mpkey});
+
+ die "Target volume key '${target_mpkey}' is already in use for container '$target_vmid'\n"
+ if exists $target_conf->{$target_mpkey};
+
+ my $drive = PVE::LXC::Config->parse_volume($mpkey, $source_conf->{$mpkey});
+ my $source_volid = $drive->{volume} or die "Volume '${mpkey}' has no associated image\n";
+ die "Cannot move volume used by a snapshot to another container\n"
+ if PVE::LXC::Config->is_volume_in_use_by_snapshots($source_conf, $source_volid);
+ die "Storage does not support moving of this disk to another container\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'rename', $source_volid);
+ die "Cannot move a bindmount or device mount to another container\n"
+ if $drive->{type} ne "volume";
+ die "Cannot move in-use volume while the source CT is running - detach or shutdown first\n"
+ if PVE::LXC::check_running($vmid) && $mpkey !~ m/^unused\d+$/;
+
+ my $repl_conf = PVE::ReplicationConfig->new();
+ if ($repl_conf->check_for_existing_jobs($target_vmid, 1)) {
+ my ($storeid, undef) = PVE::Storage::parse_volume_id($source_volid);
+ my $format = (PVE::Storage::parse_volname($storecfg, $source_volid))[6];
+
+ die "Cannot move volume on storage '$storeid' to a replicated container - missing replication support\n"
+ if !PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
+ }
+
+ return ($source_conf, $target_conf, $drive);
};
- if (my $err = $@) {
- eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
- warn $@ if $@;
- die $err;
+
+ my $logfunc = sub { print STDERR "$_[0]\n"; };
+
+ my $volume_reassignfn = sub {
+ return PVE::LXC::Config->lock_config($vmid, sub {
+ return PVE::LXC::Config->lock_config($target_vmid, sub {
+ my ($source_conf, $target_conf, $drive) = $load_and_check_reassign_configs->();
+ my $source_volid = $drive->{volume};
+
+ my $target_unused = $target_mpkey =~ m/^unused\d+$/;
+
+ print "moving volume '$mpkey' from container '$vmid' to '$target_vmid'\n";
+
+ my ($storage, $source_volname) = PVE::Storage::parse_volume_id($source_volid);
+
+ my $fmt = (PVE::Storage::parse_volname($storecfg, $source_volid))[6];
+
+ my $new_volid = PVE::Storage::rename_volume(
+ $storecfg,
+ $source_volid,
+ $target_vmid,
+ );
+
+ $drive->{volume} = $new_volid;
+
+ delete $source_conf->{$mpkey};
+ print "removing volume '${mpkey}' from container '${vmid}' config\n";
+ PVE::LXC::Config->write_config($vmid, $source_conf);
+
+ my $drive_string;
+ if ($target_unused) {
+ $drive_string = $new_volid;
+ } else {
+ $drive_string = PVE::LXC::Config->print_volume($target_mpkey, $drive);
+ }
+
+ if ($target_unused) {
+ $target_conf->{$target_mpkey} = $drive_string;
+ } else {
+ my $running = PVE::LXC::check_running($target_vmid);
+ my $param = { $target_mpkey => $drive_string };
+ my $errors = PVE::LXC::Config->update_pct_config(
+ $target_vmid,
+ $target_conf,
+ $running,
+ $param
+ );
+ $rpcenv->warn($errors->{$_}) for keys $errors->%*;
+ }
+
+ PVE::LXC::Config->write_config($target_vmid, $target_conf);
+ $target_conf = PVE::LXC::Config->load_config($target_vmid);
+
+ PVE::LXC::update_lxc_config($target_vmid, $target_conf) if !$target_unused;
+ print "target container '$target_vmid' updated with '$target_mpkey'\n";
+
+ # remove possible replication snapshots
+ if (PVE::Storage::volume_has_feature($storecfg,'replicate', $source_volid)) {
+ eval {
+ PVE::Replication::prepare(
+ $storecfg,
+ [$new_volid],
+ undef,
+ 1,
+ undef,
+ $logfunc,
+ )
+ };
+ if (my $err = $@) {
+ $rpcenv->warn("Failed to remove replication snapshots on volume ".
+ "'${target_mpkey}'. Manual cleanup could be necessary. " .
+ "Error: ${err}\n");
+ }
+ }
+ });
+ });
+ };
+
+ if ($target_vmid && $storage) {
+ my $msg = "either set 'storage' or 'target-vmid', but not both";
+ raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg });
+ } elsif ($target_vmid) {
+ $rpcenv->check_vm_perm($authuser, $target_vmid, undef, ['VM.Config.Disk'])
+ if $authuser ne 'root@pam';
+
+ my (undef, undef, $drive) = $load_and_check_reassign_configs->();
+ my $storeid = PVE::Storage::parse_volume_id($drive->{volume});
+ $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+ return $rpcenv->fork_worker(
+ 'move_volume',
+ "${vmid}-${mpkey}>${target_vmid}-${target_mpkey}",
+ $authuser,
+ $volume_reassignfn
+ );
+ } elsif ($storage) {
+ $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+ &$move_to_storage_checks();
+ my $task = eval {
+ $rpcenv->fork_worker('move_volume', $vmid, $authuser, $storage_realcmd);
+ };
+ if (my $err = $@) {
+ eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
+ warn $@ if $@;
+ die $err;
+ }
+ return $task;
+ } else {
+ my $msg = "both 'storage' and 'target-vmid' missing, either needs to be set";
+ raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg });
}
- return $task;
}});
__PACKAGE__->register_method({
my $pending_delete_hash = PVE::LXC::Config->parse_pending_delete($conf->{pending}->{delete});
- return PVE::GuestHelpers::conf_table_with_pending($conf, $pending_delete_hash);
+ return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
+ }});
+
+__PACKAGE__->register_method({
+ name => 'ip',
+ path => '{vmid}/interfaces',
+ method => 'GET',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ description => 'Get IP addresses of the specified container interface.',
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ },
+ },
+ returns => {
+ type => "array",
+ items => {
+ type => 'object',
+ properties => {
+ name => {
+ type => 'string',
+ description => 'The name of the interface',
+ optional => 0,
+ },
+ hwaddr => {
+ type => 'string',
+ description => 'The MAC address of the interface',
+ optional => 0,
+ },
+ inet => {
+ type => 'string',
+ description => 'The IPv4 address of the interface',
+ optional => 1,
+ },
+ inet6 => {
+ type => 'string',
+ description => 'The IPv6 address of the interface',
+ optional => 1,
+ },
+ }
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ return PVE::LXC::get_interfaces($param->{vmid});
+ }});
+
+__PACKAGE__->register_method({
+ name => 'mtunnel',
+ path => '{vmid}/mtunnel',
+ method => 'POST',
+ protected => 1,
+ description => 'Migration tunnel endpoint - only for internal use by CT migration.',
+ permissions => {
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Allocate' ]],
+ ['perm', '/', [ 'Sys.Incoming' ]],
+ ],
+ description => "You need 'VM.Allocate' permissions on '/vms/{vmid}' and Sys.Incoming" .
+ " on '/'. Further permission checks happen during the actual migration.",
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ storages => {
+ type => 'string',
+ format => 'pve-storage-id-list',
+ optional => 1,
+ description => 'List of storages to check permission and availability. Will be checked again for all actually used storages during migration.',
+ },
+ bridges => {
+ type => 'string',
+ format => 'pve-bridge-id-list',
+ optional => 1,
+ description => 'List of network bridges to check availability. Will be checked again for actually used bridges during migration.',
+ },
+ },
+ },
+ returns => {
+ additionalProperties => 0,
+ properties => {
+ upid => { type => 'string' },
+ ticket => { type => 'string' },
+ socket => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ my $storages = extract_param($param, 'storages');
+ my $bridges = extract_param($param, 'bridges');
+
+ my $nodename = PVE::INotify::nodename();
+
+ raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+ if $node ne 'localhost' && $node ne $nodename;
+
+ $node = $nodename;
+
+ my $storecfg = PVE::Storage::config();
+ foreach my $storeid (PVE::Tools::split_list($storages)) {
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storeid, $node);
+ }
+
+ foreach my $bridge (PVE::Tools::split_list($bridges)) {
+ PVE::Network::read_bridge_mtu($bridge);
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $socket_addr = "/run/pve/ct-$vmid.mtunnel";
+
+ my $lock = 'create';
+ eval { PVE::LXC::Config->create_and_lock_config($vmid, 0, $lock); };
+
+ raise_param_exc({ vmid => "unable to create empty CT config - $@"})
+ if $@;
+
+ my $realcmd = sub {
+ my $state = {
+ storecfg => PVE::Storage::config(),
+ lock => $lock,
+ vmid => $vmid,
+ };
+
+ my $run_locked = sub {
+ my ($code, $params) = @_;
+ return PVE::LXC::Config->lock_config($state->{vmid}, sub {
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+
+ $state->{conf} = $conf;
+
+ die "Encountered wrong lock - aborting mtunnel command handling.\n"
+ if $state->{lock} && !PVE::LXC::Config->has_lock($conf, $state->{lock});
+
+ return $code->($params);
+ });
+ };
+
+ my $cmd_desc = {
+ config => {
+ conf => {
+ type => 'string',
+ description => 'Full CT config, adapted for target cluster/node',
+ },
+ 'firewall-config' => {
+ type => 'string',
+ description => 'CT firewall config',
+ optional => 1,
+ },
+ },
+ ticket => {
+ path => {
+ type => 'string',
+ description => 'socket path for which the ticket should be valid. must be known to current mtunnel instance.',
+ },
+ },
+ quit => {
+ cleanup => {
+ type => 'boolean',
+ description => 'remove CT config and volumes, aborting migration',
+ default => 0,
+ },
+ },
+ 'disk-import' => $PVE::StorageTunnel::cmd_schema->{'disk-import'},
+ 'query-disk-import' => $PVE::StorageTunnel::cmd_schema->{'query-disk-import'},
+ bwlimit => $PVE::StorageTunnel::cmd_schema->{bwlimit},
+ };
+
+ my $cmd_handlers = {
+ 'version' => sub {
+ # compared against other end's version
+ # bump/reset for breaking changes
+ # bump/bump for opt-in changes
+ return {
+ api => $PVE::LXC::Migrate::WS_TUNNEL_VERSION,
+ age => 0,
+ };
+ },
+ 'config' => sub {
+ my ($params) = @_;
+
+ # parse and write out VM FW config if given
+ if (my $fw_conf = $params->{'firewall-config'}) {
+ my ($path, $fh) = PVE::Tools::tempfile_contents($fw_conf, 700);
+
+ my $empty_conf = {
+ rules => [],
+ options => {},
+ aliases => {},
+ ipset => {} ,
+ ipset_comments => {},
+ };
+ my $cluster_fw_conf = PVE::Firewall::load_clusterfw_conf();
+
+ # TODO: add flag for strict parsing?
+ # TODO: add import sub that does all this given raw content?
+ my $vmfw_conf = PVE::Firewall::generic_fw_config_parser($path, $cluster_fw_conf, $empty_conf, 'vm');
+ $vmfw_conf->{vmid} = $state->{vmid};
+ PVE::Firewall::save_vmfw_conf($state->{vmid}, $vmfw_conf);
+
+ $state->{cleanup}->{fw} = 1;
+ }
+
+ my $conf_fn = "incoming/lxc/$state->{vmid}.conf";
+ my $new_conf = PVE::LXC::Config::parse_pct_config($conf_fn, $params->{conf}, 1);
+ delete $new_conf->{lock};
+ delete $new_conf->{digest};
+
+ my $unprivileged = delete $new_conf->{unprivileged};
+ my $arch = delete $new_conf->{arch};
+
+ # TODO handle properly?
+ delete $new_conf->{snapshots};
+ delete $new_conf->{parent};
+ delete $new_conf->{pending};
+ delete $new_conf->{lxc};
+
+ PVE::LXC::Config->remove_lock($state->{vmid}, 'create');
+
+ eval {
+ my $conf = {
+ unprivileged => $unprivileged,
+ arch => $arch,
+ };
+ PVE::LXC::check_ct_modify_config_perm(
+ $rpcenv,
+ $authuser,
+ $state->{vmid},
+ undef,
+ $conf,
+ $new_conf,
+ undef,
+ $unprivileged,
+ );
+ my $errors = PVE::LXC::Config->update_pct_config(
+ $state->{vmid},
+ $conf,
+ 0,
+ $new_conf,
+ [],
+ [],
+ );
+ raise_param_exc($errors) if scalar(keys %$errors);
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+ PVE::LXC::update_lxc_config($vmid, $conf);
+ };
+ if (my $err = $@) {
+ # revert to locked previous config
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+ $conf->{lock} = 'create';
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+
+ die $err;
+ }
+
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+ $conf->{lock} = 'migrate';
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+
+ $state->{lock} = 'migrate';
+
+ return;
+ },
+ 'bwlimit' => sub {
+ my ($params) = @_;
+ return PVE::StorageTunnel::handle_bwlimit($params);
+ },
+ 'disk-import' => sub {
+ my ($params) = @_;
+
+ $check_storage_access_migrate->(
+ $rpcenv,
+ $authuser,
+ $state->{storecfg},
+ $params->{storage},
+ $node
+ );
+
+ $params->{unix} = "/run/pve/ct-$state->{vmid}.storage";
+
+ return PVE::StorageTunnel::handle_disk_import($state, $params);
+ },
+ 'query-disk-import' => sub {
+ my ($params) = @_;
+
+ return PVE::StorageTunnel::handle_query_disk_import($state, $params);
+ },
+ 'unlock' => sub {
+ PVE::LXC::Config->remove_lock($state->{vmid}, $state->{lock});
+ delete $state->{lock};
+ return;
+ },
+ 'start' => sub {
+ PVE::LXC::vm_start(
+ $state->{vmid},
+ $state->{conf},
+ 0
+ );
+
+ return;
+ },
+ 'stop' => sub {
+ PVE::LXC::vm_stop($state->{vmid}, 1, 10, 1);
+ return;
+ },
+ 'ticket' => sub {
+ my ($params) = @_;
+
+ my $path = $params->{path};
+
+ die "Not allowed to generate ticket for unknown socket '$path'\n"
+ if !defined($state->{sockets}->{$path});
+
+ return { ticket => PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$path") };
+ },
+ 'quit' => sub {
+ my ($params) = @_;
+
+ if ($params->{cleanup}) {
+ if ($state->{cleanup}->{fw}) {
+ PVE::Firewall::remove_vmfw_conf($state->{vmid});
+ }
+
+ for my $volid (keys $state->{cleanup}->{volumes}->%*) {
+ print "freeing volume '$volid' as part of cleanup\n";
+ eval { PVE::Storage::vdisk_free($state->{storecfg}, $volid) };
+ warn $@ if $@;
+ }
+
+ PVE::LXC::destroy_lxc_container(
+ $state->{storecfg},
+ $state->{vmid},
+ $state->{conf},
+ undef,
+ 0,
+ );
+ }
+
+ print "switching to exit-mode, waiting for client to disconnect\n";
+ $state->{exit} = 1;
+ return;
+ },
+ };
+
+ $run_locked->(sub {
+ my $socket_addr = "/run/pve/ct-$state->{vmid}.mtunnel";
+ unlink $socket_addr;
+
+ $state->{socket} = IO::Socket::UNIX->new(
+ Type => SOCK_STREAM(),
+ Local => $socket_addr,
+ Listen => 1,
+ );
+
+ $state->{socket_uid} = getpwnam('www-data')
+ or die "Failed to resolve user 'www-data' to numeric UID\n";
+ chown $state->{socket_uid}, -1, $socket_addr;
+ });
+
+ print "mtunnel started\n";
+
+ my $conn = eval { PVE::Tools::run_with_timeout(300, sub { $state->{socket}->accept() }) };
+ if ($@) {
+ warn "Failed to accept tunnel connection - $@\n";
+
+ warn "Removing tunnel socket..\n";
+ unlink $state->{socket};
+
+ warn "Removing temporary VM config..\n";
+ $run_locked->(sub {
+		PVE::LXC::Config->destroy_config($state->{vmid});
+ });
+
+ die "Exiting mtunnel\n";
+ }
+
+ $state->{conn} = $conn;
+
+ my $reply_err = sub {
+ my ($msg) = @_;
+
+ my $reply = JSON::encode_json({
+ success => JSON::false,
+ msg => $msg,
+ });
+ $conn->print("$reply\n");
+ $conn->flush();
+ };
+
+ my $reply_ok = sub {
+ my ($res) = @_;
+
+ $res->{success} = JSON::true;
+ my $reply = JSON::encode_json($res);
+ $conn->print("$reply\n");
+ $conn->flush();
+ };
+
+ while (my $line = <$conn>) {
+ chomp $line;
+
+ # untaint, we validate below if needed
+ ($line) = $line =~ /^(.*)$/;
+ my $parsed = eval { JSON::decode_json($line) };
+ if ($@) {
+ $reply_err->("failed to parse command - $@");
+ next;
+ }
+
+ my $cmd = delete $parsed->{cmd};
+ if (!defined($cmd)) {
+ $reply_err->("'cmd' missing");
+ } elsif ($state->{exit}) {
+ $reply_err->("tunnel is in exit-mode, processing '$cmd' cmd not possible");
+ next;
+ } elsif (my $handler = $cmd_handlers->{$cmd}) {
+ print "received command '$cmd'\n";
+ eval {
+ if ($cmd_desc->{$cmd}) {
+ PVE::JSONSchema::validate($parsed, $cmd_desc->{$cmd});
+ } else {
+ $parsed = {};
+ }
+ my $res = $run_locked->($handler, $parsed);
+ $reply_ok->($res);
+ };
+ $reply_err->("failed to handle '$cmd' command - $@")
+ if $@;
+ } else {
+ $reply_err->("unknown command '$cmd' given");
+ }
+ }
+
+ if ($state->{exit}) {
+ print "mtunnel exited\n";
+ } else {
+ die "mtunnel exited unexpectedly\n";
+ }
+ };
+
+ my $ticket = PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$socket_addr");
+ my $upid = $rpcenv->fork_worker('vzmtunnel', $vmid, $authuser, $realcmd);
+
+ return {
+ ticket => $ticket,
+ upid => $upid,
+ socket => $socket_addr,
+ };
}});
+__PACKAGE__->register_method({
+ name => 'mtunnelwebsocket',
+ path => '{vmid}/mtunnelwebsocket',
+ method => 'GET',
+ permissions => {
+ description => "You need to pass a ticket valid for the selected socket. Tickets can be created via the mtunnel API call, which will check permissions accordingly.",
+ user => 'all', # check inside
+ },
+ description => 'Migration tunnel endpoint for websocket upgrade - only for internal use by VM migration.',
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ socket => {
+ type => "string",
+ description => "unix socket to forward to",
+ },
+ ticket => {
+ type => "string",
+ description => "ticket return by initial 'mtunnel' API call, or retrieved via 'ticket' tunnel command",
+ },
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ port => { type => 'string', optional => 1 },
+ socket => { type => 'string', optional => 1 },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $nodename = PVE::INotify::nodename();
+ my $node = extract_param($param, 'node');
+
+ raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+ if $node ne 'localhost' && $node ne $nodename;
+
+ my $vmid = $param->{vmid};
+ # check VM exists
+ PVE::LXC::Config->load_config($vmid);
+
+ my $socket = $param->{socket};
+ PVE::AccessControl::verify_tunnel_ticket($param->{ticket}, $authuser, "/socket/$socket");
+
+ return { socket => $socket };
+ }});
1;