+# API: POST /nodes/{node}/lxc/{vmid}/clone -- create a full or linked clone of
+# a container.  Runs in two phases: (1) under the source CT's config lock,
+# validate the request and write a skeleton config for the new CT (locked
+# 'create'); (2) in a forked worker, copy/clone the volumes and finalize.
+__PACKAGE__->register_method({
+ name => 'clone_vm',
+ path => '{vmid}/clone',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Create a container clone/copy",
+ permissions => {
+ description => "You need 'VM.Clone' permissions on /vms/{vmid}, " .
+ "and 'VM.Allocate' permissions " .
+ "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
+ "'Datastore.AllocateSpace' on any used storage.",
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
+ [ 'or',
+ [ 'perm', '/vms/{newid}', ['VM.Allocate']],
+ [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+ ],
+ ]
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ newid => get_standard_option('pve-vmid', {
+ completion => \&PVE::Cluster::complete_next_vmid,
+ description => 'VMID for the clone.' }),
+ hostname => {
+ optional => 1,
+ type => 'string', format => 'dns-name',
+ description => "Set a hostname for the new CT.",
+ },
+ description => {
+ optional => 1,
+ type => 'string',
+ description => "Description for the new CT.",
+ },
+ pool => {
+ optional => 1,
+ type => 'string', format => 'pve-poolid',
+ description => "Add the new CT to the specified pool.",
+ },
+ snapname => get_standard_option('pve-lxc-snapshot-name', {
+ optional => 1,
+ }),
+ storage => get_standard_option('pve-storage-id', {
+ description => "Target storage for full clone.",
+ optional => 1,
+ }),
+ full => {
+ optional => 1,
+ type => 'boolean',
+ description => "Create a full copy of all disks. This is always done when " .
+ "you clone a normal CT. For CT templates, we try to create a linked clone by default.",
+ },
+ target => get_standard_option('pve-node', {
+ description => "Target node. Only allowed if the original VM is on shared storage.",
+ optional => 1,
+ }),
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $newid = extract_param($param, 'newid');
+
+ my $pool = extract_param($param, 'pool');
+
+ if (defined($pool)) {
+ $rpcenv->check_pool_exist($pool);
+ }
+
+ my $snapname = extract_param($param, 'snapname');
+
+ my $storage = extract_param($param, 'storage');
+
+ my $target = extract_param($param, 'target');
+
+ my $localnode = PVE::INotify::nodename();
+
+ # a target equal to the local node (or 'localhost') is a plain local clone
+ undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+
+ PVE::Cluster::check_node_exists($target) if $target;
+
+ my $storecfg = PVE::Storage::config();
+
+ if ($storage) {
+ # check if storage is enabled on local node
+ PVE::Storage::storage_check_enabled($storecfg, $storage);
+ if ($target) {
+ # check if storage is available on target node
+ PVE::Storage::storage_check_node($storecfg, $storage, $target);
+ # clone only works if target storage is shared
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
+ }
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $conffile;
+ my $newconf = {};
+ my $mountpoints = {};
+ my $fullclone = {};
+ my $vollist = [];
+ my $running;
+
+ # Phase 1: under the source CT's config lock, place a 'disk' lock on the
+ # source, validate everything and write the new CT's skeleton config.
+ PVE::LXC::Config->lock_config($vmid, sub {
+ my $src_conf = PVE::LXC::Config->set_lock($vmid, 'disk');
+
+ $running = PVE::LXC::check_running($vmid) || 0;
+
+ my $full = extract_param($param, 'full');
+ if (!defined($full)) {
+ # default: full clone for regular CTs, linked clone for templates
+ $full = !PVE::LXC::Config->is_template($src_conf);
+ }
+ die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full;
+
+ eval {
+ die "snapshot '$snapname' does not exist\n"
+ if $snapname && !defined($src_conf->{snapshots}->{$snapname});
+
+
+ # when cloning from a snapshot, work on the snapshot's config
+ # (deliberately shadows the outer $src_conf)
+ my $src_conf = $snapname ? $src_conf->{snapshots}->{$snapname} : $src_conf;
+
+ $conffile = PVE::LXC::Config->config_file($newid);
+ die "unable to create CT $newid: config file already exists\n"
+ if -f $conffile;
+
+ # cross-node clones require every volume to be on shared storage
+ my $sharedvm = 1;
+ foreach my $opt (keys %$src_conf) {
+ next if $opt =~ m/^unused\d+$/;
+
+ my $value = $src_conf->{$opt};
+
+ if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
+ my $mp = $opt eq 'rootfs' ?
+ PVE::LXC::Config->parse_ct_rootfs($value) :
+ PVE::LXC::Config->parse_ct_mountpoint($value);
+
+ if ($mp->{type} eq 'volume') {
+ my $volid = $mp->{volume};
+
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ $sid = $storage if defined($storage);
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+ if (!$scfg->{shared}) {
+ $sharedvm = 0;
+ warn "found non-shared volume: $volid\n" if $target;
+ }
+
+ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+
+ if ($full) {
+ die "Cannot do full clones on a running container without snapshots\n"
+ if $running && !defined($snapname);
+ $fullclone->{$opt} = 1;
+ } else {
+ # not full means clone instead of copy
+ die "Linked clone feature for '$volid' is not available\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
+ }
+
+ $mountpoints->{$opt} = $mp;
+ push @$vollist, $volid;
+
+ } else {
+ # TODO: allow bind mounts?
+ # NOTE(review): typo "mountpint" in this user-visible message
+ die "unable to clone mountpint '$opt' (type $mp->{type})\n";
+ }
+ } elsif ($opt =~ m/^net(\d+)$/) {
+ # always change MAC! address
+ my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
+ my $net = PVE::LXC::Config->parse_lxc_network($value);
+ $net->{hwaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
+ $newconf->{$opt} = PVE::LXC::Config->print_lxc_network($net);
+ } else {
+ # copy everything else
+ $newconf->{$opt} = $value;
+ }
+ }
+ die "can't clone CT to node '$target' (CT uses local storage)\n"
+ if $target && !$sharedvm;
+
+ # Replace the 'disk' lock with a 'create' lock.
+ $newconf->{lock} = 'create';
+
+ delete $newconf->{template};
+ if ($param->{hostname}) {
+ $newconf->{hostname} = $param->{hostname};
+ }
+
+ if ($param->{description}) {
+ $newconf->{description} = $param->{description};
+ }
+
+ # create empty/temp config - this fails if CT already exists on other node
+ PVE::LXC::Config->write_config($newid, $newconf);
+ };
+ if (my $err = $@) {
+ # validation failed: drop the 'disk' lock placed on the source
+ eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+ warn $@ if $@;
+ die $err;
+ }
+ });
+
+ # persist a single key into the new CT's config; refuses to write if the
+ # 'create' lock was lost in the meantime
+ my $update_conf = sub {
+ my ($key, $value) = @_;
+ return PVE::LXC::Config->lock_config($newid, sub {
+ my $conf = PVE::LXC::Config->load_config($newid);
+ die "Lost 'create' config lock, aborting.\n"
+ if !PVE::LXC::Config->has_lock($conf, 'create');
+ $conf->{$key} = $value;
+ PVE::LXC::Config->write_config($newid, $conf);
+ });
+ };
+
+ # Phase 2 (runs in the forked worker): copy or link-clone each volume,
+ # then clear the 'create' lock; on error, free anything created so far.
+ my $realcmd = sub {
+ my ($upid) = @_;
+
+ my $newvollist = [];
+
+ my $verify_running = PVE::LXC::check_running($vmid) || 0;
+ die "unexpected state change\n" if $verify_running != $running;
+
+ eval {
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+ PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+
+ foreach my $opt (keys %$mountpoints) {
+ my $mp = $mountpoints->{$opt};
+ my $volid = $mp->{volume};
+
+ my $newvolid;
+ if ($fullclone->{$opt}) {
+ print "create full clone of mountpoint $opt ($volid)\n";
+ # NOTE(review): parse_volume_id is evaluated in scalar context
+ # here (right-hand side of //); assumes it yields the storage
+ # id in scalar context - confirm against PVE::Storage
+ my $target_storage = $storage // PVE::Storage::parse_volume_id($volid);
+ $newvolid = PVE::LXC::copy_volume($mp, $newid, $target_storage, $storecfg, $newconf, $snapname);
+ } else {
+ print "create linked clone of mount point $opt ($volid)\n";
+ $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname);
+ }
+
+ push @$newvollist, $newvolid;
+ $mp->{volume} = $newvolid;
+
+ $update_conf->($opt, PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs'));
+ }
+
+ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
+ PVE::LXC::Config->remove_lock($newid, 'create');
+
+ if ($target) {
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
+ PVE::Storage::deactivate_volumes($storecfg, $newvollist);
+
+ # hand the new CT over to the target node by moving its config file
+ my $newconffile = PVE::LXC::Config->config_file($newid, $target);
+ die "Failed to move config to node '$target' - rename failed: $!\n"
+ if !rename($conffile, $newconffile);
+ }
+ };
+ my $err = $@;
+
+ # Unlock the source config in any case:
+ eval { PVE::LXC::Config->remove_lock($vmid, 'disk') };
+ warn $@ if $@;
+
+ if ($err) {
+ # Now cleanup the config & disks:
+ unlink $conffile;
+
+ sleep 1; # some storages like rbd need to wait before release volume - really?
+
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ warn $@ if $@;
+ }
+ die "clone failed: $err";
+ }
+
+ return;
+ };
+
+ # clone the firewall config up front, then run phase 2 as a background task
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+ return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
+ }});
+
+
+# API: PUT /nodes/{node}/lxc/{vmid}/resize -- grow a container mount point
+# volume; for raw volumes the contained ext filesystem is resized as well.
+# Shrinking is refused.  Returns the worker task ID.
+__PACKAGE__->register_method({
+ name => 'resize_vm',
+ path => '{vmid}/resize',
+ method => 'PUT',
+ protected => 1,
+ proxyto => 'node',
+ description => "Resize a container mount point.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', ['VM.Config.Disk'], any => 1],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ disk => {
+ type => 'string',
+ description => "The disk you want to resize.",
+ enum => [PVE::LXC::Config->mountpoint_names()],
+ },
+ size => {
+ type => 'string',
+ pattern => '\+?\d+(\.\d+)?[KMGT]?',
+ description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
+ },
+ digest => {
+ type => 'string',
+ description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+ maxLength => 40,
+ optional => 1,
+ }
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $digest = extract_param($param, 'digest');
+
+ my $sizestr = extract_param($param, 'size');
+ # a leading '+' makes the size relative to the current volume size
+ my $ext = ($sizestr =~ s/^\+//);
+ my $newsize = PVE::JSONSchema::parse_size($sizestr);
+ die "invalid size string" if !defined($newsize);
+
+ die "no options specified\n" if !scalar(keys %$param);
+
+ PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $param, []);
+
+ my $storage_cfg = cfs_read_file("storage.cfg");
+
+ my $code = sub {
+
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($conf);
+
+ PVE::Tools::assert_if_modified($digest, $conf->{digest});
+
+ my $running = PVE::LXC::check_running($vmid);
+
+ my $disk = $param->{disk};
+ my $mp = $disk eq 'rootfs' ? PVE::LXC::Config->parse_ct_rootfs($conf->{$disk}) :
+ PVE::LXC::Config->parse_ct_mountpoint($conf->{$disk});
+
+ my $volid = $mp->{volume};
+
+ my (undef, undef, $owner, undef, undef, undef, $format) =
+ PVE::Storage::parse_volname($storage_cfg, $volid);
+
+ die "can't resize mount point owned by another container ($owner)"
+ if $vmid != $owner;
+
+ die "can't resize volume: $disk if snapshot exists\n"
+ if %{$conf->{snapshots}} && $format eq 'qcow2';
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+
+ $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+
+ PVE::Storage::activate_volumes($storage_cfg, [$volid]);
+
+ my $size = PVE::Storage::volume_size_info($storage_cfg, $volid, 5);
+ # relative resize: add the requested delta to the current size
+ $newsize += $size if $ext;
+ $newsize = int($newsize);
+
+ die "unable to shrink disk size\n" if $newsize < $size;
+
+ # nothing to do if the size is unchanged
+ return if $size == $newsize;
+
+ PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr");
+ my $realcmd = sub {
+ # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
+ # we pass 0 here (parameter only makes sense for qemu)
+ PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);
+
+ $mp->{size} = $newsize;
+ $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');
+
+ PVE::LXC::Config->write_config($vmid, $conf);
+
+ # raw volumes carry a filesystem that must be grown separately
+ if ($format eq 'raw') {
+ my $path = PVE::Storage::map_volume($storage_cfg, $volid) // PVE::Storage::path($storage_cfg, $volid);
+ if ($running) {
+
+ # NOTE(review): dummy mount path; presumably only the device
+ # resolution from mountpoint_mount_path matters here - verify
+ $mp->{mp} = '/';
+ my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
+ $path = PVE::LXC::query_loopdev($path) if $use_loopdev;
+ die "internal error: CT running but mount point not attached to a loop device"
+ if !$path;
+ PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;
+
+ # In order for resize2fs to know that we need online-resizing a mountpoint needs
+ # to be visible to it in its namespace.
+ # To not interfere with the rest of the system we unshare the current mount namespace,
+ # mount over /tmp and then run resize2fs.
+
+ # interestingly we don't need to e2fsck on mounted systems...
+ my $quoted = PVE::Tools::shellquote($path);
+ my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
+ eval {
+ PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
+ } else {
+ # offline: fsck first, then resize, then release the mapping
+ eval {
+ PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
+ PVE::Tools::run_command(['resize2fs', $path]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
+
+ PVE::Storage::unmap_volume($storage_cfg, $volid);
+ }
+ }
+ };
+
+ return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd);
+ };
+
+ # NOTE(review): stray double semicolon at the end of this line
+ return PVE::LXC::Config->lock_config($vmid, $code);;
+ }});
+
+# API: POST /nodes/{node}/lxc/{vmid}/move_volume -- copy a rootfs/mp volume to
+# another storage.  The original volume is kept as an 'unused' config entry
+# unless 'delete' is set.  The container must not be running.
+__PACKAGE__->register_method({
+ name => 'move_volume',
+ path => '{vmid}/move_volume',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Move a rootfs-/mp-volume to a different storage",
+ permissions => {
+ description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
+ "and 'Datastore.AllocateSpace' permissions on the storage.",
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
+ ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
+ ],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ volume => {
+ type => 'string',
+ enum => [ PVE::LXC::Config->mountpoint_names() ],
+ description => "Volume which will be moved.",
+ },
+ storage => get_standard_option('pve-storage-id', {
+ description => "Target Storage.",
+ completion => \&PVE::Storage::complete_storage_enabled,
+ }),
+ delete => {
+ type => 'boolean',
+ description => "Delete the original volume after successful copy. By default the original is kept as an unused volume entry.",
+ optional => 1,
+ default => 0,
+ },
+ digest => {
+ type => 'string',
+ description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+ maxLength => 40,
+ optional => 1,
+ }
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $storage = extract_param($param, 'storage');
+
+ my $mpkey = extract_param($param, 'volume');
+
+ my $lockname = 'disk';
+
+ my ($mpdata, $old_volid);
+
+ # Phase 1: under the config lock, validate the request and place a
+ # 'disk' lock on the CT so it stays consistent while we copy.
+ PVE::LXC::Config->lock_config($vmid, sub {
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($conf);
+
+ die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid);
+
+ if ($mpkey eq 'rootfs') {
+ $mpdata = PVE::LXC::Config->parse_ct_rootfs($conf->{$mpkey});
+ } elsif ($mpkey =~ m/mp\d+/) {
+ $mpdata = PVE::LXC::Config->parse_ct_mountpoint($conf->{$mpkey});
+ } else {
+ die "Can't parse $mpkey\n";
+ }
+ $old_volid = $mpdata->{volume};
+
+ die "you can't move a volume with snapshots and delete the source\n"
+ if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid);
+
+ PVE::Tools::assert_if_modified($param->{digest}, $conf->{digest});
+
+ PVE::LXC::Config->set_lock($vmid, $lockname);
+ });
+
+ # Phase 2 (forked worker): copy the volume, swap it into the config under
+ # a short-lived lock with a digest re-check, then optionally free the
+ # original; the 'disk' lock is always dropped afterwards.
+ my $realcmd = sub {
+ eval {
+ PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage");
+
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ my $storage_cfg = PVE::Storage::config();
+
+ my $new_volid;
+
+ eval {
+ PVE::Storage::activate_volumes($storage_cfg, [ $old_volid ]);
+ $new_volid = PVE::LXC::copy_volume($mpdata, $vmid, $storage, $storage_cfg, $conf);
+ $mpdata->{volume} = $new_volid;
+
+ PVE::LXC::Config->lock_config($vmid, sub {
+ # re-check the digest so concurrent edits made during the
+ # (potentially long) copy are detected
+ my $digest = $conf->{digest};
+ $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::Tools::assert_if_modified($digest, $conf->{digest});
+
+ $conf->{$mpkey} = PVE::LXC::Config->print_ct_mountpoint($mpdata, $mpkey eq 'rootfs');
+
+ PVE::LXC::Config->add_unused_volume($conf, $old_volid) if !$param->{delete};
+
+ PVE::LXC::Config->write_config($vmid, $conf);
+ });
+
+ eval {
+ # try to deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storage_cfg, [ $new_volid ])
+ };
+ warn $@ if $@;
+ };
+ if (my $err = $@) {
+ # copy or config update failed: free the new volume if it exists
+ eval {
+ PVE::Storage::vdisk_free($storage_cfg, $new_volid)
+ if defined($new_volid);
+ };
+ warn $@ if $@;
+ die $err;
+ }
+
+ if ($param->{delete}) {
+ eval {
+ PVE::Storage::deactivate_volumes($storage_cfg, [ $old_volid ]);
+ PVE::Storage::vdisk_free($storage_cfg, $old_volid);
+ };
+ warn $@ if $@;
+ }
+ };
+ my $err = $@;
+ # always drop the 'disk' lock, then propagate any error
+ eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
+ warn $@ if $@;
+ die $err if $err;
+ };
+ my $task = eval {
+ $rpcenv->fork_worker('move_volume', $vmid, $authuser, $realcmd);
+ };
+ if (my $err = $@) {
+ # forking the worker failed: remove the lock set in phase 1
+ eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
+ warn $@ if $@;
+ die $err;
+ }
+ return $task;
+ }});
+