use PVE::API2::LXC::Config;
use PVE::API2::LXC::Status;
use PVE::API2::LXC::Snapshot;
+use PVE::HA::Env::PVE2;
use PVE::HA::Config;
use PVE::JSONSchema qw(get_standard_option);
use base qw(PVE::RESTHandler);
proxyto => 'node',
parameters => {
additionalProperties => 0,
- properties => PVE::LXC::json_config_properties({
+ properties => PVE::LXC::Config->json_config_properties({
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
ostemplate => {
type => 'string', format => 'pve-poolid',
description => "Add the VM to the specified pool.",
},
+ 'ignore-unpack-errors' => {
+ optional => 1,
+ type => 'boolean',
+ description => "Ignore errors when extracting the template.",
+ },
+ 'ssh-public-keys' => {
+ optional => 1,
+ type => 'string',
+ description => "Setup public SSH keys (one key per line, " .
+ "OpenSSH format).",
+ },
}),
},
returns => {
my $vmid = extract_param($param, 'vmid');
- my $basecfg_fn = PVE::LXC::config_file($vmid);
+ my $ignore_unpack_errors = extract_param($param, 'ignore-unpack-errors');
+
+ my $basecfg_fn = PVE::LXC::Config->config_file($vmid);
my $same_container_exists = -f $basecfg_fn;
if (!($same_container_exists && $restore && $force)) {
PVE::Cluster::check_vmid_unused($vmid);
} else {
- my $conf = PVE::LXC::load_config($vmid);
- PVE::LXC::check_protection($conf, "unable to restore CT $vmid");
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_protection($conf, "unable to restore CT $vmid");
}
my $password = extract_param($param, 'password');
+ my $ssh_keys = extract_param($param, 'ssh-public-keys');
+ PVE::Tools::validate_ssh_public_keys($ssh_keys) if defined($ssh_keys);
+
my $pool = extract_param($param, 'pool');
if (defined($pool)) {
raise_perm_exc();
}
- PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
+ PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []);
my $storage = extract_param($param, 'storage') // 'local';
my $conf = {};
my $no_disk_param = {};
+ my $mp_param = {};
+ my $storage_only_mode = 1;
foreach my $opt (keys %$param) {
my $value = $param->{$opt};
if ($opt eq 'rootfs' || $opt =~ m/^mp\d+$/) {
# allow to use simple numbers (add default storage in that case)
- $param->{$opt} = "$storage:$value" if $value =~ m/^\d+(\.\d+)?$/;
+ if ($value =~ m/^\d+(\.\d+)?$/) {
+ $mp_param->{$opt} = "$storage:$value";
+ } else {
+ $mp_param->{$opt} = $value;
+ }
+ $storage_only_mode = 0;
+ } elsif ($opt =~ m/^unused\d+$/) {
+ warn "ignoring '$opt', cannot create/restore with unused volume\n";
+ delete $param->{$opt};
} else {
$no_disk_param->{$opt} = $value;
}
}
+ die "mountpoints configured, but 'rootfs' not set - aborting\n"
+ if !$storage_only_mode && !defined($mp_param->{rootfs});
+
# check storage access, activate storage
- PVE::LXC::foreach_mountpoint($param, sub {
+ my $delayed_mp_param = {};
+ PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
my ($ms, $mountpoint) = @_;
my $volid = $mountpoint->{volume};
my $mp = $mountpoint->{mp};
- my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
-
- &$check_and_activate_storage($sid) if $sid;
+ if ($mountpoint->{type} ne 'volume') { # bind or device
+ die "Only root can pass arbitrary filesystem paths.\n"
+ if $authuser ne 'root@pam';
+ } else {
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ &$check_and_activate_storage($sid);
+ }
});
# check/activate default storage
- &$check_and_activate_storage($storage) if !defined($param->{rootfs});
+ &$check_and_activate_storage($storage) if !defined($mp_param->{rootfs});
- PVE::LXC::update_pct_config($vmid, $conf, 0, $no_disk_param);
+ PVE::LXC::Config->update_pct_config($vmid, $conf, 0, $no_disk_param);
$conf->{unprivileged} = 1 if $unprivileged;
my $code = sub {
&$check_vmid_usage(); # final check after locking
-
+ my $old_conf;
+
+ my $config_fn = PVE::LXC::Config->config_file($vmid);
+ if (-f $config_fn) {
+ die "container exists" if !$restore; # just to be sure
+ $old_conf = PVE::LXC::Config->load_config($vmid);
+ } else {
+ eval {
+ # try to create empty config on local node, we have a flock
+ PVE::LXC::Config->write_config($vmid, {});
+ };
+
+ # another node was faster, abort
+ die "Could not reserve ID $vmid, already taken\n" if $@;
+ }
+
PVE::Cluster::check_cfs_quorum();
my $vollist = [];
eval {
- if (!defined($param->{rootfs})) {
+ if ($storage_only_mode) {
if ($restore) {
- my (undef, $disksize) = PVE::LXC::Create::recover_config($archive);
- die "unable to detect disk size - please specify rootfs (size)\n"
- if !$disksize;
- $disksize /= 1024 * 1024 * 1024; # create_disks expects GB as unit size
- $param->{rootfs} = "$storage:$disksize";
+ (undef, $mp_param) = PVE::LXC::Create::recover_config($archive);
+ die "rootfs configuration could not be recovered, please check and specify manually!\n"
+ if !defined($mp_param->{rootfs});
+ PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
+ my ($ms, $mountpoint) = @_;
+ my $type = $mountpoint->{type};
+ if ($type eq 'volume') {
+ die "unable to detect disk size - please specify $ms (size)\n"
+ if !defined($mountpoint->{size});
+ my $disksize = $mountpoint->{size} / (1024 * 1024 * 1024); # create_disks expects GB as unit size
+ delete $mountpoint->{size};
+ $mountpoint->{volume} = "$storage:$disksize";
+ $mp_param->{$ms} = PVE::LXC::Config->print_ct_mountpoint($mountpoint, $ms eq 'rootfs');
+ } else {
+ my $type = $mountpoint->{type};
+ die "restoring rootfs to $type mount is only possible by specifying -rootfs manually!\n"
+ if ($ms eq 'rootfs');
+
+ if ($mountpoint->{backup}) {
+ warn "WARNING - unsupported configuration!\n";
+ warn "backup was enabled for $type mountpoint $ms ('$mountpoint->{mp}')\n";
+ warn "mountpoint configuration will be restored after archive extraction!\n";
+ warn "contained files will be restored to wrong directory!\n";
+ }
+ $delayed_mp_param->{$ms} = PVE::LXC::Config->print_ct_mountpoint($mountpoint, $ms eq 'rootfs');
+ }
+ });
} else {
- $param->{rootfs} = "$storage:4"; # defaults to 4GB
+ $mp_param->{rootfs} = "$storage:4"; # defaults to 4GB
}
}
- $vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $param, $conf);
+ $vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf);
+
+ if (defined($old_conf)) {
+ # destroy old container volumes
+ PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $old_conf, {});
+ }
+
+ eval {
+ my $rootdir = PVE::LXC::mount_all($vmid, $storage_cfg, $conf, 1);
+ PVE::LXC::Create::restore_archive($archive, $rootdir, $conf, $ignore_unpack_errors);
- PVE::LXC::Create::create_rootfs($storage_cfg, $vmid, $conf, $archive, $password, $restore);
+ if ($restore) {
+ PVE::LXC::Create::restore_configuration($vmid, $rootdir, $conf);
+ } else {
+ my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); # detect OS
+ PVE::LXC::Config->write_config($vmid, $conf); # save config (after OS detection)
+ $lxc_setup->post_create_hook($password, $ssh_keys);
+ }
+ };
+ my $err = $@;
+ PVE::LXC::umount_all($vmid, $storage_cfg, $conf, $err ? 1 : 0);
+ PVE::Storage::deactivate_volumes($storage_cfg, PVE::LXC::Config->get_vm_volumes($conf));
+ die $err if $err;
# set some defaults
$conf->{hostname} ||= "CT$vmid";
$conf->{memory} ||= 512;
$conf->{swap} //= 512;
- PVE::LXC::create_config($vmid, $conf);
+ foreach my $mp (keys %$delayed_mp_param) {
+ $conf->{$mp} = $delayed_mp_param->{$mp};
+ }
+ PVE::LXC::Config->write_config($vmid, $conf);
};
if (my $err = $@) {
PVE::LXC::destroy_disks($storage_cfg, $vollist);
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
- my $realcmd = sub { PVE::LXC::lock_container($vmid, 1, $code); };
+ my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
&$check_vmid_usage(); # first check before locking
my ($param) = @_;
# test if VM exists
- my $conf = PVE::LXC::load_config($param->{vmid});
+ my $conf = PVE::LXC::Config->load_config($param->{vmid});
my $res = [
{ subdir => 'config' },
{ subdir => 'vncwebsocket' },
{ subdir => 'spiceproxy' },
{ subdir => 'migrate' },
+ { subdir => 'clone' },
# { subdir => 'initlog' },
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
return $res;
}});
+
__PACKAGE__->register_method({
name => 'rrd',
path => '{vmid}/rrd',
my $vmid = $param->{vmid};
# test if container exists
- my $conf = PVE::LXC::load_config($vmid);
+ my $conf = PVE::LXC::Config->load_config($vmid);
my $storage_cfg = cfs_read_file("storage.cfg");
- PVE::LXC::check_protection($conf, "can't remove CT $vmid");
+ PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");
die "unable to remove CT $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
my $code = sub {
# reload config after lock
- $conf = PVE::LXC::load_config($vmid);
- PVE::LXC::check_lock($conf);
+ $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($conf);
die $running_error_msg if PVE::LXC::check_running($vmid);
PVE::Firewall::remove_vmfw_conf($vmid);
};
- my $realcmd = sub { PVE::LXC::lock_container($vmid, 1, $code); };
+ my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };
return $rpcenv->fork_worker('vzdestroy', $vmid, $authuser, $realcmd);
}});
my $remcmd = $remip ?
['/usr/bin/ssh', '-t', $remip] : [];
- my $conf = PVE::LXC::load_config($vmid, $node);
+ my $conf = PVE::LXC::Config->load_config($vmid, $node);
my $concmd = PVE::LXC::get_console_command($vmid, $conf);
my $shcmd = [ '/usr/bin/dtach', '-A',
my $authpath = "/vms/$vmid";
my $permissions = 'VM.Console';
- my $conf = PVE::LXC::load_config($vmid);
+ my $conf = PVE::LXC::Config->load_config($vmid);
die "CT $vmid not running\n" if !PVE::LXC::check_running($vmid);
description => "Use online/live migration.",
optional => 1,
},
+ force => {
+ type => 'boolean',
+ description => "Force migration despite local bind / device" .
+ " mounts. WARNING: identical bind / device mounts need to ".
+ " be available on the target node.",
+ optional => 1,
+ },
},
},
returns => {
my $vmid = extract_param($param, 'vmid');
# test if VM exists
- PVE::LXC::load_config($vmid);
+ PVE::LXC::Config->load_config($vmid);
# try to detect errors early
if (PVE::LXC::check_running($vmid)) {
my $feature = extract_param($param, 'feature');
- my $conf = PVE::LXC::load_config($vmid);
+ my $conf = PVE::LXC::Config->load_config($vmid);
if($snapname){
my $snap = $conf->{snapshots}->{$snapname};
my $storage_cfg = PVE::Storage::config();
#Maybe include later
#my $nodelist = PVE::LXC::shared_nodes($conf, $storage_cfg);
- my $hasFeature = PVE::LXC::has_feature($feature, $conf, $storage_cfg, $snapname);
+ my $hasFeature = PVE::LXC::Config->has_feature($feature, $conf, $storage_cfg, $snapname);
return {
hasFeature => $hasFeature,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }),
+ experimental => {
+ type => 'boolean',
+ description => "The template feature is experimental, set this " .
+ "flag if you know what you are doing.",
+ default => 0,
+ },
},
},
returns => { type => 'null'},
my $updatefn = sub {
- my $conf = PVE::LXC::load_config($vmid);
- PVE::LXC::check_lock($conf);
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($conf);
die "unable to create template, because CT contains snapshots\n"
if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
die "you can't convert a template to a template\n"
- if PVE::LXC::is_template($conf);
+ if PVE::LXC::Config->is_template($conf);
die "you can't convert a CT to template if the CT is running\n"
if PVE::LXC::check_running($vmid);
$conf->{template} = 1;
- PVE::LXC::write_config($vmid, $conf);
+ PVE::LXC::Config->write_config($vmid, $conf);
# and remove lxc config
PVE::LXC::update_lxc_config(undef, $vmid, $conf);
return $rpcenv->fork_worker('vztemplate', $vmid, $authuser, $realcmd);
};
- PVE::LXC::lock_container($vmid, undef, $updatefn);
+ PVE::LXC::Config->lock_config($vmid, $updatefn);
return undef;
}});
+__PACKAGE__->register_method({
+ name => 'clone_vm',
+ path => '{vmid}/clone',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Create a container clone/copy",
+ permissions => {
+ description => "You need 'VM.Clone' permissions on /vms/{vmid}, " .
+ "and 'VM.Allocate' permissions " .
+ "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
+ "'Datastore.AllocateSpace' on any used storage.",
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
+ [ 'or',
+ [ 'perm', '/vms/{newid}', ['VM.Allocate']],
+ [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+ ],
+ ]
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ newid => get_standard_option('pve-vmid', {
+ completion => \&PVE::Cluster::complete_next_vmid,
+ description => 'VMID for the clone.' }),
+ hostname => {
+ optional => 1,
+ type => 'string', format => 'dns-name',
+ description => "Set a hostname for the new CT.",
+ },
+ description => {
+ optional => 1,
+ type => 'string',
+ description => "Description for the new CT.",
+ },
+ pool => {
+ optional => 1,
+ type => 'string', format => 'pve-poolid',
+ description => "Add the new CT to the specified pool.",
+ },
+ snapname => get_standard_option('pve-lxc-snapshot-name', {
+ optional => 1,
+ }),
+ storage => get_standard_option('pve-storage-id', {
+ description => "Target storage for full clone.",
+ requires => 'full',
+ optional => 1,
+ }),
+ full => {
+ optional => 1,
+ type => 'boolean',
+ description => "Create a full copy of all disk. This is always done when " .
+ "you clone a normal CT. For CT templates, we try to create a linked clone by default.",
+ default => 0,
+ },
+ experimental => {
+ type => 'boolean',
+ description => "The clone feature is experimental, set this " .
+ "flag if you know what you are doing.",
+ default => 0,
+ },
+# target => get_standard_option('pve-node', {
+# description => "Target node. Only allowed if the original VM is on shared storage.",
+# optional => 1,
+# }),
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $newid = extract_param($param, 'newid');
+
+ my $pool = extract_param($param, 'pool');
+
+ if (defined($pool)) {
+ $rpcenv->check_pool_exist($pool);
+ }
+
+ my $snapname = extract_param($param, 'snapname');
+
+ my $storage = extract_param($param, 'storage');
+
+ my $localnode = PVE::INotify::nodename();
+
+ my $storecfg = PVE::Storage::config();
+
+ if ($storage) {
+ # check if storage is enabled on local node
+ PVE::Storage::storage_check_enabled($storecfg, $storage);
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $running = PVE::LXC::check_running($vmid) || 0;
+
+ my $clonefn = sub {
+
+ # do all tests after lock
+ # we also try to do all tests before we fork the worker
+ my $conf = PVE::LXC::Config->load_config($vmid);
+
+ PVE::LXC::Config->check_lock($conf);
+
+ my $verify_running = PVE::LXC::check_running($vmid) || 0;
+
+ die "unexpected state change\n" if $verify_running != $running;
+
+ die "snapshot '$snapname' does not exist\n"
+ if $snapname && !defined( $conf->{snapshots}->{$snapname});
+
+ my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
+
+ my $conffile = PVE::LXC::Config->config_file($newid);
+ die "unable to create CT $newid: config file already exists\n"
+ if -f $conffile;
+
+ my $newconf = { lock => 'clone' };
+ my $mountpoints = {};
+ my $fullclone = {};
+ my $vollist = [];
+
+ foreach my $opt (keys %$oldconf) {
+ my $value = $oldconf->{$opt};
+
+ # no need to copy unused images, because VMID(owner) changes anyways
+ next if $opt =~ m/^unused\d+$/;
+
+ if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
+ my $mp = $opt eq 'rootfs' ?
+ PVE::LXC::Config->parse_ct_rootfs($value) :
+ PVE::LXC::Config->parse_ct_mountpoint($value);
+
+ if ($mp->{type} eq 'volume') {
+ my $volid = $mp->{volume};
+ if ($param->{full}) {
+ die "fixme: full clone not implemented";
+
+ die "Full clone feature for '$volid' is not available\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'copy', $volid, $snapname, $running);
+ $fullclone->{$opt} = 1;
+ } else {
+ # not full means clone instead of copy
+ die "Linked clone feature for '$volid' is not available\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
+ }
+
+ $mountpoints->{$opt} = $mp;
+ push @$vollist, $volid;
+
+ } else {
+ # TODO: allow bind mounts?
+ die "unable to clone mountpoint '$opt' (type $mp->{type})\n";
+ }
+
+ } else {
+ # copy everything else
+ $newconf->{$opt} = $value;
+ }
+ }
+
+ delete $newconf->{template};
+ if ($param->{hostname}) {
+ $newconf->{hostname} = $param->{hostname};
+ }
+
+ if ($param->{description}) {
+ $newconf->{description} = $param->{description};
+ }
+
+ # create empty/temp config - this fails if CT already exists on other node
+ PVE::Tools::file_set_contents($conffile, "# ctclone temporary file\nlock: clone\n");
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ my $newvollist = [];
+
+ eval {
+ local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+ PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+
+ foreach my $opt (keys %$mountpoints) {
+ my $mp = $mountpoints->{$opt};
+ my $volid = $mp->{volume};
+
+ if ($fullclone->{$opt}) {
+ die "fixme: full clone not implemented\n";
+ } else {
+ print "create linked clone of mountpoint $opt ($volid)\n";
+ my $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname);
+ push @$newvollist, $newvolid;
+ $mp->{volume} = $newvolid;
+
+ $newconf->{$opt} = PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs');
+ PVE::LXC::Config->write_config($newid, $newconf);
+ }
+ }
+
+ delete $newconf->{lock};
+ PVE::LXC::Config->write_config($newid, $newconf);
+
+ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
+ };
+ if (my $err = $@) {
+ unlink $conffile;
+
+ sleep 1; # some storage like rbd need to wait before release volume - really?
+
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ warn $@ if $@;
+ }
+ die "clone failed: $err";
+ }
+
+ return;
+ };
+
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+
+ return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
+
+ };
+
+ return PVE::LXC::Config->lock_config($vmid, $clonefn);
+ }});
+
+
__PACKAGE__->register_method({
name => 'resize_vm',
path => '{vmid}/resize',
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::LXC::mountpoint_names()],
+ enum => [PVE::LXC::Config->mountpoint_names()],
},
size => {
type => 'string',
die "no options specified\n" if !scalar(keys %$param);
- PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]);
+ PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $param, []);
my $storage_cfg = cfs_read_file("storage.cfg");
- my $query_loopdev = sub {
- my ($path) = @_;
- my $found;
- my $parser = sub {
- my $line = shift;
- if ($line =~ m@^(/dev/loop\d+):@) {
- $found = $1;
- }
- };
- my $cmd = ['losetup', '--associated', $path];
- PVE::Tools::run_command($cmd, outfunc => $parser);
- return $found;
- };
-
my $code = sub {
- my $conf = PVE::LXC::load_config($vmid);
- PVE::LXC::check_lock($conf);
+ my $conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->check_lock($conf);
PVE::Tools::assert_if_modified($digest, $conf->{digest});
my $running = PVE::LXC::check_running($vmid);
my $disk = $param->{disk};
- my $mp = PVE::LXC::parse_ct_mountpoint($conf->{$disk});
+ my $mp = $disk eq 'rootfs' ? PVE::LXC::Config->parse_ct_rootfs($conf->{$disk}) :
+ PVE::LXC::Config->parse_ct_mountpoint($conf->{$disk});
+
my $volid = $mp->{volume};
my (undef, undef, $owner, undef, undef, undef, $format) =
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+ PVE::Storage::activate_volumes($storage_cfg, [$volid]);
+
my $size = PVE::Storage::volume_size_info($storage_cfg, $volid, 5);
$newsize += $size if $ext;
$newsize = int($newsize);
PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);
$mp->{size} = $newsize;
- $conf->{$disk} = PVE::LXC::print_ct_mountpoint($mp, $disk eq 'rootfs');
+ $conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');
- PVE::LXC::write_config($vmid, $conf);
+ PVE::LXC::Config->write_config($vmid, $conf);
if ($format eq 'raw') {
my $path = PVE::Storage::path($storage_cfg, $volid, undef);
$mp->{mp} = '/';
my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
- $path = &$query_loopdev($path) if $use_loopdev;
+ $path = PVE::LXC::query_loopdev($path) if $use_loopdev;
die "internal error: CT running but mountpoint not attached to a loop device"
if !$path;
PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;
# interestingly we don't need to e2fsck on mounted systems...
my $quoted = PVE::Tools::shellquote($path);
my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
- PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
+ eval {
+ PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
} else {
- PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
- PVE::Tools::run_command(['resize2fs', $path]);
+ eval {
+ PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
+ PVE::Tools::run_command(['resize2fs', $path]);
+ };
+ warn "Failed to update the container's filesystem: $@\n" if $@;
}
}
};
return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd);
};
- return PVE::LXC::lock_container($vmid, undef, $code);;
+ return PVE::LXC::Config->lock_config($vmid, $code);;
}});
1;