1 package PVE
::API2
::LXC
;
7 use PVE
::Tools
qw(extract_param run_command);
8 use PVE
::Exception
qw(raise raise_param_exc);
10 use PVE
::Cluster
qw(cfs_read_file);
11 use PVE
::AccessControl
;
15 use PVE
::RPCEnvironment
;
16 use PVE
::ReplicationConfig
;
19 use PVE
::LXC
::Migrate
;
20 use PVE
::GuestHelpers
;
21 use PVE
::API2
::LXC
::Config
;
22 use PVE
::API2
::LXC
::Status
;
23 use PVE
::API2
::LXC
::Snapshot
;
24 use PVE
::JSONSchema
qw(get_standard_option);
25 use base
qw(PVE::RESTHandler);
# Load the HA stack only at runtime, not while the static API documentation
# is generated (PVE_GENERATING_DOCS is set in the doc-build environment,
# where the HA modules / cluster state are not available).
# NOTE(review): reconstructed from a garbled extraction; only the closing
# '}' (original line 33) was missing from the visible chunk.
if (!$ENV{PVE_GENERATING_DOCS}) {
    require PVE::HA::Env::PVE2;
    import PVE::HA::Env::PVE2;
    require PVE::HA::Config;
    import PVE::HA::Config;
}
# Delegate /nodes/{node}/lxc/{vmid}/config to the config sub-API handler.
# NOTE(review): reconstructed from a garbled extraction; only the trailing
# '});' (original line 39) was missing from the visible chunk.
__PACKAGE__->register_method ({
    subclass => "PVE::API2::LXC::Config",
    path => '{vmid}/config',
});
# Delegate /nodes/{node}/lxc/{vmid}/status to the status sub-API handler.
# NOTE(review): reconstructed from a garbled extraction; only the trailing
# '});' (original line 44) was missing from the visible chunk.
__PACKAGE__->register_method ({
    subclass => "PVE::API2::LXC::Status",
    path => '{vmid}/status',
});
# Delegate /nodes/{node}/lxc/{vmid}/snapshot to the snapshot sub-API handler.
# NOTE(review): reconstructed from a garbled extraction; only the trailing
# '});' (original line 49) was missing from the visible chunk.
__PACKAGE__->register_method ({
    subclass => "PVE::API2::LXC::Snapshot",
    path => '{vmid}/snapshot',
});
# Delegate /nodes/{node}/lxc/{vmid}/firewall to the firewall sub-API handler.
# NOTE(review): reconstructed from a garbled extraction; only the trailing
# '});' (original line 54) was missing from the visible chunk.
__PACKAGE__->register_method ({
    subclass => "PVE::API2::Firewall::CT",
    path => '{vmid}/firewall',
});
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# Per-node LXC container index (GET /nodes/{node}/lxc): lists only CTs the
# caller can VM.Audit (checked per vmid with noerr=1 so denied CTs are
# silently skipped, not fatal).
# NOTE(review): typo "permissons" in the description string below — runtime
# string, not changed here; fix upstream.
56 __PACKAGE__-
>register_method({
60 description
=> "LXC container index (per node).",
62 description
=> "Only list CTs where you have VM.Audit permissons on /vms/<vmid>.",
66 protected
=> 1, # /proc files are only readable by root
68 additionalProperties
=> 0,
70 node
=> get_standard_option
('pve-node'),
79 links
=> [ { rel
=> 'child', href
=> "{vmid}" } ],
# --- handler body: collect status of all local CTs, filter by permission ---
84 my $rpcenv = PVE
::RPCEnvironment
::get
();
85 my $authuser = $rpcenv->get_user();
87 my $vmstatus = PVE
::LXC
::vmstatus
();
90 foreach my $vmid (keys %$vmstatus) {
91 next if !$rpcenv->check($authuser, "/vms/$vmid", [ 'VM.Audit' ], 1);
# tag each status record with its vmid for the returned list
93 my $data = $vmstatus->{$vmid};
94 $data->{vmid
} = $vmid;
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# create_vm (POST /nodes/{node}/lxc): create or restore a container.
# Permissions are checked inside (user => 'all'): VM.Allocate on the vmid or
# pool, or VM.Backup when force-restoring an existing CT, plus
# Datastore.AllocateSpace on each used storage.
102 __PACKAGE__-
>register_method({
106 description
=> "Create or restore a container.",
108 user
=> 'all', # check inside
109 description
=> "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " .
110 "For restore, it is enough if the user has 'VM.Backup' permission and the VM already exists. " .
111 "You also need 'Datastore.AllocateSpace' permissions on the storage.",
116 additionalProperties
=> 0,
117 properties
=> PVE
::LXC
::Config-
>json_config_properties({
118 node
=> get_standard_option
('pve-node'),
119 vmid
=> get_standard_option
('pve-vmid', { completion
=> \
&PVE
::Cluster
::complete_next_vmid
}),
121 description
=> "The OS template or backup file.",
124 completion
=> \
&PVE
::LXC
::complete_os_templates
,
129 description
=> "Sets root password inside container.",
132 storage
=> get_standard_option
('pve-storage-id', {
133 description
=> "Default Storage.",
136 completion
=> \
&PVE
::Storage
::complete_storage_enabled
,
141 description
=> "Allow to overwrite existing container.",
146 description
=> "Mark this as restore task.",
150 type
=> 'string', format
=> 'pve-poolid',
151 description
=> "Add the VM to the specified pool.",
153 'ignore-unpack-errors' => {
156 description
=> "Ignore errors when extracting the template.",
158 'ssh-public-keys' => {
161 description
=> "Setup public SSH keys (one key per line, " .
# --- handler body: resolve environment and unpack request parameters ---
172 my $rpcenv = PVE
::RPCEnvironment
::get
();
174 my $authuser = $rpcenv->get_user();
176 my $node = extract_param
($param, 'node');
178 my $vmid = extract_param
($param, 'vmid');
180 my $ignore_unpack_errors = extract_param
($param, 'ignore-unpack-errors');
182 my $basecfg_fn = PVE
::LXC
::Config-
>config_file($vmid);
184 my $same_container_exists = -f
$basecfg_fn;
186 # 'unprivileged' is read-only, so we can't pass it to update_pct_config
187 my $unprivileged = extract_param
($param, 'unprivileged');
189 my $restore = extract_param
($param, 'restore');
192 # fixme: limit allowed parameters
196 my $force = extract_param
($param, 'force');
# the vmid-unused check is skipped only when force-restoring an existing CT
198 if (!($same_container_exists && $restore && $force)) {
199 PVE
::Cluster
::check_vmid_unused
($vmid);
201 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
202 PVE
::LXC
::Config-
>check_protection($conf, "unable to restore CT $vmid");
205 my $password = extract_param
($param, 'password');
207 my $ssh_keys = extract_param
($param, 'ssh-public-keys');
208 PVE
::Tools
::validate_ssh_public_keys
($ssh_keys) if defined($ssh_keys);
210 my $pool = extract_param
($param, 'pool');
212 if (defined($pool)) {
213 $rpcenv->check_pool_exist($pool);
214 $rpcenv->check_perm_modify($authuser, "/pool/$pool");
# permission alternatives: VM.Allocate on the vmid, VM.Allocate on the pool,
# or VM.Backup for a forced restore over an existing CT
217 if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
219 } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) {
221 } elsif ($restore && $force && $same_container_exists &&
222 $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
223 # OK: user has VM.Backup permissions, and want to restore an existing VM
228 my $ostemplate = extract_param
($param, 'ostemplate');
229 my $storage = extract_param
($param, 'storage') // 'local';
231 PVE
::LXC
::check_ct_modify_config_perm
($rpcenv, $authuser, $vmid, $pool, $param, []);
233 my $storage_cfg = cfs_read_file
("storage.cfg");
# '-' means the archive is streamed via stdin; only allowed from the CLI
# and only for restore with an explicit rootfs
238 if ($ostemplate eq '-') {
239 die "pipe requires cli environment\n"
240 if $rpcenv->{type
} ne 'cli';
241 die "pipe can only be used with restore tasks\n"
244 die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs
});
246 PVE
::Storage
::check_volume_access
($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate);
247 $archive = PVE
::Storage
::abs_filesystem_path
($storage_cfg, $ostemplate);
# helper: verify a storage exists on this node, supports container
# directories (content type 'rootdir'), is allowed for the user, and is active
250 my $check_and_activate_storage = sub {
253 my $scfg = PVE
::Storage
::storage_check_node
($storage_cfg, $sid, $node);
255 raise_param_exc
({ storage
=> "storage '$sid' does not support container directories"})
256 if !$scfg->{content
}->{rootdir
};
258 $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
260 PVE
::Storage
::activate_storage
($storage_cfg, $sid);
# split parameters into mount-point params and the rest; a bare number
# (e.g. '8') selects that size in GB on the default storage
265 my $no_disk_param = {};
267 my $storage_only_mode = 1;
268 foreach my $opt (keys %$param) {
269 my $value = $param->{$opt};
270 if ($opt eq 'rootfs' || $opt =~ m/^mp\d+$/) {
271 # allow to use simple numbers (add default storage in that case)
272 if ($value =~ m/^\d+(\.\d+)?$/) {
273 $mp_param->{$opt} = "$storage:$value";
275 $mp_param->{$opt} = $value;
277 $storage_only_mode = 0;
278 } elsif ($opt =~ m/^unused\d+$/) {
279 warn "ignoring '$opt', cannot create/restore with unused volume\n";
280 delete $param->{$opt};
282 $no_disk_param->{$opt} = $value;
286 die "mount points configured, but 'rootfs' not set - aborting\n"
287 if !$storage_only_mode && !defined($mp_param->{rootfs
});
289 # check storage access, activate storage
290 my $delayed_mp_param = {};
291 PVE
::LXC
::Config-
>foreach_mountpoint($mp_param, sub {
292 my ($ms, $mountpoint) = @_;
294 my $volid = $mountpoint->{volume
};
295 my $mp = $mountpoint->{mp
};
297 if ($mountpoint->{type
} ne 'volume') { # bind or device
298 die "Only root can pass arbitrary filesystem paths.\n"
299 if $authuser ne 'root@pam';
301 my ($sid, $volname) = PVE
::Storage
::parse_volume_id
($volid);
302 &$check_and_activate_storage($sid);
306 # check/activate default storage
307 &$check_and_activate_storage($storage) if !defined($mp_param->{rootfs
});
309 PVE
::LXC
::Config-
>update_pct_config($vmid, $conf, 0, $no_disk_param);
311 $conf->{unprivileged
} = 1 if $unprivileged;
# helper: refuse to overwrite a running container / a vmid already in use
313 my $check_vmid_usage = sub {
315 die "can't overwrite running container\n"
316 if PVE
::LXC
::check_running
($vmid);
318 PVE
::Cluster
::check_vmid_unused
($vmid);
323 &$check_vmid_usage(); # final check after locking
326 my $config_fn = PVE
::LXC
::Config-
>config_file($vmid);
328 die "container exists" if !$restore; # just to be sure
329 $old_conf = PVE
::LXC
::Config-
>load_config($vmid);
332 # try to create empty config on local node, we have an flock
333 PVE
::LXC
::Config-
>write_config($vmid, {});
336 # another node was faster, abort
337 die "Could not reserve ID $vmid, already taken\n" if $@;
340 PVE
::Cluster
::check_cfs_quorum
();
# restore without explicit disk parameters: recover the mount-point layout
# from the backup archive itself
344 if ($storage_only_mode) {
346 (undef, $mp_param) = PVE
::LXC
::Create
::recover_config
($archive);
347 die "rootfs configuration could not be recovered, please check and specify manually!\n"
348 if !defined($mp_param->{rootfs
});
349 PVE
::LXC
::Config-
>foreach_mountpoint($mp_param, sub {
350 my ($ms, $mountpoint) = @_;
351 my $type = $mountpoint->{type
};
352 if ($type eq 'volume') {
353 die "unable to detect disk size - please specify $ms (size)\n"
354 if !defined($mountpoint->{size
});
355 my $disksize = $mountpoint->{size
} / (1024 * 1024 * 1024); # create_disks expects GB as unit size
356 delete $mountpoint->{size
};
357 $mountpoint->{volume
} = "$storage:$disksize";
358 $mp_param->{$ms} = PVE
::LXC
::Config-
>print_ct_mountpoint($mountpoint, $ms eq 'rootfs');
# non-volume (bind/device) mount points recovered from the archive: only
# root may restore them, and they are re-added after extraction
360 my $type = $mountpoint->{type
};
361 die "restoring rootfs to $type mount is only possible by specifying -rootfs manually!\n"
362 if ($ms eq 'rootfs');
363 die "restoring '$ms' to $type mount is only possible for root\n"
364 if $authuser ne 'root@pam';
366 if ($mountpoint->{backup
}) {
367 warn "WARNING - unsupported configuration!\n";
368 warn "backup was enabled for $type mount point $ms ('$mountpoint->{mp}')\n";
369 warn "mount point configuration will be restored after archive extraction!\n";
370 warn "contained files will be restored to wrong directory!\n";
372 delete $mp_param->{$ms}; # actually delay bind/dev mps
373 $delayed_mp_param->{$ms} = PVE
::LXC
::Config-
>print_ct_mountpoint($mountpoint, $ms eq 'rootfs');
377 $mp_param->{rootfs
} = "$storage:4"; # defaults to 4GB
# allocate the disks; on restore, first destroy the old container's volumes
381 $vollist = PVE
::LXC
::create_disks
($storage_cfg, $vmid, $mp_param, $conf);
383 if (defined($old_conf)) {
384 # destroy old container volumes
385 PVE
::LXC
::destroy_lxc_container
($storage_cfg, $vmid, $old_conf, {});
389 my $rootdir = PVE
::LXC
::mount_all
($vmid, $storage_cfg, $conf, 1);
390 PVE
::LXC
::Create
::restore_archive
($archive, $rootdir, $conf, $ignore_unpack_errors);
393 PVE
::LXC
::Create
::restore_configuration
($vmid, $rootdir, $conf, $authuser ne 'root@pam');
395 my $lxc_setup = PVE
::LXC
::Setup-
>new($conf, $rootdir); # detect OS
396 PVE
::LXC
::Config-
>write_config($vmid, $conf); # save config (after OS detection)
397 $lxc_setup->post_create_hook($password, $ssh_keys);
# cleanup: always unmount and deactivate, keeping any earlier error
401 PVE
::LXC
::umount_all
($vmid, $storage_cfg, $conf, $err ?
1 : 0);
402 PVE
::Storage
::deactivate_volumes
($storage_cfg, PVE
::LXC
::Config-
>get_vm_volumes($conf));
# apply defaults and the delayed bind/device mount points, then persist
405 $conf->{hostname
} ||= "CT$vmid";
406 $conf->{memory
} ||= 512;
407 $conf->{swap
} //= 512;
408 foreach my $mp (keys %$delayed_mp_param) {
409 $conf->{$mp} = $delayed_mp_param->{$mp};
411 PVE
::LXC
::Config-
>write_config($vmid, $conf);
# error path: roll back allocated disks and the reserved config
414 PVE
::LXC
::destroy_disks
($storage_cfg, $vollist);
415 PVE
::LXC
::destroy_config
($vmid);
418 PVE
::AccessControl
::add_vm_to_pool
($vmid, $pool) if $pool;
421 my $realcmd = sub { PVE
::LXC
::Config-
>lock_config($vmid, $code); };
423 &$check_vmid_usage(); # first check before locking
425 return $rpcenv->fork_worker($restore ?
'vzrestore' : 'vzcreate',
426 $vmid, $authuser, $realcmd);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# Directory index (GET /nodes/{node}/lxc/{vmid}): loads the CT config to
# verify existence, then returns the list of available sub-endpoints.
430 __PACKAGE__-
>register_method({
435 description
=> "Directory index",
440 additionalProperties
=> 0,
442 node
=> get_standard_option
('pve-node'),
443 vmid
=> get_standard_option
('pve-vmid'),
451 subdir
=> { type
=> 'string' },
454 links
=> [ { rel
=> 'child', href
=> "{subdir}" } ],
# load_config dies if the CT does not exist — acts as the existence check
460 my $conf = PVE
::LXC
::Config-
>load_config($param->{vmid
});
463 { subdir
=> 'config' },
464 { subdir
=> 'status' },
465 { subdir
=> 'vncproxy' },
466 { subdir
=> 'vncwebsocket' },
467 { subdir
=> 'spiceproxy' },
468 { subdir
=> 'migrate' },
469 { subdir
=> 'clone' },
470 # { subdir => 'initlog' },
472 { subdir
=> 'rrddata' },
473 { subdir
=> 'firewall' },
474 { subdir
=> 'snapshot' },
475 { subdir
=> 'resize' },
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# rrd (GET /nodes/{node}/lxc/{vmid}/rrd): render an RRD statistics graph
# (PNG) for the given timeframe/datasources/consolidation function.
482 __PACKAGE__-
>register_method({
484 path
=> '{vmid}/rrd',
486 protected
=> 1, # fixme: can we avoid that?
488 check
=> ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
490 description
=> "Read VM RRD statistics (returns PNG)",
492 additionalProperties
=> 0,
494 node
=> get_standard_option
('pve-node'),
495 vmid
=> get_standard_option
('pve-vmid'),
497 description
=> "Specify the time frame you are interested in.",
499 enum
=> [ 'hour', 'day', 'week', 'month', 'year' ],
502 description
=> "The list of datasources you want to display.",
503 type
=> 'string', format
=> 'pve-configid-list',
506 description
=> "The RRD consolidation function",
508 enum
=> [ 'AVERAGE', 'MAX' ],
516 filename
=> { type
=> 'string' },
# delegate to the cluster-wide RRD graph renderer; key is pve2-vm/<vmid>
522 return PVE
::Cluster
::create_rrd_graph
(
523 "pve2-vm/$param->{vmid}", $param->{timeframe
},
524 $param->{ds
}, $param->{cf
});
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# rrddata (GET /nodes/{node}/lxc/{vmid}/rrddata): return raw RRD statistics
# (array of data points) instead of a rendered PNG.
528 __PACKAGE__-
>register_method({
530 path
=> '{vmid}/rrddata',
532 protected
=> 1, # fixme: can we avoid that?
534 check
=> ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
536 description
=> "Read VM RRD statistics",
538 additionalProperties
=> 0,
540 node
=> get_standard_option
('pve-node'),
541 vmid
=> get_standard_option
('pve-vmid'),
543 description
=> "Specify the time frame you are interested in.",
545 enum
=> [ 'hour', 'day', 'week', 'month', 'year' ],
548 description
=> "The RRD consolidation function",
550 enum
=> [ 'AVERAGE', 'MAX' ],
# same RRD key as the graph endpoint, but returns the data points
565 return PVE
::Cluster
::create_rrd_data
(
566 "pve2-vm/$param->{vmid}", $param->{timeframe
}, $param->{cf
});
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# destroy_vm (DELETE /nodes/{node}/lxc/{vmid}): destroy a container and its
# volumes, ACLs and firewall config. Refuses when protected, HA-managed,
# replicated, or running.
# NOTE(review): typo in the description string below — "uses files" should
# probably read "used files"; runtime string, not changed here.
569 __PACKAGE__-
>register_method({
570 name
=> 'destroy_vm',
575 description
=> "Destroy the container (also delete all uses files).",
577 check
=> [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
580 additionalProperties
=> 0,
582 node
=> get_standard_option
('pve-node'),
583 vmid
=> get_standard_option
('pve-vmid', { completion
=> \
&PVE
::LXC
::complete_ctid_stopped
}),
592 my $rpcenv = PVE
::RPCEnvironment
::get
();
594 my $authuser = $rpcenv->get_user();
596 my $vmid = $param->{vmid
};
598 # test if container exists
599 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
601 my $storage_cfg = cfs_read_file
("storage.cfg");
# early safety checks: protection flag, HA management, replication jobs
603 PVE
::LXC
::Config-
>check_protection($conf, "can't remove CT $vmid");
605 die "unable to remove CT $vmid - used in HA resources\n"
606 if PVE
::HA
::Config
::vm_is_ha_managed
($vmid);
608 # do not allow destroy if there are replication jobs
609 my $repl_conf = PVE
::ReplicationConfig-
>new();
610 $repl_conf->check_for_existing_jobs($vmid);
612 my $running_error_msg = "unable to destroy CT $vmid - container is running\n";
614 die $running_error_msg if PVE
::LXC
::check_running
($vmid); # check early
617 # reload config after lock
618 $conf = PVE
::LXC
::Config-
>load_config($vmid);
619 PVE
::LXC
::Config-
>check_lock($conf);
# re-check running state under the lock — it may have started meanwhile
621 die $running_error_msg if PVE
::LXC
::check_running
($vmid);
623 PVE
::LXC
::destroy_lxc_container
($storage_cfg, $vmid, $conf);
624 PVE
::AccessControl
::remove_vm_access
($vmid);
625 PVE
::Firewall
::remove_vmfw_conf
($vmid);
# run the destroy code under the per-CT config lock, as a worker task
628 my $realcmd = sub { PVE
::LXC
::Config-
>lock_config($vmid, $code); };
630 return $rpcenv->fork_worker('vzdestroy', $vmid, $authuser, $realcmd);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# vncproxy (POST /nodes/{node}/lxc/{vmid}/vncproxy): spawn a vncterm proxy
# attached to the container console (via dtach), optionally tunnelled over
# SSH when the CT lives on another node. Returns ticket/cert/port/upid.
635 __PACKAGE__-
>register_method ({
637 path
=> '{vmid}/vncproxy',
641 check
=> ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
643 description
=> "Creates a TCP VNC proxy connections.",
645 additionalProperties
=> 0,
647 node
=> get_standard_option
('pve-node'),
648 vmid
=> get_standard_option
('pve-vmid'),
652 description
=> "use websocket instead of standard VNC.",
657 additionalProperties
=> 0,
659 user
=> { type
=> 'string' },
660 ticket
=> { type
=> 'string' },
661 cert
=> { type
=> 'string' },
662 port
=> { type
=> 'integer' },
663 upid
=> { type
=> 'string' },
669 my $rpcenv = PVE
::RPCEnvironment
::get
();
671 my $authuser = $rpcenv->get_user();
673 my $vmid = $param->{vmid
};
674 my $node = $param->{node
};
676 my $authpath = "/vms/$vmid";
# one-time VNC ticket bound to user + /vms/<vmid> path
678 my $ticket = PVE
::AccessControl
::assemble_vnc_ticket
($authuser, $authpath);
680 $sslcert = PVE
::Tools
::file_get_contents
("/etc/pve/pve-root-ca.pem", 8192)
# when the request targets a remote node, proxy over SSH to its IP
683 my ($remip, $family);
685 if ($node ne PVE
::INotify
::nodename
()) {
686 ($remip, $family) = PVE
::Cluster
::remote_node_ip
($node);
688 $family = PVE
::Tools
::get_host_address_family
($node);
691 my $port = PVE
::Tools
::next_vnc_port
($family);
693 # NOTE: vncterm VNC traffic is already TLS encrypted,
694 # so we select the fastest cipher here (or 'none'?)
695 my $remcmd = $remip ?
696 ['/usr/bin/ssh', '-t', $remip] : [];
# console command is wrapped in dtach so reconnects share one session
698 my $conf = PVE
::LXC
::Config-
>load_config($vmid, $node);
699 my $concmd = PVE
::LXC
::get_console_command
($vmid, $conf);
701 my $shcmd = [ '/usr/bin/dtach', '-A',
702 "/var/run/dtach/vzctlconsole$vmid",
703 '-r', 'winch', '-z', @$concmd];
708 syslog
('info', "starting lxc vnc proxy $upid\n");
712 my $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
713 '-timeout', $timeout, '-authpath', $authpath,
714 '-perm', 'VM.Console'];
# websocket mode: hand the ticket to vncterm via env and listen locally
716 if ($param->{websocket
}) {
717 $ENV{PVE_VNC_TICKET
} = $ticket; # pass ticket to vncterm
718 push @$cmd, '-notls', '-listen', 'localhost';
721 push @$cmd, '-c', @$remcmd, @$shcmd;
723 run_command
($cmd, keeplocale
=> 1);
728 my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
# block until the proxy actually listens before returning the port
730 PVE
::Tools
::wait_for_vnc_port
($port);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# vncwebsocket (GET /nodes/{node}/lxc/{vmid}/vncwebsocket): validates the
# VNC ticket issued by vncproxy and echoes back the port; the actual
# websocket upgrade is handled by the HTTP server layer.
# NOTE(review): typo "weksocket" in the description string below — runtime
# string, not changed here; fix upstream.
741 __PACKAGE__-
>register_method({
742 name
=> 'vncwebsocket',
743 path
=> '{vmid}/vncwebsocket',
746 description
=> "You also need to pass a valid ticket (vncticket).",
747 check
=> ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
749 description
=> "Opens a weksocket for VNC traffic.",
751 additionalProperties
=> 0,
753 node
=> get_standard_option
('pve-node'),
754 vmid
=> get_standard_option
('pve-vmid'),
756 description
=> "Ticket from previous call to vncproxy.",
761 description
=> "Port number returned by previous vncproxy call.",
771 port
=> { type
=> 'string' },
777 my $rpcenv = PVE
::RPCEnvironment
::get
();
779 my $authuser = $rpcenv->get_user();
781 my $authpath = "/vms/$param->{vmid}";
# dies if the ticket does not match user + authpath
783 PVE
::AccessControl
::verify_vnc_ticket
($param->{vncticket
}, $authuser, $authpath);
785 my $port = $param->{port
};
787 return { port
=> $port };
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# spiceproxy (POST /nodes/{node}/lxc/{vmid}/spiceproxy): return a SPICE
# remote-viewer configuration for the container console (dtach-wrapped),
# requires the CT to be running.
790 __PACKAGE__-
>register_method ({
791 name
=> 'spiceproxy',
792 path
=> '{vmid}/spiceproxy',
797 check
=> ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
799 description
=> "Returns a SPICE configuration to connect to the CT.",
801 additionalProperties
=> 0,
803 node
=> get_standard_option
('pve-node'),
804 vmid
=> get_standard_option
('pve-vmid'),
805 proxy
=> get_standard_option
('spice-proxy', { optional
=> 1 }),
808 returns
=> get_standard_option
('remote-viewer-config'),
812 my $vmid = $param->{vmid
};
813 my $node = $param->{node
};
814 my $proxy = $param->{proxy
};
816 my $authpath = "/vms/$vmid";
817 my $permissions = 'VM.Console';
818 # load config first: doubles as an existence check for the CT
819 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
821 die "CT $vmid not running\n" if !PVE
::LXC
::check_running
($vmid);
823 my $concmd = PVE
::LXC
::get_console_command
($vmid, $conf);
# same dtach session wrapper as the VNC console path
825 my $shcmd = ['/usr/bin/dtach', '-A',
826 "/var/run/dtach/vzctlconsole$vmid",
827 '-r', 'winch', '-z', @$concmd];
829 my $title = "CT $vmid";
831 return PVE
::API2Tools
::run_spiceterm
($authpath, $permissions, $vmid, $node, $proxy, $title, $shcmd);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# migrate_vm (POST /nodes/{node}/lxc/{vmid}/migrate): migrate a container to
# another node. HA-managed CTs are delegated to 'ha-manager migrate'; others
# run PVE::LXC::Migrate under the guest migration lock as a worker task.
835 __PACKAGE__-
>register_method({
836 name
=> 'migrate_vm',
837 path
=> '{vmid}/migrate',
841 description
=> "Migrate the container to another node. Creates a new migration task.",
843 check
=> ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
846 additionalProperties
=> 0,
848 node
=> get_standard_option
('pve-node'),
849 vmid
=> get_standard_option
('pve-vmid', { completion
=> \
&PVE
::LXC
::complete_ctid
}),
850 target
=> get_standard_option
('pve-node', {
851 description
=> "Target node.",
852 completion
=> \
&PVE
::Cluster
::complete_migration_target
,
856 description
=> "Use online/live migration.",
861 description
=> "Use restart migration",
866 description
=> "Timeout in seconds for shutdown for restart migration",
872 description
=> "Force migration despite local bind / device" .
873 " mounts. NOTE: deprecated, use 'shared' property of mount point instead.",
880 description
=> "the task ID.",
885 my $rpcenv = PVE
::RPCEnvironment
::get
();
887 my $authuser = $rpcenv->get_user();
889 my $target = extract_param
($param, 'target');
# sanity checks: target must be a different, existing node, with quorum
891 my $localnode = PVE
::INotify
::nodename
();
892 raise_param_exc
({ target
=> "target is local node."}) if $target eq $localnode;
894 PVE
::Cluster
::check_cfs_quorum
();
896 PVE
::Cluster
::check_node_exists
($target);
898 my $targetip = PVE
::Cluster
::remote_node_ip
($target);
900 my $vmid = extract_param
($param, 'vmid');
# existence check — load_config dies if the CT does not exist
903 PVE
::LXC
::Config-
>load_config($vmid);
905 # try to detect errors early
906 if (PVE
::LXC
::check_running
($vmid)) {
907 die "can't migrate running container without --online or --restart\n"
908 if !$param->{online
} && !$param->{restart
};
# HA-managed CTs (and not called from the HA stack itself): hand the
# migration over to ha-manager instead of doing it directly
911 if (PVE
::HA
::Config
::vm_is_ha_managed
($vmid) && $rpcenv->{type
} ne 'ha') {
916 my $service = "ct:$vmid";
918 my $cmd = ['ha-manager', 'migrate', $service, $target];
920 print "Executing HA migrate for CT $vmid to node $target\n";
922 PVE
::Tools
::run_command
($cmd);
927 return $rpcenv->fork_worker('hamigrate', $vmid, $authuser, $hacmd);
935 PVE
::LXC
::Migrate-
>migrate($target, $targetip, $vmid, $param);
940 return $rpcenv->fork_worker('vzmigrate', $vmid, $authuser, $realcmd);
# serialize with other guest migrations (10s lock timeout)
943 return PVE
::GuestHelpers
::guest_migration_lock
($vmid, 10, $code);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# vm_feature (GET /nodes/{node}/lxc/{vmid}/feature): check whether a feature
# (currently only 'snapshot') is available for the CT, optionally relative
# to an existing snapshot.
947 __PACKAGE__-
>register_method({
948 name
=> 'vm_feature',
949 path
=> '{vmid}/feature',
953 description
=> "Check if feature for virtual machine is available.",
955 check
=> ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
958 additionalProperties
=> 0,
960 node
=> get_standard_option
('pve-node'),
961 vmid
=> get_standard_option
('pve-vmid'),
963 description
=> "Feature to check.",
965 enum
=> [ 'snapshot' ],
967 snapname
=> get_standard_option
('pve-lxc-snapshot-name', {
975 hasFeature
=> { type
=> 'boolean' },
978 #items => { type => 'string' },
985 my $node = extract_param
($param, 'node');
987 my $vmid = extract_param
($param, 'vmid');
989 my $snapname = extract_param
($param, 'snapname');
991 my $feature = extract_param
($param, 'feature');
993 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
# when a snapshot name is given, it must exist in the config
996 my $snap = $conf->{snapshots
}->{$snapname};
997 die "snapshot '$snapname' does not exist\n" if !defined($snap);
1000 my $storage_cfg = PVE
::Storage
::config
();
1001 #Maybe include later
1002 #my $nodelist = PVE::LXC::shared_nodes($conf, $storage_cfg);
# the actual feature check is delegated to the config/storage layer
1003 my $hasFeature = PVE
::LXC
::Config-
>has_feature($feature, $conf, $storage_cfg, $snapname);
1006 hasFeature
=> $hasFeature,
1007 #nodes => [ keys %$nodelist ],
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# template (POST /nodes/{node}/lxc/{vmid}/template): convert a stopped,
# snapshot-free, non-template CT into a template, under the config lock.
1011 __PACKAGE__-
>register_method({
1013 path
=> '{vmid}/template',
1017 description
=> "Create a Template.",
1019 description
=> "You need 'VM.Allocate' permissions on /vms/{vmid}",
1020 check
=> [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
1023 additionalProperties
=> 0,
1025 node
=> get_standard_option
('pve-node'),
1026 vmid
=> get_standard_option
('pve-vmid', { completion
=> \
&PVE
::LXC
::complete_ctid_stopped
}),
1029 description
=> "The template feature is experimental, set this " .
1030 "flag if you know what you are doing.",
1035 returns
=> { type
=> 'null'},
1039 my $rpcenv = PVE
::RPCEnvironment
::get
();
1041 my $authuser = $rpcenv->get_user();
1043 my $node = extract_param
($param, 'node');
1045 my $vmid = extract_param
($param, 'vmid');
1047 my $updatefn = sub {
1049 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
1050 PVE
::LXC
::Config-
>check_lock($conf);
# preconditions: no snapshots, not already a template, not running
1052 die "unable to create template, because CT contains snapshots\n"
1053 if $conf->{snapshots
} && scalar(keys %{$conf->{snapshots
}});
1055 die "you can't convert a template to a template\n"
1056 if PVE
::LXC
::Config-
>is_template($conf);
1058 die "you can't convert a CT to template if the CT is running\n"
1059 if PVE
::LXC
::check_running
($vmid);
# convert disks to base volumes, then mark the config as template
1062 PVE
::LXC
::template_create
($vmid, $conf);
1065 $conf->{template
} = 1;
1067 PVE
::LXC
::Config-
>write_config($vmid, $conf);
1068 # and remove lxc config
1069 PVE
::LXC
::update_lxc_config
($vmid, $conf);
1071 return $rpcenv->fork_worker('vztemplate', $vmid, $authuser, $realcmd);
1074 PVE
::LXC
::Config-
>lock_config($vmid, $updatefn);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# clone_vm (POST /nodes/{node}/lxc/{vmid}/clone): clone/copy a container to
# a new vmid. Templates get linked clones by default; full clones are not
# implemented yet (die "fixme" paths below). Runs under the source CT's
# config lock, writes a temporary 'lock: clone' config for the target.
1079 __PACKAGE__-
>register_method({
1081 path
=> '{vmid}/clone',
1085 description
=> "Create a container clone/copy",
1087 description
=> "You need 'VM.Clone' permissions on /vms/{vmid}, " .
1088 "and 'VM.Allocate' permissions " .
1089 "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
1090 "'Datastore.AllocateSpace' on any used storage.",
1093 ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
1095 [ 'perm', '/vms/{newid}', ['VM.Allocate']],
1096 [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param
=> 'pool'],
1101 additionalProperties
=> 0,
1103 node
=> get_standard_option
('pve-node'),
1104 vmid
=> get_standard_option
('pve-vmid', { completion
=> \
&PVE
::LXC
::complete_ctid
}),
1105 newid
=> get_standard_option
('pve-vmid', {
1106 completion
=> \
&PVE
::Cluster
::complete_next_vmid
,
1107 description
=> 'VMID for the clone.' }),
1110 type
=> 'string', format
=> 'dns-name',
1111 description
=> "Set a hostname for the new CT.",
1116 description
=> "Description for the new CT.",
1120 type
=> 'string', format
=> 'pve-poolid',
1121 description
=> "Add the new CT to the specified pool.",
1123 snapname
=> get_standard_option
('pve-lxc-snapshot-name', {
1126 storage
=> get_standard_option
('pve-storage-id', {
1127 description
=> "Target storage for full clone.",
1134 description
=> "Create a full copy of all disk. This is always done when " .
1135 "you clone a normal CT. For CT templates, we try to create a linked clone by default.",
1140 description
=> "The clone feature is experimental, set this " .
1141 "flag if you know what you are doing.",
1144 # target => get_standard_option('pve-node', {
1145 # description => "Target node. Only allowed if the original VM is on shared storage.",
1156 my $rpcenv = PVE
::RPCEnvironment
::get
();
1158 my $authuser = $rpcenv->get_user();
1160 my $node = extract_param
($param, 'node');
1162 my $vmid = extract_param
($param, 'vmid');
1164 my $newid = extract_param
($param, 'newid');
1166 my $pool = extract_param
($param, 'pool');
1168 if (defined($pool)) {
1169 $rpcenv->check_pool_exist($pool);
1172 my $snapname = extract_param
($param, 'snapname');
1174 my $storage = extract_param
($param, 'storage');
1176 my $localnode = PVE
::INotify
::nodename
();
1178 my $storecfg = PVE
::Storage
::config
();
1181 # check if storage is enabled on local node
1182 PVE
::Storage
::storage_check_enabled
($storecfg, $storage);
1185 PVE
::Cluster
::check_cfs_quorum
();
1187 my $running = PVE
::LXC
::check_running
($vmid) || 0;
1191 # do all tests after lock
1192 # we also try to do all tests before we fork the worker
1193 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
1195 PVE
::LXC
::Config-
>check_lock($conf);
# the running state must not have changed between pre-check and lock
1197 my $verify_running = PVE
::LXC
::check_running
($vmid) || 0;
1199 die "unexpected state change\n" if $verify_running != $running;
1201 die "snapshot '$snapname' does not exist\n"
1202 if $snapname && !defined( $conf->{snapshots
}->{$snapname});
# clone either from a snapshot's config or from the current config
1204 my $oldconf = $snapname ?
$conf->{snapshots
}->{$snapname} : $conf;
1206 my $conffile = PVE
::LXC
::Config-
>config_file($newid);
1207 die "unable to create CT $newid: config file already exists\n"
1210 my $newconf = { lock => 'clone' };
1211 my $mountpoints = {};
# walk the source config: collect mount points for cloning, verify the
# storage supports linked/full clone, and copy all other options verbatim
1215 foreach my $opt (keys %$oldconf) {
1216 my $value = $oldconf->{$opt};
1218 # no need to copy unused images, because VMID(owner) changes anyways
1219 next if $opt =~ m/^unused\d+$/;
1221 if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
1222 my $mp = $opt eq 'rootfs' ?
1223 PVE
::LXC
::Config-
>parse_ct_rootfs($value) :
1224 PVE
::LXC
::Config-
>parse_ct_mountpoint($value);
1226 if ($mp->{type
} eq 'volume') {
1227 my $volid = $mp->{volume
};
1228 if ($param->{full
}) {
1229 die "fixme: full clone not implemented";
1231 die "Full clone feature for '$volid' is not available\n"
1232 if !PVE
::Storage
::volume_has_feature
($storecfg, 'copy', $volid, $snapname, $running);
1233 $fullclone->{$opt} = 1;
1235 # not full means clone instead of copy
1236 die "Linked clone feature for '$volid' is not available\n"
1237 if !PVE
::Storage
::volume_has_feature
($storecfg, 'clone', $volid, $snapname, $running);
1240 $mountpoints->{$opt} = $mp;
1241 push @$vollist, $volid;
1244 # TODO: allow bind mounts?
# NOTE(review): typo "mountpint" in the die string below — runtime string,
# not changed here; fix upstream.
1245 die "unable to clone mountpint '$opt' (type $mp->{type})\n";
1249 # copy everything else
1250 $newconf->{$opt} = $value;
# the clone must never inherit the template flag; apply overrides
1254 delete $newconf->{template
};
1255 if ($param->{hostname
}) {
1256 $newconf->{hostname
} = $param->{hostname
};
1259 if ($param->{description
}) {
1260 $newconf->{description
} = $param->{description
};
1263 # create empty/temp config - this fails if CT already exists on other node
1264 PVE
::Tools
::file_set_contents
($conffile, "# ctclone temporary file\nlock: clone\n");
1269 my $newvollist = [];
# abort cleanly (triggering the rollback below) on termination signals
1272 local $SIG{INT
} = $SIG{TERM
} = $SIG{QUIT
} = $SIG{HUP
} = sub { die "interrupted by signal\n"; };
1274 PVE
::Storage
::activate_volumes
($storecfg, $vollist, $snapname);
# clone each collected mount point; write the target config after each
# volume so partial progress is recorded
1276 foreach my $opt (keys %$mountpoints) {
1277 my $mp = $mountpoints->{$opt};
1278 my $volid = $mp->{volume
};
1280 if ($fullclone->{$opt}) {
1281 die "fixme: full clone not implemented\n";
1283 print "create linked clone of mount point $opt ($volid)\n";
1284 my $newvolid = PVE
::Storage
::vdisk_clone
($storecfg, $volid, $newid, $snapname);
1285 push @$newvollist, $newvolid;
1286 $mp->{volume
} = $newvolid;
1288 $newconf->{$opt} = PVE
::LXC
::Config-
>print_ct_mountpoint($mp, $opt eq 'rootfs');
1289 PVE
::LXC
::Config-
>write_config($newid, $newconf);
# success: drop the temporary 'clone' lock and finalize the config
1293 delete $newconf->{lock};
1294 PVE
::LXC
::Config-
>write_config($newid, $newconf);
1296 PVE
::AccessControl
::add_vm_to_pool
($newid, $pool) if $pool;
# error path: free any volumes already cloned, then re-raise
1301 sleep 1; # some storage like rbd need to wait before release volume - really?
1303 foreach my $volid (@$newvollist) {
1304 eval { PVE
::Storage
::vdisk_free
($storecfg, $volid); };
1307 die "clone failed: $err";
1313 PVE
::Firewall
::clone_vmfw_conf
($vmid, $newid);
1315 return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);
1319 return PVE
::LXC
::Config-
>lock_config($vmid, $clonefn);
# NOTE(review): garbled extraction — tokens are split across lines and many
# interior source lines are missing; code left byte-identical, comments only.
# resize_vm (PUT /nodes/{node}/lxc/{vmid}/resize): grow a mount-point
# volume ('+N' = relative, 'N' = absolute; shrinking refused), then grow
# the filesystem in-place (online via unshare/resize2fs for raw volumes,
# offline via e2fsck + resize2fs).
1323 __PACKAGE__-
>register_method({
1324 name
=> 'resize_vm',
1325 path
=> '{vmid}/resize',
1329 description
=> "Resize a container mount point.",
1331 check
=> ['perm', '/vms/{vmid}', ['VM.Config.Disk'], any
=> 1],
1334 additionalProperties
=> 0,
1336 node
=> get_standard_option
('pve-node'),
1337 vmid
=> get_standard_option
('pve-vmid', { completion
=> \
&PVE
::LXC
::complete_ctid
}),
1340 description
=> "The disk you want to resize.",
1341 enum
=> [PVE
::LXC
::Config-
>mountpoint_names()],
1345 pattern
=> '\+?\d+(\.\d+)?[KMGT]?',
1346 description
=> "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
1350 description
=> 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
1358 description
=> "the task ID.",
1363 my $rpcenv = PVE
::RPCEnvironment
::get
();
1365 my $authuser = $rpcenv->get_user();
1367 my $node = extract_param
($param, 'node');
1369 my $vmid = extract_param
($param, 'vmid');
1371 my $digest = extract_param
($param, 'digest');
# '$ext' is true for relative ('+') sizes; the s/// strips the '+' sign
1373 my $sizestr = extract_param
($param, 'size');
1374 my $ext = ($sizestr =~ s/^\+//);
1375 my $newsize = PVE
::JSONSchema
::parse_size
($sizestr);
1376 die "invalid size string" if !defined($newsize);
1378 die "no options specified\n" if !scalar(keys %$param);
1380 PVE
::LXC
::check_ct_modify_config_perm
($rpcenv, $authuser, $vmid, undef, $param, []);
1382 my $storage_cfg = cfs_read_file
("storage.cfg");
1386 my $conf = PVE
::LXC
::Config-
>load_config($vmid);
1387 PVE
::LXC
::Config-
>check_lock($conf);
# optimistic concurrency control: fail if the config changed since the
# client read it
1389 PVE
::Tools
::assert_if_modified
($digest, $conf->{digest
});
1391 my $running = PVE
::LXC
::check_running
($vmid);
1393 my $disk = $param->{disk
};
1394 my $mp = $disk eq 'rootfs' ? PVE
::LXC
::Config-
>parse_ct_rootfs($conf->{$disk}) :
1395 PVE
::LXC
::Config-
>parse_ct_mountpoint($conf->{$disk});
1397 my $volid = $mp->{volume
};
1399 my (undef, undef, $owner, undef, undef, undef, $format) =
1400 PVE
::Storage
::parse_volname
($storage_cfg, $volid);
1402 die "can't resize mount point owned by another container ($owner)"
1405 die "can't resize volume: $disk if snapshot exists\n"
1406 if %{$conf->{snapshots
}} && $format eq 'qcow2';
1408 my ($storeid, $volname) = PVE
::Storage
::parse_volume_id
($volid);
1410 $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
1412 PVE
::Storage
::activate_volumes
($storage_cfg, [$volid]);
# resolve the effective target size; shrinking is rejected, no-op returns
1414 my $size = PVE
::Storage
::volume_size_info
($storage_cfg, $volid, 5);
1415 $newsize += $size if $ext;
1416 $newsize = int($newsize);
1418 die "unable to shrink disk size\n" if $newsize < $size;
1420 return if $size == $newsize;
1422 PVE
::Cluster
::log_msg
('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr");
1424 # Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
1425 # we pass 0 here (parameter only makes sense for qemu)
1426 PVE
::Storage
::volume_resize
($storage_cfg, $volid, $newsize, 0);
# persist the new size in the CT config before touching the filesystem
1428 $mp->{size
} = $newsize;
1429 $conf->{$disk} = PVE
::LXC
::Config-
>print_ct_mountpoint($mp, $disk eq 'rootfs');
1431 PVE
::LXC
::Config-
>write_config($vmid, $conf);
# raw volumes: grow the contained filesystem too (qcow2 etc. are handled
# by the storage layer)
1433 if ($format eq 'raw') {
1434 my $path = PVE
::Storage
::path
($storage_cfg, $volid, undef);
1438 my $use_loopdev = (PVE
::LXC
::mountpoint_mount_path
($mp, $storage_cfg))[1];
1439 $path = PVE
::LXC
::query_loopdev
($path) if $use_loopdev;
1440 die "internal error: CT running but mount point not attached to a loop device"
1442 PVE
::Tools
::run_command
(['losetup', '--set-capacity', $path]) if $use_loopdev;
1444 # In order for resize2fs to know that we need online-resizing a mountpoint needs
1445 # to be visible to it in its namespace.
1446 # To not interfere with the rest of the system we unshare the current mount namespace,
1447 # mount over /tmp and then run resize2fs.
1449 # interestingly we don't need to e2fsck on mounted systems...
1450 my $quoted = PVE
::Tools
::shellquote
($path);
1451 my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
1453 PVE
::Tools
::run_command
(['unshare', '-m', '--', 'sh', '-c', $cmd]);
1455 warn "Failed to update the container's filesystem: $@\n" if $@;
# offline path: fsck first, then resize
1458 PVE
::Tools
::run_command
(['e2fsck', '-f', '-y', $path]);
1459 PVE
::Tools
::run_command
(['resize2fs', $path]);
1461 warn "Failed to update the container's filesystem: $@\n" if $@;
1466 return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd);
# NOTE(review): stray double semicolon below — harmless, but worth cleaning
# up upstream; left byte-identical here.
1469 return PVE
::LXC
::Config-
>lock_config($vmid, $code);;