]>
Commit | Line | Data |
---|---|---|
1 | package PVE::API2::LXC; | |
2 | ||
3 | use strict; | |
4 | use warnings; | |
5 | ||
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::INotify;
use PVE::Cluster qw(cfs_read_file);
use PVE::AccessControl;
use PVE::Firewall;
use PVE::Storage;
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::ReplicationConfig;
use PVE::LXC;
use PVE::LXC::Create;
use PVE::LXC::Migrate;
use PVE::GuestHelpers;
use PVE::API2Tools;
use PVE::API2::LXC::Config;
use PVE::API2::LXC::Status;
use PVE::API2::LXC::Snapshot;
use PVE::JSONSchema qw(get_standard_option);

use base qw(PVE::RESTHandler);
26 | ||
# Load the HA environment lazily: the PVE::HA modules pull in a large part of
# the cluster stack, which is unwanted when this module is loaded only to
# extract the API schema (PVE_GENERATING_DOCS is presumably set by the docs
# build -- TODO confirm against the build scripts).
BEGIN {
    if (!$ENV{PVE_GENERATING_DOCS}) {
        require PVE::HA::Env::PVE2;
        import PVE::HA::Env::PVE2;
        require PVE::HA::Config;
        import PVE::HA::Config;
    }
}
35 | ||
# Mount the per-container sub-APIs below '{vmid}/...'; each subclass handles
# its own sub-tree of the REST path space.
__PACKAGE__->register_method ({
    subclass => "PVE::API2::LXC::Config",
    path => '{vmid}/config',
});

__PACKAGE__->register_method ({
    subclass => "PVE::API2::LXC::Status",
    path => '{vmid}/status',
});

__PACKAGE__->register_method ({
    subclass => "PVE::API2::LXC::Snapshot",
    path => '{vmid}/snapshot',
});

__PACKAGE__->register_method ({
    subclass => "PVE::API2::Firewall::CT",
    path => '{vmid}/firewall',
});
55 | ||
__PACKAGE__->register_method({
    name => 'vmlist',
    path => '',
    method => 'GET',
    description => "LXC container index (per node).",
    permissions => {
        description => "Only list CTs where you have VM.Audit permissons on /vms/<vmid>.",
        user => 'all',
    },
    proxyto => 'node',
    protected => 1, # /proc files are only readable by root
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
        },
    },
    returns => {
        type => 'array',
        items => {
            type => "object",
            properties => {},
        },
        links => [ { rel => 'child', href => "{vmid}" } ],
    },
    # List all containers on this node, filtered to those the caller may audit.
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        # status hash for every local container, keyed by CT ID
        my $vmstatus = PVE::LXC::vmstatus();

        my @index;
        for my $ctid (keys %$vmstatus) {
            # silently skip containers the caller lacks VM.Audit on
            next if !$rpcenv->check($authuser, "/vms/$ctid", [ 'VM.Audit' ], 1);

            my $entry = $vmstatus->{$ctid};
            $entry->{vmid} = $ctid;
            push @index, $entry;
        }

        return \@index;
    }});
101 | ||
# Create a new container from an OS template, or restore one from a backup
# archive. The heavy lifting runs in a forked worker under the container
# config lock; the returned string is the worker's task UPID.
__PACKAGE__->register_method({
    name => 'create_vm',
    path => '',
    method => 'POST',
    description => "Create or restore a container.",
    permissions => {
        user => 'all', # check inside
        description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " .
            "For restore, it is enough if the user has 'VM.Backup' permission and the VM already exists. " .
            "You also need 'Datastore.AllocateSpace' permissions on the storage.",
    },
    protected => 1,
    proxyto => 'node',
    parameters => {
        additionalProperties => 0,
        # all regular CT config options plus the creation-only ones below
        properties => PVE::LXC::Config->json_config_properties({
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
            ostemplate => {
                description => "The OS template or backup file.",
                type => 'string',
                maxLength => 255,
                completion => \&PVE::LXC::complete_os_templates,
            },
            password => {
                optional => 1,
                type => 'string',
                description => "Sets root password inside container.",
                minLength => 5,
            },
            storage => get_standard_option('pve-storage-id', {
                description => "Default Storage.",
                default => 'local',
                optional => 1,
                completion => \&PVE::Storage::complete_storage_enabled,
            }),
            force => {
                optional => 1,
                type => 'boolean',
                description => "Allow to overwrite existing container.",
            },
            restore => {
                optional => 1,
                type => 'boolean',
                description => "Mark this as restore task.",
            },
            pool => {
                optional => 1,
                type => 'string', format => 'pve-poolid',
                description => "Add the VM to the specified pool.",
            },
            'ignore-unpack-errors' => {
                optional => 1,
                type => 'boolean',
                description => "Ignore errors when extracting the template.",
            },
            'ssh-public-keys' => {
                optional => 1,
                type => 'string',
                description => "Setup public SSH keys (one key per line, " .
                    "OpenSSH format).",
            },
        }),
    },
    returns => {
        # worker task UPID
        type => 'string',
    },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        my $node = extract_param($param, 'node');

        my $vmid = extract_param($param, 'vmid');

        my $ignore_unpack_errors = extract_param($param, 'ignore-unpack-errors');

        my $basecfg_fn = PVE::LXC::Config->config_file($vmid);

        my $same_container_exists = -f $basecfg_fn;

        # 'unprivileged' is read-only, so we can't pass it to update_pct_config
        my $unprivileged = extract_param($param, 'unprivileged');

        my $restore = extract_param($param, 'restore');

        if ($restore) {
            # fixme: limit allowed parameters

        }

        my $force = extract_param($param, 'force');

        # The vmid must be unused, unless this is a forced restore over an
        # existing (non-protected) container.
        if (!($same_container_exists && $restore && $force)) {
            PVE::Cluster::check_vmid_unused($vmid);
        } else {
            my $conf = PVE::LXC::Config->load_config($vmid);
            PVE::LXC::Config->check_protection($conf, "unable to restore CT $vmid");
        }

        my $password = extract_param($param, 'password');

        my $ssh_keys = extract_param($param, 'ssh-public-keys');
        PVE::Tools::validate_ssh_public_keys($ssh_keys) if defined($ssh_keys);

        my $pool = extract_param($param, 'pool');

        if (defined($pool)) {
            $rpcenv->check_pool_exist($pool);
            $rpcenv->check_perm_modify($authuser, "/pool/$pool");
        }

        # Allocation permission: directly on the vmid, via the target pool, or
        # VM.Backup when force-restoring an already existing container.
        if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
            # OK
        } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) {
            # OK
        } elsif ($restore && $force && $same_container_exists &&
                 $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
            # OK: user has VM.Backup permissions, and want to restore an existing VM
        } else {
            raise_perm_exc();
        }

        my $ostemplate = extract_param($param, 'ostemplate');
        my $storage = extract_param($param, 'storage') // 'local';

        PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, $pool, $param, []);

        my $storage_cfg = cfs_read_file("storage.cfg");


        my $archive;

        # '-' means: read the backup archive from stdin (CLI restore only)
        if ($ostemplate eq '-') {
            die "pipe requires cli environment\n"
                if $rpcenv->{type} ne 'cli';
            die "pipe can only be used with restore tasks\n"
                if !$restore;
            $archive = '-';
            die "restore from pipe requires rootfs parameter\n" if !defined($param->{rootfs});
        } else {
            PVE::Storage::check_volume_access($rpcenv, $authuser, $storage_cfg, $vmid, $ostemplate);
            $archive = PVE::Storage::abs_filesystem_path($storage_cfg, $ostemplate);
        }

        # Verify a storage is usable for CT root dirs on this node, check
        # allocation permission, and activate it.
        my $check_and_activate_storage = sub {
            my ($sid) = @_;

            my $scfg = PVE::Storage::storage_check_node($storage_cfg, $sid, $node);

            raise_param_exc({ storage => "storage '$sid' does not support container directories"})
                if !$scfg->{content}->{rootdir};

            $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);

            PVE::Storage::activate_storage($storage_cfg, $sid);
        };

        my $conf = {};

        # Split parameters into mount-point options (rootfs/mpN) and the rest;
        # $storage_only_mode stays set when the caller gave no mount points.
        my $no_disk_param = {};
        my $mp_param = {};
        my $storage_only_mode = 1;
        foreach my $opt (keys %$param) {
            my $value = $param->{$opt};
            if ($opt eq 'rootfs' || $opt =~ m/^mp\d+$/) {
                # allow to use simple numbers (add default storage in that case)
                if ($value =~ m/^\d+(\.\d+)?$/) {
                    $mp_param->{$opt} = "$storage:$value";
                } else {
                    $mp_param->{$opt} = $value;
                }
                $storage_only_mode = 0;
            } elsif ($opt =~ m/^unused\d+$/) {
                warn "ignoring '$opt', cannot create/restore with unused volume\n";
                delete $param->{$opt};
            } else {
                $no_disk_param->{$opt} = $value;
            }
        }

        die "mount points configured, but 'rootfs' not set - aborting\n"
            if !$storage_only_mode && !defined($mp_param->{rootfs});

        # check storage access, activate storage
        my $delayed_mp_param = {};
        PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
            my ($ms, $mountpoint) = @_;

            my $volid = $mountpoint->{volume};
            my $mp = $mountpoint->{mp};

            if ($mountpoint->{type} ne 'volume') { # bind or device
                die "Only root can pass arbitrary filesystem paths.\n"
                    if $authuser ne 'root@pam';
            } else {
                my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
                &$check_and_activate_storage($sid);
            }
        });

        # check/activate default storage
        &$check_and_activate_storage($storage) if !defined($mp_param->{rootfs});

        PVE::LXC::Config->update_pct_config($vmid, $conf, 0, $no_disk_param);

        $conf->{unprivileged} = 1 if $unprivileged;

        # Re-checked after taking the config lock, since another request may
        # have grabbed the vmid in between.
        my $check_vmid_usage = sub {
            if ($force) {
                die "can't overwrite running container\n"
                    if PVE::LXC::check_running($vmid);
            } else {
                PVE::Cluster::check_vmid_unused($vmid);
            }
        };

        my $code = sub {
            &$check_vmid_usage(); # final check after locking
            my $old_conf;

            my $config_fn = PVE::LXC::Config->config_file($vmid);
            if (-f $config_fn) {
                die "container exists" if !$restore; # just to be sure
                $old_conf = PVE::LXC::Config->load_config($vmid);
            } else {
                eval {
                    # try to create empty config on local node, we have an flock
                    PVE::LXC::Config->write_config($vmid, {});
                };

                # another node was faster, abort
                die "Could not reserve ID $vmid, already taken\n" if $@;
            }

            PVE::Cluster::check_cfs_quorum();
            my $vollist = [];

            eval {
                if ($storage_only_mode) {
                    if ($restore) {
                        # recover the mount-point layout from the backup archive
                        (undef, $mp_param) = PVE::LXC::Create::recover_config($archive);
                        die "rootfs configuration could not be recovered, please check and specify manually!\n"
                            if !defined($mp_param->{rootfs});
                        PVE::LXC::Config->foreach_mountpoint($mp_param, sub {
                            my ($ms, $mountpoint) = @_;
                            my $type = $mountpoint->{type};
                            if ($type eq 'volume') {
                                die "unable to detect disk size - please specify $ms (size)\n"
                                    if !defined($mountpoint->{size});
                                my $disksize = $mountpoint->{size} / (1024 * 1024 * 1024); # create_disks expects GB as unit size
                                delete $mountpoint->{size};
                                $mountpoint->{volume} = "$storage:$disksize";
                                $mp_param->{$ms} = PVE::LXC::Config->print_ct_mountpoint($mountpoint, $ms eq 'rootfs');
                            } else {
                                my $type = $mountpoint->{type};
                                die "restoring rootfs to $type mount is only possible by specifying -rootfs manually!\n"
                                    if ($ms eq 'rootfs');
                                die "restoring '$ms' to $type mount is only possible for root\n"
                                    if $authuser ne 'root@pam';

                                if ($mountpoint->{backup}) {
                                    warn "WARNING - unsupported configuration!\n";
                                    warn "backup was enabled for $type mount point $ms ('$mountpoint->{mp}')\n";
                                    warn "mount point configuration will be restored after archive extraction!\n";
                                    warn "contained files will be restored to wrong directory!\n";
                                }
                                delete $mp_param->{$ms}; # actually delay bind/dev mps
                                $delayed_mp_param->{$ms} = PVE::LXC::Config->print_ct_mountpoint($mountpoint, $ms eq 'rootfs');
                            }
                        });
                    } else {
                        $mp_param->{rootfs} = "$storage:4"; # defaults to 4GB
                    }
                }

                $vollist = PVE::LXC::create_disks($storage_cfg, $vmid, $mp_param, $conf);

                if (defined($old_conf)) {
                    # destroy old container volumes
                    PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $old_conf, {});
                }

                eval {
                    my $rootdir = PVE::LXC::mount_all($vmid, $storage_cfg, $conf, 1);
                    PVE::LXC::Create::restore_archive($archive, $rootdir, $conf, $ignore_unpack_errors);

                    if ($restore) {
                        PVE::LXC::Create::restore_configuration($vmid, $rootdir, $conf, $authuser ne 'root@pam');
                    } else {
                        my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir); # detect OS
                        PVE::LXC::Config->write_config($vmid, $conf); # safe config (after OS detection)
                        $lxc_setup->post_create_hook($password, $ssh_keys);
                    }
                };
                # always unmount/deactivate before propagating any error
                my $err = $@;
                PVE::LXC::umount_all($vmid, $storage_cfg, $conf, $err ? 1 : 0);
                PVE::Storage::deactivate_volumes($storage_cfg, PVE::LXC::Config->get_vm_volumes($conf));
                die $err if $err;
                # set some defaults
                $conf->{hostname} ||= "CT$vmid";
                $conf->{memory} ||= 512;
                $conf->{swap} //= 512;
                # bind/device mount points were delayed above; add them back now
                foreach my $mp (keys %$delayed_mp_param) {
                    $conf->{$mp} = $delayed_mp_param->{$mp};
                }
                PVE::LXC::Config->write_config($vmid, $conf);
            };
            if (my $err = $@) {
                # roll back: remove any disks we created plus the reserved config
                PVE::LXC::destroy_disks($storage_cfg, $vollist);
                PVE::LXC::destroy_config($vmid);
                die $err;
            }
            PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
        };

        my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };

        &$check_vmid_usage(); # first check before locking

        return $rpcenv->fork_worker($restore ? 'vzrestore' : 'vzcreate',
                                    $vmid, $authuser, $realcmd);

    }});
429 | ||
__PACKAGE__->register_method({
    name => 'vmdiridx',
    path => '{vmid}',
    method => 'GET',
    proxyto => 'node',
    description => "Directory index",
    permissions => {
        user => 'all',
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid'),
        },
    },
    returns => {
        type => 'array',
        items => {
            type => "object",
            properties => {
                subdir => { type => 'string' },
            },
        },
        links => [ { rel => 'child', href => "{subdir}" } ],
    },
    # Static index of the sub-paths available below '{vmid}'.
    code => sub {
        my ($param) = @_;

        # loading the config throws if the container does not exist
        PVE::LXC::Config->load_config($param->{vmid});

        my @subdirs = qw(
            config
            status
            vncproxy
            vncwebsocket
            spiceproxy
            migrate
            clone
            rrd
            rrddata
            firewall
            snapshot
            resize
        );

        return [ map { { subdir => $_ } } @subdirs ];
    }});
480 | ||
481 | ||
__PACKAGE__->register_method({
    name => 'rrd',
    path => '{vmid}/rrd',
    method => 'GET',
    protected => 1, # fixme: can we avoid that?
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
    description => "Read VM RRD statistics (returns PNG)",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid'),
            timeframe => {
                description => "Specify the time frame you are interested in.",
                type => 'string',
                enum => [ 'hour', 'day', 'week', 'month', 'year' ],
            },
            ds => {
                description => "The list of datasources you want to display.",
                type => 'string', format => 'pve-configid-list',
            },
            cf => {
                description => "The RRD consolidation function",
                type => 'string',
                enum => [ 'AVERAGE', 'MAX' ],
                optional => 1,
            },
        },
    },
    returns => {
        type => "object",
        properties => {
            filename => { type => 'string' },
        },
    },
    # Thin wrapper: render the per-VM RRD database as a graph image.
    code => sub {
        my ($param) = @_;

        my $rrd_source = "pve2-vm/$param->{vmid}";

        return PVE::Cluster::create_rrd_graph(
            $rrd_source, $param->{timeframe}, $param->{ds}, $param->{cf});
    }});
527 | ||
__PACKAGE__->register_method({
    name => 'rrddata',
    path => '{vmid}/rrddata',
    method => 'GET',
    protected => 1, # fixme: can we avoid that?
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
    description => "Read VM RRD statistics",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid'),
            timeframe => {
                description => "Specify the time frame you are interested in.",
                type => 'string',
                enum => [ 'hour', 'day', 'week', 'month', 'year' ],
            },
            cf => {
                description => "The RRD consolidation function",
                type => 'string',
                enum => [ 'AVERAGE', 'MAX' ],
                optional => 1,
            },
        },
    },
    returns => {
        type => "array",
        items => {
            type => "object",
            properties => {},
        },
    },
    # Thin wrapper: return the raw per-VM RRD samples instead of a graph.
    code => sub {
        my ($param) = @_;

        my $rrd_source = "pve2-vm/$param->{vmid}";

        return PVE::Cluster::create_rrd_data(
            $rrd_source, $param->{timeframe}, $param->{cf});
    }});
568 | ||
# Destroy a container together with its volumes, ACLs and firewall config.
# Refuses when the CT is protected, running, HA-managed, or referenced by
# replication jobs. Work happens in a forked worker under the config lock.
__PACKAGE__->register_method({
    name => 'destroy_vm',
    path => '{vmid}',
    method => 'DELETE',
    protected => 1,
    proxyto => 'node',
    # typo fix: "uses files" -> "used files"
    description => "Destroy the container (also delete all used files).",
    permissions => {
        check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }),
        },
    },
    returns => {
        # worker task UPID
        type => 'string',
    },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        my $vmid = $param->{vmid};

        # test if container exists
        my $conf = PVE::LXC::Config->load_config($vmid);

        my $storage_cfg = cfs_read_file("storage.cfg");

        PVE::LXC::Config->check_protection($conf, "can't remove CT $vmid");

        die "unable to remove CT $vmid - used in HA resources\n"
            if PVE::HA::Config::vm_is_ha_managed($vmid);

        # do not allow destroy if there are replication jobs
        my $repl_conf = PVE::ReplicationConfig->new();
        $repl_conf->check_for_existing_jobs($vmid);

        my $running_error_msg = "unable to destroy CT $vmid - container is running\n";

        die $running_error_msg if PVE::LXC::check_running($vmid); # check early

        my $code = sub {
            # reload config after lock -- it may have changed in the meantime
            $conf = PVE::LXC::Config->load_config($vmid);
            PVE::LXC::Config->check_lock($conf);

            # re-check under the lock; the CT could have been started since
            die $running_error_msg if PVE::LXC::check_running($vmid);

            PVE::LXC::destroy_lxc_container($storage_cfg, $vmid, $conf);
            PVE::AccessControl::remove_vm_access($vmid);
            PVE::Firewall::remove_vmfw_conf($vmid);
        };

        my $realcmd = sub { PVE::LXC::Config->lock_config($vmid, $code); };

        return $rpcenv->fork_worker('vzdestroy', $vmid, $authuser, $realcmd);
    }});
632 | ||
# Cached content of the cluster root CA certificate, loaded lazily on the
# first vncproxy call and returned to clients for TLS verification.
my $sslcert;

# Start a vncterm instance attached to the container console (via dtach) and
# return the connection details (port, one-time ticket, CA cert, task UPID).
__PACKAGE__->register_method ({
    name => 'vncproxy',
    path => '{vmid}/vncproxy',
    method => 'POST',
    protected => 1,
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
    },
    description => "Creates a TCP VNC proxy connections.",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid'),
            websocket => {
                optional => 1,
                type => 'boolean',
                description => "use websocket instead of standard VNC.",
            },
            width => {
                optional => 1,
                description => "sets the width of the console in pixels.",
                type => 'integer',
                minimum => 16,
                maximum => 4096,
            },
            height => {
                optional => 1,
                description => "sets the height of the console in pixels.",
                type => 'integer',
                minimum => 16,
                maximum => 2160,
            },
        },
    },
    returns => {
        additionalProperties => 0,
        properties => {
            user => { type => 'string' },
            ticket => { type => 'string' },
            cert => { type => 'string' },
            port => { type => 'integer' },
            upid => { type => 'string' },
        },
    },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        my $vmid = $param->{vmid};
        my $node = $param->{node};

        my $authpath = "/vms/$vmid";

        # short-lived ticket the client presents on the VNC/websocket handshake
        my $ticket = PVE::AccessControl::assemble_vnc_ticket($authuser, $authpath);

        $sslcert = PVE::Tools::file_get_contents("/etc/pve/pve-root-ca.pem", 8192)
            if !$sslcert;

        my ($remip, $family);

        # if the CT lives on another node, the console is reached over ssh
        if ($node ne PVE::INotify::nodename()) {
            ($remip, $family) = PVE::Cluster::remote_node_ip($node);
        } else {
            $family = PVE::Tools::get_host_address_family($node);
        }

        my $port = PVE::Tools::next_vnc_port($family);

        # NOTE: vncterm VNC traffic is already TLS encrypted,
        # so we select the fastest cipher here (or 'none'?)
        my $remcmd = $remip ?
            ['/usr/bin/ssh', '-t', $remip] : [];

        my $conf = PVE::LXC::Config->load_config($vmid, $node);
        my $concmd = PVE::LXC::get_console_command($vmid, $conf);

        # attach to (or create) the per-container dtach console session
        my $shcmd = [ '/usr/bin/dtach', '-A',
                      "/var/run/dtach/vzctlconsole$vmid",
                      '-r', 'winch', '-z', @$concmd];

        my $realcmd = sub {
            my $upid = shift;

            syslog ('info', "starting lxc vnc proxy $upid\n");

            my $timeout = 10;

            my $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
                       '-timeout', $timeout, '-authpath', $authpath,
                       '-perm', 'VM.Console'];

            if ($param->{width}) {
                push @$cmd, '-width', $param->{width};
            }

            if ($param->{height}) {
                push @$cmd, '-height', $param->{height};
            }

            if ($param->{websocket}) {
                $ENV{PVE_VNC_TICKET} = $ticket; # pass ticket to vncterm
                push @$cmd, '-notls', '-listen', 'localhost';
            }

            push @$cmd, '-c', @$remcmd, @$shcmd;

            run_command($cmd, keeplocale => 1);

            return;
        };

        my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);

        # block until vncterm actually listens, so the client can connect
        PVE::Tools::wait_for_vnc_port($port);

        return {
            user => $authuser,
            ticket => $ticket,
            port => $port,
            upid => $upid,
            cert => $sslcert,
        };
    }});
762 | ||
# Validate a VNC ticket (issued by vncproxy) and hand the port back so the
# HTTP server can upgrade the connection to a websocket tunnel.
__PACKAGE__->register_method({
    name => 'vncwebsocket',
    path => '{vmid}/vncwebsocket',
    method => 'GET',
    permissions => {
        description => "You also need to pass a valid ticket (vncticket).",
        check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
    },
    # typo fix: "weksocket" -> "websocket"
    description => "Opens a websocket for VNC traffic.",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid'),
            vncticket => {
                description => "Ticket from previous call to vncproxy.",
                type => 'string',
                maxLength => 512,
            },
            port => {
                description => "Port number returned by previous vncproxy call.",
                type => 'integer',
                minimum => 5900,
                maximum => 5999,
            },
        },
    },
    returns => {
        type => "object",
        properties => {
            port => { type => 'string' },
        },
    },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        my $authpath = "/vms/$param->{vmid}";

        # dies if the ticket is invalid/expired for this user and path
        PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath);

        my $port = $param->{port};

        return { port => $port };
    }});
811 | ||
__PACKAGE__->register_method ({
    name => 'spiceproxy',
    path => '{vmid}/spiceproxy',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
    },
    description => "Returns a SPICE configuration to connect to the CT.",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid'),
            proxy => get_standard_option('spice-proxy', { optional => 1 }),
        },
    },
    returns => get_standard_option('remote-viewer-config'),
    # Build the console command and delegate to the shared spiceterm runner.
    code => sub {
        my ($param) = @_;

        my ($vmid, $node, $proxy) = @$param{qw(vmid node proxy)};

        my $authpath = "/vms/$vmid";
        my $permissions = 'VM.Console';

        my $conf = PVE::LXC::Config->load_config($vmid);

        die "CT $vmid not running\n" if !PVE::LXC::check_running($vmid);

        my $console_cmd = PVE::LXC::get_console_command($vmid, $conf);

        # attach to (or create) the per-container dtach console session
        my $shcmd = [
            '/usr/bin/dtach', '-A', "/var/run/dtach/vzctlconsole$vmid",
            '-r', 'winch', '-z', @$console_cmd,
        ];

        my $title = "CT $vmid";

        return PVE::API2Tools::run_spiceterm($authpath, $permissions, $vmid, $node, $proxy, $title, $shcmd);
    }});
855 | ||
856 | ||
# Migrate a container to another cluster node. When the CT is HA-managed and
# we are not already running inside the HA stack, the request is delegated to
# 'ha-manager migrate'; otherwise PVE::LXC::Migrate runs directly under the
# cluster-wide guest migration lock. Returns the worker task UPID.
__PACKAGE__->register_method({
    name => 'migrate_vm',
    path => '{vmid}/migrate',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Migrate the container to another node. Creates a new migration task.",
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
            target => get_standard_option('pve-node', {
                description => "Target node.",
                completion => \&PVE::Cluster::complete_migration_target,
            }),
            online => {
                type => 'boolean',
                description => "Use online/live migration.",
                optional => 1,
            },
            restart => {
                type => 'boolean',
                description => "Use restart migration",
                optional => 1,
            },
            timeout => {
                type => 'integer',
                description => "Timeout in seconds for shutdown for restart migration",
                optional => 1,
                default => 180,
            },
            force => {
                type => 'boolean',
                description => "Force migration despite local bind / device" .
                    " mounts. NOTE: deprecated, use 'shared' property of mount point instead.",
                optional => 1,
            },
        },
    },
    returns => {
        type => 'string',
        description => "the task ID.",
    },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        my $target = extract_param($param, 'target');

        my $localnode = PVE::INotify::nodename();
        raise_param_exc({ target => "target is local node."}) if $target eq $localnode;

        PVE::Cluster::check_cfs_quorum();

        PVE::Cluster::check_node_exists($target);

        my $targetip = PVE::Cluster::remote_node_ip($target);

        my $vmid = extract_param($param, 'vmid');

        # test if VM exists
        PVE::LXC::Config->load_config($vmid);

        # try to detect errors early
        if (PVE::LXC::check_running($vmid)) {
            die "can't migrate running container without --online or --restart\n"
                if !$param->{online} && !$param->{restart};
        }

        if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {

            # HA-managed CT: let the HA manager perform the migration so the
            # resource state stays consistent.
            my $hacmd = sub {
                my $upid = shift;

                my $service = "ct:$vmid";

                my $cmd = ['ha-manager', 'migrate', $service, $target];

                print "Executing HA migrate for CT $vmid to node $target\n";

                PVE::Tools::run_command($cmd);

                return;
            };

            return $rpcenv->fork_worker('hamigrate', $vmid, $authuser, $hacmd);

        } else {

            my $realcmd = sub {
                PVE::LXC::Migrate->migrate($target, $targetip, $vmid, $param);
            };

            # serialize against other migrations of the same guest
            my $worker = sub {
                return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
            };

            return $rpcenv->fork_worker('vzmigrate', $vmid, $authuser, $worker);
        }
    }});
964 | ||
__PACKAGE__->register_method({
    name => 'vm_feature',
    path => '{vmid}/feature',
    method => 'GET',
    proxyto => 'node',
    protected => 1,
    description => "Check if feature for virtual machine is available.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    feature => {
		description => "Feature to check.",
		type => 'string',
		enum => [ 'snapshot' ],
	    },
	    snapname => get_standard_option('pve-lxc-snapshot-name', {
		optional => 1,
	    }),
	},
    },
    returns => {
	type => "object",
	properties => {
	    hasFeature => { type => 'boolean' },
	},
    },
    code => sub {
	my ($param) = @_;

	my $vmid = extract_param($param, 'vmid');
	my $feature = extract_param($param, 'feature');
	my $snapname = extract_param($param, 'snapname');
	my $node = extract_param($param, 'node'); # only used for proxying

	my $conf = PVE::LXC::Config->load_config($vmid);

	# When a snapshot name is given, check the feature against that
	# snapshot's config section instead of the current config.
	if ($snapname) {
	    my $snap = $conf->{snapshots}->{$snapname};
	    die "snapshot '$snapname' does not exist\n" if !defined($snap);
	    $conf = $snap;
	}

	my $storage_cfg = PVE::Storage::config();
	my $available =
	    PVE::LXC::Config->has_feature($feature, $conf, $storage_cfg, $snapname);

	return { hasFeature => $available };
    }});
1028 | ||
__PACKAGE__->register_method({
    name => 'template',
    path => '{vmid}/template',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Create a Template.",
    permissions => {
	description => "You need 'VM.Allocate' permissions on /vms/{vmid}",
	check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid_stopped }),
	    experimental => {
		type => 'boolean',
		description => "The template feature is experimental, set this " .
		    "flag if you know what you are doing.",
		default => 0,
	    },
	},
    },
    returns => { type => 'null'},
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');
	my $vmid = extract_param($param, 'vmid');

	# All checks and the template flag switch run under the config lock,
	# so no concurrent API call can modify the container in between.
	my $make_template = sub {

	    my $conf = PVE::LXC::Config->load_config($vmid);
	    PVE::LXC::Config->check_lock($conf);

	    die "unable to create template, because CT contains snapshots\n"
		if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});

	    die "you can't convert a template to a template\n"
		if PVE::LXC::Config->is_template($conf);

	    die "you can't convert a CT to template if the CT is running\n"
		if PVE::LXC::check_running($vmid);

	    $conf->{template} = 1;
	    PVE::LXC::Config->write_config($vmid, $conf);
	    # and remove lxc config
	    PVE::LXC::update_lxc_config($vmid, $conf);

	    # the actual (possibly slow) volume conversion happens in a worker
	    my $worker = sub {
		PVE::LXC::template_create($vmid, $conf);
	    };

	    return $rpcenv->fork_worker('vztemplate', $vmid, $authuser, $worker);
	};

	PVE::LXC::Config->lock_config($vmid, $make_template);

	return undef;
    }});
1096 | ||
# Clone/copy a container. Only linked clones (of templates/snapshots) are
# implemented here; the full-copy code path still dies with a "fixme".
__PACKAGE__->register_method({
    name => 'clone_vm',
    path => '{vmid}/clone',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Create a container clone/copy",
    permissions => {
	description => "You need 'VM.Clone' permissions on /vms/{vmid}, " .
	    "and 'VM.Allocate' permissions " .
	    "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
	    "'Datastore.AllocateSpace' on any used storage.",
	check =>
	[ 'and',
	  ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
	  [ 'or',
	    [ 'perm', '/vms/{newid}', ['VM.Allocate']],
	    [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
	  ],
	]
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
	    newid => get_standard_option('pve-vmid', {
		completion => \&PVE::Cluster::complete_next_vmid,
		description => 'VMID for the clone.' }),
	    hostname => {
		optional => 1,
		type => 'string', format => 'dns-name',
		description => "Set a hostname for the new CT.",
	    },
	    description => {
		optional => 1,
		type => 'string',
		description => "Description for the new CT.",
	    },
	    pool => {
		optional => 1,
		type => 'string', format => 'pve-poolid',
		description => "Add the new CT to the specified pool.",
	    },
	    snapname => get_standard_option('pve-lxc-snapshot-name', {
		optional => 1,
	    }),
	    storage => get_standard_option('pve-storage-id', {
		description => "Target storage for full clone.",
		requires => 'full',
		optional => 1,
	    }),
	    full => {
		optional => 1,
		type => 'boolean',
		description => "Create a full copy of all disk. This is always done when " .
		    "you clone a normal CT. For CT templates, we try to create a linked clone by default.",
		default => 0,
	    },
	    experimental => {
		type => 'boolean',
		description => "The clone feature is experimental, set this " .
		    "flag if you know what you are doing.",
		default => 0,
	    },
#	    target => get_standard_option('pve-node', {
#		description => "Target node. Only allowed if the original VM is on shared storage.",
#		optional => 1,
#	    }),
	},
    },
    returns => {
	type => 'string',
    },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();

	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');

	my $vmid = extract_param($param, 'vmid');

	my $newid = extract_param($param, 'newid');

	my $pool = extract_param($param, 'pool');

	# fail early if the target pool does not exist
	if (defined($pool)) {
	    $rpcenv->check_pool_exist($pool);
	}

	my $snapname = extract_param($param, 'snapname');

	my $storage = extract_param($param, 'storage');

	my $localnode = PVE::INotify::nodename();

	my $storecfg = PVE::Storage::config();

	if ($storage) {
	    # check if storage is enabled on local node
	    PVE::Storage::storage_check_enabled($storecfg, $storage);
	}

	PVE::Cluster::check_cfs_quorum();

	# remember run state before taking the lock so we can detect a race below
	my $running = PVE::LXC::check_running($vmid) || 0;

	my $clonefn = sub {

	    # do all tests after lock
	    # we also try to do all tests before we fork the worker
	    my $conf = PVE::LXC::Config->load_config($vmid);

	    PVE::LXC::Config->check_lock($conf);

	    my $verify_running = PVE::LXC::check_running($vmid) || 0;

	    die "unexpected state change\n" if $verify_running != $running;

	    die "snapshot '$snapname' does not exist\n"
		if $snapname && !defined( $conf->{snapshots}->{$snapname});

	    # clone from the snapshot section of the config if one was given
	    my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;

	    my $conffile = PVE::LXC::Config->config_file($newid);
	    die "unable to create CT $newid: config file already exists\n"
		if -f $conffile;

	    # $newconf is built up from $oldconf; the 'clone' lock stays set
	    # until all volumes have been cloned successfully
	    my $newconf = { lock => 'clone' };
	    my $mountpoints = {};	# opt name -> parsed mount point to clone
	    my $fullclone = {};		# opt names that need a full copy
	    my $vollist = [];		# source volumes to activate

	    foreach my $opt (keys %$oldconf) {
		my $value = $oldconf->{$opt};

		# no need to copy unused images, because VMID(owner) changes anyways
		next if $opt =~ m/^unused\d+$/;

		if (($opt eq 'rootfs') || ($opt =~ m/^mp\d+$/)) {
		    my $mp = $opt eq 'rootfs' ?
			PVE::LXC::Config->parse_ct_rootfs($value) :
			PVE::LXC::Config->parse_ct_mountpoint($value);

		    if ($mp->{type} eq 'volume') {
			my $volid = $mp->{volume};
			if ($param->{full}) {
			    # full clone is not implemented yet; the feature
			    # check below is currently unreachable
			    die "fixme: full clone not implemented";

			    die "Full clone feature for '$volid' is not available\n"
				if !PVE::Storage::volume_has_feature($storecfg, 'copy', $volid, $snapname, $running);
			    $fullclone->{$opt} = 1;
			} else {
			    # not full means clone instead of copy
			    die "Linked clone feature for '$volid' is not available\n"
				if !PVE::Storage::volume_has_feature($storecfg, 'clone', $volid, $snapname, $running);
			}

			$mountpoints->{$opt} = $mp;
			push @$vollist, $volid;

		    } else {
			# TODO: allow bind mounts?
			die "unable to clone mountpint '$opt' (type $mp->{type})\n";
		    }

		} else {
		    # copy everything else
		    $newconf->{$opt} = $value;
		}
	    }

	    # a clone is never a template, regardless of the source config
	    delete $newconf->{template};
	    if ($param->{hostname}) {
		$newconf->{hostname} = $param->{hostname};
	    }

	    if ($param->{description}) {
		$newconf->{description} = $param->{description};
	    }

	    # create empty/temp config - this fails if CT already exists on other node
	    PVE::Tools::file_set_contents($conffile, "# ctclone temporary file\nlock: clone\n");

	    my $realcmd = sub {
		my $upid = shift;

		# volumes created so far, freed again on error
		my $newvollist = [];

		eval {
		    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };

		    PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);

		    foreach my $opt (keys %$mountpoints) {
			my $mp = $mountpoints->{$opt};
			my $volid = $mp->{volume};

			if ($fullclone->{$opt}) {
			    die "fixme: full clone not implemented\n";
			} else {
			    print "create linked clone of mount point $opt ($volid)\n";
			    my $newvolid = PVE::Storage::vdisk_clone($storecfg, $volid, $newid, $snapname);
			    push @$newvollist, $newvolid;
			    $mp->{volume} = $newvolid;

			    # write config after every volume so a partial
			    # failure leaves all created volumes referenced
			    $newconf->{$opt} = PVE::LXC::Config->print_ct_mountpoint($mp, $opt eq 'rootfs');
			    PVE::LXC::Config->write_config($newid, $newconf);
			}
		    }

		    # all volumes cloned - release the 'clone' lock
		    delete $newconf->{lock};
		    PVE::LXC::Config->write_config($newid, $newconf);

		    PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
		};
		if (my $err = $@) {
		    # roll back: remove the temp config and free created volumes
		    unlink $conffile;

		    sleep 1; # some storage like rbd need to wait before release volume - really?

		    foreach my $volid (@$newvollist) {
			eval { PVE::Storage::vdisk_free($storecfg, $volid); };
			warn $@ if $@;
		    }
		    die "clone failed: $err";
		}

		return;
	    };

	    # copy the firewall config alongside the CT config
	    PVE::Firewall::clone_vmfw_conf($vmid, $newid);

	    return $rpcenv->fork_worker('vzclone', $vmid, $authuser, $realcmd);

	};

	return PVE::LXC::Config->lock_config($vmid, $clonefn);
    }});
1339 | ||
1340 | ||
# Resize (grow only) a container mount point volume, and - for raw volumes -
# the ext4 filesystem on it. Returns a worker task ID.
__PACKAGE__->register_method({
    name => 'resize_vm',
    path => '{vmid}/resize',
    method => 'PUT',
    protected => 1,
    proxyto => 'node',
    description => "Resize a container mount point.",
    permissions => {
	check => ['perm', '/vms/{vmid}', ['VM.Config.Disk'], any => 1],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
	    disk => {
		type => 'string',
		description => "The disk you want to resize.",
		enum => [PVE::LXC::Config->mountpoint_names()],
	    },
	    size => {
		type => 'string',
		pattern => '\+?\d+(\.\d+)?[KMGT]?',
		description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
	    },
	    digest => {
		type => 'string',
		description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
		maxLength => 40,
		optional => 1,
	    }
	},
    },
    returns => {
	type => 'string',
	description => "the task ID.",
    },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();

	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');

	my $vmid = extract_param($param, 'vmid');

	my $digest = extract_param($param, 'digest');

	my $sizestr = extract_param($param, 'size');
	# a leading '+' means "grow by this amount" instead of an absolute size
	my $ext = ($sizestr =~ s/^\+//);
	my $newsize = PVE::JSONSchema::parse_size($sizestr);
	# trailing "\n" keeps perl from appending "at FILE line N" to the
	# API-visible error (consistent with the other die calls in this file)
	die "invalid size string\n" if !defined($newsize);

	die "no options specified\n" if !scalar(keys %$param);

	PVE::LXC::check_ct_modify_config_perm($rpcenv, $authuser, $vmid, undef, $param, []);

	my $storage_cfg = cfs_read_file("storage.cfg");

	# everything below runs under the per-CT config lock
	my $code = sub {

	    my $conf = PVE::LXC::Config->load_config($vmid);
	    PVE::LXC::Config->check_lock($conf);

	    PVE::Tools::assert_if_modified($digest, $conf->{digest});

	    my $running = PVE::LXC::check_running($vmid);

	    my $disk = $param->{disk};
	    my $mp = $disk eq 'rootfs' ? PVE::LXC::Config->parse_ct_rootfs($conf->{$disk}) :
		PVE::LXC::Config->parse_ct_mountpoint($conf->{$disk});

	    my $volid = $mp->{volume};

	    my (undef, undef, $owner, undef, undef, undef, $format) =
		PVE::Storage::parse_volname($storage_cfg, $volid);

	    die "can't resize mount point owned by another container ($owner)\n"
		if $vmid != $owner;

	    die "can't resize volume: $disk if snapshot exists\n"
		if %{$conf->{snapshots}} && $format eq 'qcow2';

	    my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);

	    $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);

	    PVE::Storage::activate_volumes($storage_cfg, [$volid]);

	    my $size = PVE::Storage::volume_size_info($storage_cfg, $volid, 5);
	    $newsize += $size if $ext;
	    $newsize = int($newsize);

	    die "unable to shrink disk size\n" if $newsize < $size;

	    # nothing to do - don't fork a worker for a no-op
	    return if $size == $newsize;

	    PVE::Cluster::log_msg('info', $authuser, "update CT $vmid: resize --disk $disk --size $sizestr");
	    my $realcmd = sub {
		# Note: PVE::Storage::volume_resize doesn't do anything if $running=1, so
		# we pass 0 here (parameter only makes sense for qemu)
		PVE::Storage::volume_resize($storage_cfg, $volid, $newsize, 0);

		$mp->{size} = $newsize;
		$conf->{$disk} = PVE::LXC::Config->print_ct_mountpoint($mp, $disk eq 'rootfs');

		PVE::LXC::Config->write_config($vmid, $conf);

		if ($format eq 'raw') {
		    # raw volumes carry the filesystem directly, so it has to
		    # be grown as well (other formats are handled by storage)
		    my $path = PVE::Storage::path($storage_cfg, $volid, undef);
		    if ($running) {

			$mp->{mp} = '/';
			my $use_loopdev = (PVE::LXC::mountpoint_mount_path($mp, $storage_cfg))[1];
			$path = PVE::LXC::query_loopdev($path) if $use_loopdev;
			die "internal error: CT running but mount point not attached to a loop device\n"
			    if !$path;
			PVE::Tools::run_command(['losetup', '--set-capacity', $path]) if $use_loopdev;

			# In order for resize2fs to know that we need online-resizing a mountpoint needs
			# to be visible to it in its namespace.
			# To not interfere with the rest of the system we unshare the current mount namespace,
			# mount over /tmp and then run resize2fs.

			# interestingly we don't need to e2fsck on mounted systems...
			my $quoted = PVE::Tools::shellquote($path);
			my $cmd = "mount --make-rprivate / && mount $quoted /tmp && resize2fs $quoted";
			eval {
			    PVE::Tools::run_command(['unshare', '-m', '--', 'sh', '-c', $cmd]);
			};
			warn "Failed to update the container's filesystem: $@\n" if $@;
		    } else {
			# offline: fsck first, then grow the filesystem
			eval {
			    PVE::Tools::run_command(['e2fsck', '-f', '-y', $path]);
			    PVE::Tools::run_command(['resize2fs', $path]);
			};
			warn "Failed to update the container's filesystem: $@\n" if $@;
		    }
		}
	    };

	    return $rpcenv->fork_worker('resize', $vmid, $authuser, $realcmd);
	};

	return PVE::LXC::Config->lock_config($vmid, $code);
    }});
1489 | ||
1490 | 1; |