package PVE::CLI::pvecm;

use strict;
use warnings;

use Cwd qw(getcwd);
use File::Path;
use File::Basename;
use Time::HiRes qw(usleep);

use PVE::Tools qw(run_command);
use PVE::Cluster;
use PVE::INotify;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RPCEnvironment;
use PVE::CLIHandler;
use PVE::PTY;
use PVE::API2::ClusterConfig;
use PVE::Corosync;
use PVE::Cluster::Setup;

use base qw(PVE::CLIHandler);

$ENV{HOME} = '/root'; # for ssh-copy-id

my $basedir = "/etc/pve";
my $clusterconf = "$basedir/corosync.conf";
my $libdir = "/var/lib/pve-cluster";
my $authfile = "/etc/corosync/authkey";


sub setup_environment {
    PVE::RPCEnvironment->setup_default_cli_env();
}

__PACKAGE__->register_method ({
    name => 'keygen',
    path => 'keygen',
    method => 'PUT',
    description => "Generate new cryptographic key for corosync.",
    parameters => {
        additionalProperties => 0,
        properties => {
            filename => {
                type => 'string',
                description => "Output file name"
            }
        },
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        my $filename = $param->{filename};

        # test EUID
        $> == 0 || die "Error: Authorization key must be generated as root user.\n";
        my $dirname = dirname($filename);

        die "key file '$filename' already exists\n" if -e $filename;

        File::Path::make_path($dirname) if $dirname;

        run_command(['corosync-keygen', '-l', '-k', $filename]);

        return undef;
    }});

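# Usage sketch (illustrative, not upstream documentation): on the node that
# creates the cluster, e.g.
#   pvecm keygen /etc/corosync/authkey
# This is a thin wrapper around `corosync-keygen -l -k <filename>`; it must run
# as root and refuses to overwrite an existing key file.
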
my $foreach_member = sub {
    my ($code, $noerr) = @_;

    my $members = PVE::Cluster::get_members();
    foreach my $node (sort keys %$members) {
        if (my $ip = $members->{$node}->{ip}) {
            $code->($node, $ip);
        } else {
            die "cannot get the cluster IP for node '$node'.\n" if !$noerr;
            warn "cannot get the cluster IP for node '$node'.\n";
            return undef;
        }
    }
};

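# Usage sketch for the helper above (hypothetical example, for illustration):
#   $foreach_member->(sub {
#       my ($node, $ip) = @_;
#       run_command(['ssh', '-o', 'BatchMode=yes', "root\@$ip", '--', 'uptime']);
#   });
# Without $noerr a member lacking a resolvable cluster IP aborts with die();
# with $noerr set it only warns and stops the iteration early.
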
__PACKAGE__->register_method ({
    name => 'setup_qdevice',
    path => 'setup_qdevice',
    method => 'PUT',
    description => "Set up the use of a QDevice",
    parameters => {
        additionalProperties => 0,
        properties => {
            address => {
                type => 'string', format => 'ip',
                description => "Specifies the network address of an external corosync QDevice",
            },
            network => {
                type => 'string',
                format => 'CIDR',
                description => 'The network which should be used to connect to the external qdevice',
                optional => 1,
            },
            force => {
                type => 'boolean',
                description => "Do not throw an error on possibly dangerous operations.",
                optional => 1,
            },
        },
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        PVE::Corosync::check_conf_exists();

        my $members = PVE::Cluster::get_members();
        foreach my $node (sort keys %$members) {
            die "All nodes must be online! Node $node is offline, aborting.\n"
                if !$members->{$node}->{online};
        }

        my $conf = PVE::Cluster::cfs_read_file("corosync.conf");

        die "QDevice already configured!\n"
            if defined($conf->{main}->{quorum}->{device}) && !$param->{force};

        my $network = $param->{network};

        my $model = "net";
        my $algorithm = 'ffsplit';
        if (scalar(%{$members}) & 1) {
            if ($param->{force}) {
                $algorithm = 'lms';
            } else {
                die "Clusters with an odd node count are not officially supported!\n";
            }
        }

        my $qnetd_addr = $param->{address};
        my $base_dir = "/etc/corosync/qdevice/net";
        my $db_dir_qnetd = "/etc/corosync/qnetd/nssdb";
        my $db_dir_node = "$base_dir/nssdb";
        my $ca_export_base = "qnetd-cacert.crt";
        my $ca_export_file = "$db_dir_qnetd/$ca_export_base";
        my $crq_file_base = "qdevice-net-node.crq";
        my $p12_file_base = "qdevice-net-node.p12";
        my $qdevice_certutil = "corosync-qdevice-net-certutil";
        my $qnetd_certutil = "corosync-qnetd-certutil";
        my $clustername = $conf->{main}->{totem}->{cluster_name};

        run_command(['ssh-copy-id', '-i', '/root/.ssh/id_rsa', "root\@$qnetd_addr"]);

        if (-d $db_dir_node) {
            # FIXME: check on all nodes?!
            if ($param->{force}) {
                rmtree $db_dir_node;
            } else {
                die "QDevice certificate store already initialised, set force to delete!\n";
            }
        }

        my $ssh_cmd = ['ssh', '-o', 'BatchMode=yes', '-lroot'];
        my $scp_cmd = ['scp', '-o', 'BatchMode=yes'];

        print "\nINFO: initializing qnetd server\n";
        run_command(
            [@$ssh_cmd, $qnetd_addr, $qnetd_certutil, "-i"],
            noerr => 1
        );

        print "\nINFO: copying CA cert and initializing on all nodes\n";
        run_command([@$scp_cmd, "root\@\[$qnetd_addr\]:$ca_export_file", "/etc/pve/$ca_export_base"]);
        $foreach_member->(sub {
            my ($node, $ip) = @_;
            my $outsub = sub { print "\nnode '$node': " . shift };
            run_command(
                [@$ssh_cmd, $ip, $qdevice_certutil, "-i", "-c", "/etc/pve/$ca_export_base"],
                noerr => 1, outfunc => \&$outsub
            );
        });
        unlink "/etc/pve/$ca_export_base";

        print "\nINFO: generating cert request\n";
        run_command([$qdevice_certutil, "-r", "-n", $clustername]);

        print "\nINFO: copying exported cert request to qnetd server\n";
        run_command([@$scp_cmd, "$db_dir_node/$crq_file_base", "root\@\[$qnetd_addr\]:/tmp"]);

        print "\nINFO: sign and export cluster cert\n";
        run_command([
            @$ssh_cmd, $qnetd_addr, $qnetd_certutil, "-s", "-c",
            "/tmp/$crq_file_base", "-n", "$clustername"
        ]);

        print "\nINFO: copy exported CRT\n";
        run_command([
            @$scp_cmd, "root\@\[$qnetd_addr\]:$db_dir_qnetd/cluster-$clustername.crt",
            "$db_dir_node"
        ]);

        print "\nINFO: import certificate\n";
        run_command(["$qdevice_certutil", "-M", "-c", "$db_dir_node/cluster-$clustername.crt"]);

        print "\nINFO: copy and import pk12 cert to all nodes\n";
        run_command([@$scp_cmd, "$db_dir_node/$p12_file_base", "/etc/pve/"]);
        $foreach_member->(sub {
            my ($node, $ip) = @_;
            my $outsub = sub { print "\nnode '$node': " . shift };
            run_command([
                @$ssh_cmd, $ip, "$qdevice_certutil", "-m", "-c",
                "/etc/pve/$p12_file_base"], outfunc => \&$outsub
            );
        });
        unlink "/etc/pve/$p12_file_base";


        my $code = sub {
            my $conf = PVE::Cluster::cfs_read_file("corosync.conf");
            my $quorum_section = $conf->{main}->{quorum};

            die "Qdevice already configured, must be removed before setting up new one!\n"
                if defined($quorum_section->{device}); # must not be forced!

            my $qdev_section = {
                model => $model,
                "$model" => {
                    tls => 'on',
                    host => $qnetd_addr,
                    algorithm => $algorithm,
                }
            };
            $qdev_section->{votes} = 1 if $algorithm eq 'ffsplit';

            $quorum_section->{device} = $qdev_section;

            PVE::Corosync::atomic_write_conf($conf);
        };

        print "\nINFO: add QDevice to cluster configuration\n";
        PVE::Cluster::cfs_lock_file('corosync.conf', 10, $code);
        die $@ if $@;

        $foreach_member->(sub {
            my ($node, $ip) = @_;
            my $outsub = sub { print "\nnode '$node': " . shift };
            print "\nINFO: start and enable corosync qdevice daemon on node '$node'...\n";
            run_command([@$ssh_cmd, $ip, 'systemctl', 'start', 'corosync-qdevice'], outfunc => \&$outsub);
            run_command([@$ssh_cmd, $ip, 'systemctl', 'enable', 'corosync-qdevice'], outfunc => \&$outsub);
        });

        run_command(['corosync-cfgtool', '-R']); # do cluster wide config reload

        return undef;
    }});

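# Result sketch (illustrative; 10.10.10.5 is a placeholder QNetd address):
#   pvecm qdevice setup 10.10.10.5
# ends up writing a quorum device section to corosync.conf roughly like
#   device {
#     model: net
#     votes: 1
#     net {
#       algorithm: ffsplit
#       host: 10.10.10.5
#       tls: on
#     }
#   }
# where 'votes: 1' is only added for the default 'ffsplit' algorithm; an odd
# node count requires --force and switches the algorithm to 'lms'.
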
__PACKAGE__->register_method ({
    name => 'remove_qdevice',
    path => 'remove_qdevice',
    method => 'DELETE',
    description => "Remove a configured QDevice",
    parameters => {
        additionalProperties => 0,
        properties => {},
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        PVE::Corosync::check_conf_exists();

        my $members = PVE::Cluster::get_members();
        foreach my $node (sort keys %$members) {
            die "All nodes must be online! Node $node is offline, aborting.\n"
                if !$members->{$node}->{online};
        }

        my $ssh_cmd = ['ssh', '-o', 'BatchMode=yes', '-lroot'];

        my $code = sub {
            my $conf = PVE::Cluster::cfs_read_file("corosync.conf");
            my $quorum_section = $conf->{main}->{quorum};

            die "No QDevice configured!\n" if !defined($quorum_section->{device});

            delete $quorum_section->{device};

            PVE::Corosync::atomic_write_conf($conf);

            # cleanup qdev state (cert storage)
            my $qdev_state_dir = "/etc/corosync/qdevice";
            $foreach_member->(sub {
                my (undef, $ip) = @_;
                run_command([@$ssh_cmd, $ip, '--', 'rm', '-rf', $qdev_state_dir]);
            });
        };

        PVE::Cluster::cfs_lock_file('corosync.conf', 10, $code);
        die $@ if $@;

        $foreach_member->(sub {
            my (undef, $ip) = @_;
            run_command([@$ssh_cmd, $ip, 'systemctl', 'stop', 'corosync-qdevice']);
            run_command([@$ssh_cmd, $ip, 'systemctl', 'disable', 'corosync-qdevice']);
        });

        run_command(['corosync-cfgtool', '-R']);

        print "\nRemoved Qdevice.\n";

        return undef;
    }});

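# Removal summary (derived from the code above): `pvecm qdevice remove` drops
# the device section from corosync.conf, removes /etc/corosync/qdevice on all
# members, stops and disables the corosync-qdevice service everywhere, and
# triggers a cluster-wide config reload via `corosync-cfgtool -R`.
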
__PACKAGE__->register_method ({
    name => 'add',
    path => 'add',
    method => 'PUT',
    description => "Adds the current node to an existing cluster.",
    parameters => {
        additionalProperties => 0,
        properties => PVE::Corosync::add_corosync_link_properties({
            hostname => {
                type => 'string',
                description => "Hostname (or IP) of an existing cluster member."
            },
            nodeid => get_standard_option('corosync-nodeid'),
            votes => {
                type => 'integer',
                description => "Number of votes for this node",
                minimum => 0,
                optional => 1,
            },
            force => {
                type => 'boolean',
                description => "Do not throw an error if the node already exists.",
                optional => 1,
            },
            fingerprint => get_standard_option('fingerprint-sha256', {
                optional => 1,
            }),
            'use_ssh' => {
                type => 'boolean',
                description => "Always use SSH to join, even if peer may do it over API.",
                optional => 1,
            },
        }),
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        # avoid "transport endpoint not connected" errors that occur if
        # restarting pmxcfs while in fuse-mounted /etc/pve
        die "Navigate out of $basedir before running 'pvecm add', for example by running 'cd'.\n"
            if getcwd() =~ m!^$basedir(/.*)?$!;

        my $nodename = PVE::INotify::nodename();
        my $host = $param->{hostname};

        my $worker = sub {

            if (!$param->{use_ssh}) {
                my $password = PVE::PTY::read_password("Please enter superuser (root) password for '$host': ");

                delete $param->{use_ssh};
                $param->{password} = $password;

                my $local_cluster_lock = "/var/lock/pvecm.lock";
                PVE::Tools::lock_file($local_cluster_lock, 10, \&PVE::Cluster::Setup::join, $param);

                if (my $err = $@) {
                    if (ref($err) eq 'PVE::APIClient::Exception' && defined($err->{code}) && $err->{code} == 501) {
                        $err = "Remote side is not able to use API for Cluster join!\n" .
                            "Pass the 'use_ssh' switch or update the remote side.\n";
                    }
                    die $err;
                }
                return; # all OK, the API join endpoint successfully set us up
            }

            # allow fallback to the old SSH-only join if desired or needed

            my $local_ip_address = PVE::Cluster::remote_node_ip($nodename);
            my $links = PVE::Corosync::extract_corosync_link_args($param);

            PVE::Cluster::Setup::assert_joinable($local_ip_address, $links, $param->{force});

            PVE::Cluster::Setup::setup_sshd_config();
            PVE::Cluster::Setup::setup_rootsshconfig();
            PVE::Cluster::Setup::setup_ssh_keys();

            # make sure known_hosts is on local filesystem
            PVE::Cluster::Setup::ssh_unmerge_known_hosts();

            my $cmd = ['ssh-copy-id', '-i', '/root/.ssh/id_rsa', "root\@$host"];
            run_command(
                $cmd, 'outfunc' => sub {}, 'errfunc' => sub {}, 'errmsg' => "unable to copy ssh ID");

            $cmd = ['ssh', $host, '-o', 'BatchMode=yes', 'pvecm', 'apiver'];
            my $remote_apiver = 0;
            run_command($cmd, 'outfunc' => sub {
                $remote_apiver = shift;
                chomp $remote_apiver;
            }, 'noerr' => 1);

            PVE::Cluster::Setup::assert_we_can_join_cluster_version($remote_apiver);

            $cmd = ['ssh', $host, '-o', 'BatchMode=yes', 'pvecm', 'addnode', $nodename, '--force', 1];

            push @$cmd, '--nodeid', $param->{nodeid} if $param->{nodeid};
            push @$cmd, '--votes', $param->{votes} if defined($param->{votes});

            my $link_desc = get_standard_option('corosync-link');

            foreach my $link (keys %$links) {
                push @$cmd, "--link$link", PVE::JSONSchema::print_property_string(
                    $links->{$link}, $link_desc->{format});
            }

            # this will be used as fallback if no links are specified
            if (!%$links) {
                push @$cmd, '--link0', $local_ip_address if $remote_apiver == 0;
                push @$cmd, '--new_node_ip', $local_ip_address if $remote_apiver >= 1;

                print "No cluster network links passed explicitly, fallback to local node"
                    . " IP '$local_ip_address'\n";
            }

            if (system (@$cmd) != 0) {
                my $cmdtxt = join (' ', @$cmd);
                die "unable to add node: command failed ($cmdtxt)\n";
            }

            my $tmpdir = "$libdir/.pvecm_add.tmp.$$";
            mkdir $tmpdir;

            eval {
                print "copy corosync auth key\n";
                $cmd = ['rsync', '--rsh=ssh -l root -o BatchMode=yes', '-lpgoq',
                    "[$host]:$authfile", "[$host]:$clusterconf", $tmpdir];

                system(@$cmd) == 0 || die "can't rsync data from host '$host'\n";

                my $corosync_conf = PVE::Tools::file_get_contents("$tmpdir/corosync.conf");
                my $corosync_authkey = PVE::Tools::file_get_contents("$tmpdir/authkey");

                PVE::Cluster::Setup::finish_join($nodename, $corosync_conf, $corosync_authkey);
            };
            my $err = $@;

            rmtree $tmpdir;

            die $err if $err;
        };

        # use a synced worker so we get a nice task log when joining through CLI
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        $rpcenv->fork_worker('clusterjoin', '', $authuser, $worker);

        return undef;
    }});

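# Join flow sketch (illustrative invocation, not upstream documentation):
#   pvecm add <existing-member> [--link0 <local-address>] [--use_ssh]
# The default path joins through the existing member's API (prompting for the
# root password); the SSH code above only serves as a fallback for peers that
# lack the join API or when --use_ssh is given explicitly.
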
__PACKAGE__->register_method ({
    name => 'status',
    path => 'status',
    method => 'GET',
    description => "Displays the local view of the cluster status.",
    parameters => {
        additionalProperties => 0,
        properties => {},
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        PVE::Corosync::check_conf_exists();
        my $conf = eval { PVE::Cluster::cfs_read_file("corosync.conf") } // {};
        warn "$@" if $@;
        my $totem = PVE::Corosync::totem_config($conf);

        if (scalar(%$totem)) {
            my $print_info = sub {
                my ($label, $key, $default) = @_;
                my $val = $totem->{$key} // $default;
                printf "%-17s %s\n", "$label:", "$val";
            };

            printf "Cluster information\n";
            printf "-------------------\n";
            $print_info->('Name', 'cluster_name', 'UNKNOWN?');
            $print_info->('Config Version', 'config_version', -1);
            $print_info->('Transport', 'transport', 'knet');
            $print_info->('Secure auth', 'secauth', 'off');
            printf "\n";
        }

        exec ('corosync-quorumtool', '-siH');
        exit (-1); # should not be reached
    }});

__PACKAGE__->register_method ({
    name => 'nodes',
    path => 'nodes',
    method => 'GET',
    description => "Displays the local view of the cluster nodes.",
    parameters => {
        additionalProperties => 0,
        properties => {},
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        PVE::Corosync::check_conf_exists();

        exec ('corosync-quorumtool', '-l');
        exit (-1); # should not be reached
    }});

__PACKAGE__->register_method ({
    name => 'expected',
    path => 'expected',
    method => 'PUT',
    description => "Tells corosync a new value of expected votes.",
    parameters => {
        additionalProperties => 0,
        properties => {
            expected => {
                type => 'integer',
                description => "Expected votes",
                minimum => 1,
            },
        },
    },
    returns => { type => 'null' },

    code => sub {
        my ($param) = @_;

        PVE::Corosync::check_conf_exists();

        exec ('corosync-quorumtool', '-e', $param->{expected});
        exit (-1); # should not be reached
    }});

__PACKAGE__->register_method ({
    name => 'updatecerts',
    path => 'updatecerts',
    method => 'PUT',
    description => "Update node certificates (and generate all needed files/directories).",
    parameters => {
        additionalProperties => 0,
        properties => {
            force => {
                description => "Force generation of new SSL certificate.",
                type => 'boolean',
                optional => 1,
            },
            silent => {
                description => "Ignore errors (e.g., when the cluster has no quorum).",
                type => 'boolean',
                optional => 1,
            },
        },
    },
    returns => { type => 'null' },
    code => sub {
        my ($param) = @_;
        my ($force_new_cert, $silent) = $param->@{qw(force silent)};

        # pveproxy's ExecStartPre calls this, and as we do IO (on /etc/pve) that can hang
        # (uninterruptible D state), we could fail the whole service and leave the API guaranteed
        # to be inaccessible. Rather fail small(er), as the API could still work without this.
        my ($_res, $got_timeout) = PVE::Tools::run_fork_with_timeout(30, sub {
            PVE::Cluster::Setup::generate_local_files();

            for (my $i = 0; !PVE::Cluster::check_cfs_quorum(1); $i++) {
                print "waiting for pmxcfs mount to appear and get quorate...\n" if !$silent && $i % 50 == 0;
                usleep(100 * 1000);
            }

            PVE::Cluster::Setup::updatecerts_and_ssh($force_new_cert, $silent);
            PVE::Cluster::prepare_observed_file_basedirs();
        });
        if ($got_timeout) {
            my $msg = "got timeout when trying to ensure cluster certificates and base file"
                ." hierarchy is set up - no quorum (yet) or hung pmxcfs?\n";
            die $msg if !$silent;
            # this might be unexpected for the $silent case, but in our ExecStartPre use case it's
            # really better than keeping the user completely in the dark, so maybe split/fix params
            warn $msg;
        }

        return undef;
    }});

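# Usage note (assumption about the exact invocation): pveproxy's unit file runs
# something like `pvecm updatecerts --silent 1` from ExecStartPre; the 30 second
# fork timeout above keeps a hung pmxcfs from blocking, and thereby failing,
# the whole pveproxy service start.
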
__PACKAGE__->register_method ({
    name => 'mtunnel',
    path => 'mtunnel',
    method => 'POST',
    description => "Used by VM/CT migration - do not use manually.",
    parameters => {
        additionalProperties => 0,
        properties => {
            get_migration_ip => {
                type => 'boolean',
                default => 0,
                description => 'return the migration IP, if configured',
                optional => 1,
            },
            migration_network => {
                type => 'string',
                format => 'CIDR',
                description => 'the migration network used to detect the local migration IP',
                optional => 1,
            },
            'run-command' => {
                type => 'boolean',
                description => 'Run a command with a TCP socket as standard input.'
                    .' The IP address and port are printed via this'
                    ." command's standard output first, each on a separate line.",
                optional => 1,
            },
            'extra-args' => PVE::JSONSchema::get_standard_option('extra-args'),
        },
    },
    returns => { type => 'null'},
    code => sub {
        my ($param) = @_;

        if (!PVE::Cluster::check_cfs_quorum(1)) {
            print "no quorum\n";
            return undef;
        }

        my $get_local_migration_ip = sub {
            my ($cidr) = @_;

            if (!defined($cidr)) {
                my $dc_conf = PVE::Cluster::cfs_read_file('datacenter.cfg');
                $cidr = $dc_conf->{migration}->{network}
                    if defined($dc_conf->{migration}->{network});
            }

            if (defined($cidr)) {
                my $ips = PVE::Network::get_local_ip_from_cidr($cidr);

                die "could not get migration ip: no IP address configured on local " .
                    "node for network '$cidr'\n" if scalar(@$ips) == 0;

                die "could not get migration ip: multiple, different IP addresses configured for " .
                    "network '$cidr'\n" if scalar(@$ips) > 1 && grep { @$ips[0] ne $_ } @$ips;

                return @$ips[0];
            }

            return undef;
        };

        my $network = $param->{migration_network};
        if ($param->{get_migration_ip}) {
            die "cannot use --run-command with --get_migration_ip\n"
                if $param->{'run-command'};

            if (my $ip = $get_local_migration_ip->($network)) {
                print "ip: '$ip'\n";
            } else {
                print "no ip\n";
            }
            # do not keep tunnel open when asked for migration ip
            return undef;
        }

        if ($param->{'run-command'}) {
            my $cmd = $param->{'extra-args'};
            die "missing command\n"
                if !$cmd || !scalar(@$cmd);

            # Get an ip address to listen on, and find a free migration port
            my ($ip, $family);
            if (defined($network)) {
                $ip = $get_local_migration_ip->($network)
                    or die "failed to get migration IP address to listen on\n";
                $family = PVE::Tools::get_host_address_family($ip);
            } else {
                my $nodename = PVE::INotify::nodename();
                ($ip, $family) = PVE::Network::get_ip_from_hostname($nodename, 0);
            }
            my $port = PVE::Tools::next_migrate_port($family, $ip);

            PVE::Tools::pipe_socket_to_command($cmd, $ip, $port);
            return undef;
        }

        print "tunnel online\n";
        *STDOUT->flush();

        while (my $line = <STDIN>) {
            chomp $line;
            last if $line =~ m/^quit$/;
        }

        return undef;
    }});

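# Protocol sketch (as implemented above): without --get_migration_ip or
# --run-command the call prints "tunnel online" (or "no quorum") and then reads
# stdin line by line until it sees "quit". The migration code on the source
# node drives this; it is not meant for interactive use.
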
13d44dc5 713our $cmddef = {
a755ff54
SR
714 apiver => [ 'PVE::API2::ClusterConfig', 'join_api_version', [], {}, sub {
715 my $apiver = shift;
716 print "$apiver\n";
717 }],
13d44dc5 718 keygen => [ __PACKAGE__, 'keygen', ['filename']],
74e09a93 719 create => [ 'PVE::API2::ClusterConfig', 'create', ['clustername']],
13d44dc5 720 add => [ __PACKAGE__, 'add', ['hostname']],
1d26c202
TL
721 addnode => [ 'PVE::API2::ClusterConfig', 'addnode', ['node']],
722 delnode => [ 'PVE::API2::ClusterConfig', 'delnode', ['node']],
13d44dc5
DM
723 status => [ __PACKAGE__, 'status' ],
724 nodes => [ __PACKAGE__, 'nodes' ],
725 expected => [ __PACKAGE__, 'expected', ['expected']],
726 updatecerts => [ __PACKAGE__, 'updatecerts', []],
ac7a8cf1 727 mtunnel => [ __PACKAGE__, 'mtunnel', ['extra-args']],
63afd01d
OB
728 qdevice => {
729 setup => [ __PACKAGE__, 'setup_qdevice', ['address']],
730 remove => [ __PACKAGE__, 'remove_qdevice', []],
731 }
13d44dc5
DM
732};
733
7341;