pve-manager.git / PVE/API2/Ceph.pm
fix #1430: ceph init: allow to specify separate cluster network
1 package PVE::API2::CephOSD;
2
3 use strict;
4 use warnings;
5
6 use Cwd qw(abs_path);
7 use IO::File;
8
9 use PVE::CephTools;
10 use PVE::Diskmanage;
11 use PVE::Exception qw(raise_param_exc);
12 use PVE::JSONSchema qw(get_standard_option);
13 use PVE::RADOS;
14 use PVE::RESTHandler;
15 use PVE::RPCEnvironment;
16 use PVE::Tools qw(run_command file_set_contents);
17
18 use base qw(PVE::RESTHandler);
19
20 my $get_osd_status = sub {
21 my ($rados, $osdid) = @_;
22
23 my $stat = $rados->mon_command({ prefix => 'osd dump' });
24
25 my $osdlist = $stat->{osds} || [];
26
27 my $flags = $stat->{flags} || undef;
28
29 my $osdstat;
30 foreach my $d (@$osdlist) {
31 $osdstat->{$d->{osd}} = $d if defined($d->{osd});
32 }
33 if (defined($osdid)) {
34 die "no such OSD '$osdid'\n" if !$osdstat->{$osdid};
35 return $osdstat->{$osdid};
36 }
37
38 return wantarray ? ($osdstat, $flags) : $osdstat;
39 };
40
41 my $get_osd_usage = sub {
42 my ($rados) = @_;
43
44 my $osdlist = $rados->mon_command({ prefix => 'pg dump',
45 dumpcontents => [ 'osds' ]}) || [];
46
47 my $osdstat;
48 foreach my $d (@$osdlist) {
49 $osdstat->{$d->{osd}} = $d if defined($d->{osd});
50 }
51
52 return $osdstat;
53 };
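# Usage sketch (illustrative only, not part of the module): both helpers return
# a hash keyed by the numeric OSD id, so callers can join status and usage data
# per OSD, e.g.:
#
#   my $rados = PVE::RADOS->new();
#   my ($status, $flags) = $get_osd_status->($rados);   # ({ 0 => {...}, ... }, "noout,...")
#   my $usage = $get_osd_usage->($rados);                # { 0 => { kb => ..., kb_used => ... }, ... }
#   my $pct = 100 * $usage->{0}->{kb_used} / $usage->{0}->{kb};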
54
55 __PACKAGE__->register_method ({
56 name => 'index',
57 path => '',
58 method => 'GET',
59 description => "Get Ceph osd list/tree.",
60 proxyto => 'node',
61 protected => 1,
62 permissions => {
63 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
64 },
65 parameters => {
66 additionalProperties => 0,
67 properties => {
68 node => get_standard_option('pve-node'),
69 },
70 },
71 # fixme: return a list instead of extjs tree format ?
72 returns => {
73 type => "object",
74 },
75 code => sub {
76 my ($param) = @_;
77
78 PVE::CephTools::check_ceph_inited();
79
80 my $rados = PVE::RADOS->new();
81 my $res = $rados->mon_command({ prefix => 'osd tree' });
82
83 die "no tree nodes found\n" if !($res && $res->{nodes});
84
85 my ($osdhash, $flags) = &$get_osd_status($rados);
86
87 my $usagehash = &$get_osd_usage($rados);
88
89 my $osdmetadata_tmp = $rados->mon_command({ prefix => 'osd metadata' });
90
91 my $osdmetadata = {};
92 foreach my $osd (@$osdmetadata_tmp) {
93 $osdmetadata->{$osd->{id}} = $osd;
94 }
95
96 my $nodes = {};
97 my $newnodes = {};
98 foreach my $e (@{$res->{nodes}}) {
99 $nodes->{$e->{id}} = $e;
100
101 my $new = {
102 id => $e->{id},
103 name => $e->{name},
104 type => $e->{type}
105 };
106
107 foreach my $opt (qw(status crush_weight reweight device_class)) {
108 $new->{$opt} = $e->{$opt} if defined($e->{$opt});
109 }
110
111 if (my $stat = $osdhash->{$e->{id}}) {
112 $new->{in} = $stat->{in} if defined($stat->{in});
113 }
114
115 if (my $stat = $usagehash->{$e->{id}}) {
116 $new->{total_space} = ($stat->{kb} || 1) * 1024;
117 $new->{bytes_used} = ($stat->{kb_used} || 0) * 1024;
118 $new->{percent_used} = ($new->{bytes_used}*100)/$new->{total_space};
119 if (my $d = $stat->{perf_stat}) {
120 $new->{commit_latency_ms} = $d->{commit_latency_ms};
121 $new->{apply_latency_ms} = $d->{apply_latency_ms};
122 }
123 }
124
125 my $osdmd = $osdmetadata->{$e->{id}};
126 if ($e->{type} eq 'osd' && $osdmd) {
127 if ($osdmd->{bluefs}) {
128 $new->{osdtype} = 'bluestore';
129 $new->{blfsdev} = $osdmd->{bluestore_bdev_dev_node};
130 $new->{dbdev} = $osdmd->{bluefs_db_dev_node};
131 $new->{waldev} = $osdmd->{bluefs_wal_dev_node};
132 } else {
133 $new->{osdtype} = 'filestore';
134 }
135 }
136
137 $newnodes->{$e->{id}} = $new;
138 }
139
140 foreach my $e (@{$res->{nodes}}) {
141 my $new = $newnodes->{$e->{id}};
142 if ($e->{children} && scalar(@{$e->{children}})) {
143 $new->{children} = [];
144 $new->{leaf} = 0;
145 foreach my $cid (@{$e->{children}}) {
146 $nodes->{$cid}->{parent} = $e->{id};
147 if ($nodes->{$cid}->{type} eq 'osd' &&
148 $e->{type} eq 'host') {
149 $newnodes->{$cid}->{host} = $e->{name};
150 }
151 push @{$new->{children}}, $newnodes->{$cid};
152 }
153 } else {
154 $new->{leaf} = ($e->{id} >= 0) ? 1 : 0;
155 }
156 }
157
158 my $roots = [];
159 foreach my $e (@{$res->{nodes}}) {
160 if (!$nodes->{$e->{id}}->{parent}) {
161 push @$roots, $newnodes->{$e->{id}};
162 }
163 }
164
165 die "no root node\n" if !@$roots;
166
167 my $data = { root => { leaf => 0, children => $roots } };
168
169 # we want this for the noout flag
170 $data->{flags} = $flags if $flags;
171
172 return $data;
173 }});
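# Example call (assuming this handler is mounted at /nodes/{node}/ceph/osd and
# the standard pvesh CLI is available; NODENAME is a placeholder):
#
#   pvesh get /nodes/NODENAME/ceph/osd
#
# The result has the shape { root => { leaf => 0, children => [...] }, flags => '...' },
# where the children mirror the CRUSH tree (host and osd entries carrying the
# status, usage and latency fields collected above).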
174
175 __PACKAGE__->register_method ({
176 name => 'createosd',
177 path => '',
178 method => 'POST',
179 description => "Create OSD",
180 proxyto => 'node',
181 protected => 1,
182 parameters => {
183 additionalProperties => 0,
184 properties => {
185 node => get_standard_option('pve-node'),
186 dev => {
187 description => "Block device name.",
188 type => 'string',
189 },
190 journal_dev => {
191 description => "Block device name for journal (filestore) or block.db (bluestore).",
192 optional => 1,
193 type => 'string',
194 },
195 wal_dev => {
196 description => "Block device name for block.wal (bluestore only).",
197 optional => 1,
198 type => 'string',
199 },
200 fstype => {
201 description => "File system type (filestore only).",
202 type => 'string',
203 enum => ['xfs', 'ext4'],
204 default => 'xfs',
205 optional => 1,
206 },
207 bluestore => {
208 description => "Use bluestore instead of filestore. This is the default.",
209 type => 'boolean',
210 default => 1,
211 optional => 1,
212 },
213 },
214 },
215 returns => { type => 'string' },
216 code => sub {
217 my ($param) = @_;
218
219 my $rpcenv = PVE::RPCEnvironment::get();
220
221 my $authuser = $rpcenv->get_user();
222
223 raise_param_exc({ 'bluestore' => "conflicts with parameter 'fstype'" })
224 if (defined($param->{fstype}) && defined($param->{bluestore}) && $param->{bluestore});
225
226 PVE::CephTools::check_ceph_inited();
227
228 PVE::CephTools::setup_pve_symlinks();
229
230 PVE::CephTools::check_ceph_installed('ceph_osd');
231
232 my $bluestore = $param->{bluestore} // 1;
233
234 my $journal_dev;
235 my $wal_dev;
236
237 if ($param->{journal_dev} && ($param->{journal_dev} ne $param->{dev})) {
238 $journal_dev = PVE::Diskmanage::verify_blockdev_path($param->{journal_dev});
239 }
240
241 if ($param->{wal_dev} &&
242 ($param->{wal_dev} ne $param->{dev}) &&
243 (!$param->{journal_dev} || $param->{wal_dev} ne $param->{journal_dev})) {
244 raise_param_exc({ 'wal_dev' => "can only be set with parameter 'bluestore'"})
245 if !$bluestore;
246 $wal_dev = PVE::Diskmanage::verify_blockdev_path($param->{wal_dev});
247 }
248
249 $param->{dev} = PVE::Diskmanage::verify_blockdev_path($param->{dev});
250
251 my $devname = $param->{dev};
252 $devname =~ s|/dev/||;
253
254 my $disklist = PVE::Diskmanage::get_disks($devname, 1);
255
256 my $diskinfo = $disklist->{$devname};
257 die "unable to get device info for '$devname'\n"
258 if !$diskinfo;
259
260 die "device '$param->{dev}' is in use\n"
261 if $diskinfo->{used};
262
263 my $devpath = $diskinfo->{devpath};
264 my $rados = PVE::RADOS->new();
265 my $monstat = $rados->mon_command({ prefix => 'mon_status' });
266 die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
267
268 my $fsid = $monstat->{monmap}->{fsid};
269 $fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;
270
271 my $ceph_bootstrap_osd_keyring = PVE::CephTools::get_config('ceph_bootstrap_osd_keyring');
272
273 if (! -f $ceph_bootstrap_osd_keyring) {
274 my $bindata = $rados->mon_command({ prefix => 'auth get', entity => 'client.bootstrap-osd', format => 'plain' });
275 file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
276 };
277
278 my $worker = sub {
279 my $upid = shift;
280
281 my $fstype = $param->{fstype} || 'xfs';
282
283
284 my $ccname = PVE::CephTools::get_config('ccname');
285
286 my $cmd = ['ceph-disk', 'prepare', '--zap-disk',
287 '--cluster', $ccname, '--cluster-uuid', $fsid ];
288
289 if ($bluestore) {
290 print "create OSD on $devpath (bluestore)\n";
291 push @$cmd, '--bluestore';
292
293 if ($journal_dev) {
294 print "using device '$journal_dev' for block.db\n";
295 push @$cmd, '--block.db', $journal_dev;
296 }
297
298 if ($wal_dev) {
299 print "using device '$wal_dev' for block.wal\n";
300 push @$cmd, '--block.wal', $wal_dev;
301 }
302
303 push @$cmd, $devpath;
304 } else {
305 print "create OSD on $devpath ($fstype)\n";
306 push @$cmd, '--filestore', '--fs-type', $fstype;
307 if ($journal_dev) {
308 print "using device '$journal_dev' for journal\n";
309 push @$cmd, '--journal-dev', $devpath, $journal_dev;
310 } else {
311 push @$cmd, $devpath;
312 }
313 }
314
315
316 run_command($cmd);
317 };
318
319 return $rpcenv->fork_worker('cephcreateosd', $devname, $authuser, $worker);
320 }});
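# Invocation sketch (hypothetical device paths, assuming the pvesh CLI):
#
#   pvesh create /nodes/NODENAME/ceph/osd --dev /dev/sdb
#   pvesh create /nodes/NODENAME/ceph/osd --dev /dev/sdb --journal_dev /dev/sdc
#
# The first form creates a bluestore OSD (the default); the second additionally
# places block.db on /dev/sdc. Both return a worker UPID whose output ends up
# in the task log.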
321
322 __PACKAGE__->register_method ({
323 name => 'destroyosd',
324 path => '{osdid}',
325 method => 'DELETE',
326 description => "Destroy OSD",
327 proxyto => 'node',
328 protected => 1,
329 parameters => {
330 additionalProperties => 0,
331 properties => {
332 node => get_standard_option('pve-node'),
333 osdid => {
334 description => 'OSD ID',
335 type => 'integer',
336 },
337 cleanup => {
338 description => "If set, we remove partition table entries.",
339 type => 'boolean',
340 optional => 1,
341 default => 0,
342 },
343 },
344 },
345 returns => { type => 'string' },
346 code => sub {
347 my ($param) = @_;
348
349 my $rpcenv = PVE::RPCEnvironment::get();
350
351 my $authuser = $rpcenv->get_user();
352
353 PVE::CephTools::check_ceph_inited();
354
355 my $osdid = $param->{osdid};
356
357 my $rados = PVE::RADOS->new();
358 my $osdstat = &$get_osd_status($rados, $osdid);
359
360 die "osd is in use (in == 1)\n" if $osdstat->{in};
361 #&$run_ceph_cmd(['osd', 'out', $osdid]);
362
363 die "osd is still runnung (up == 1)\n" if $osdstat->{up};
364
365 my $osdsection = "osd.$osdid";
366
367 my $worker = sub {
368 my $upid = shift;
369
370 # reopen with longer timeout
371 $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
372
373 print "destroy OSD $osdsection\n";
374
375 eval {
376 PVE::CephTools::ceph_service_cmd('stop', $osdsection);
377 PVE::CephTools::ceph_service_cmd('disable', $osdsection);
378 };
379 warn $@ if $@;
380
381 print "Remove $osdsection from the CRUSH map\n";
382 $rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });
383
384 print "Remove the $osdsection authentication key.\n";
385 $rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });
386
387 print "Remove OSD $osdsection\n";
388 $rados->mon_command({ prefix => "osd rm", ids => [ $osdsection ], format => 'plain' });
389
390 # try to unmount from standard mount point
391 my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
392
393 my $disks_to_wipe = {};
394 my $remove_partition = sub {
395 my ($part) = @_;
396
397 return if !$part || (! -b $part );
398 my $partnum = PVE::Diskmanage::get_partnum($part);
399 my $devpath = PVE::Diskmanage::get_blockdev($part);
400
401 print "remove partition $part (disk '${devpath}', partnum $partnum)\n";
402 eval { run_command(['/sbin/sgdisk', '-d', $partnum, "${devpath}"]); };
403 warn $@ if $@;
404
405 $disks_to_wipe->{$devpath} = 1;
406 };
407
408 my $partitions_to_remove = [];
409
410 if ($param->{cleanup}) {
411 if (my $fd = IO::File->new("/proc/mounts", "r")) {
412 while (defined(my $line = <$fd>)) {
413 my ($dev, $path, $fstype) = split(/\s+/, $line);
414 next if !($dev && $path && $fstype);
415 next if $dev !~ m|^/dev/|;
416 if ($path eq $mountpoint) {
417 my $data_part = abs_path($dev);
418 push @$partitions_to_remove, $data_part;
419 last;
420 }
421 }
422 close($fd);
423 }
424
425 foreach my $path (qw(journal block block.db block.wal)) {
426 my $part = abs_path("$mountpoint/$path");
427 if ($part) {
428 push @$partitions_to_remove, $part;
429 }
430 }
431 }
432
433 print "Unmount OSD $osdsection from $mountpoint\n";
434 eval { run_command(['/bin/umount', $mountpoint]); };
435 if (my $err = $@) {
436 warn $err;
437 } elsif ($param->{cleanup}) {
438 #be aware of the ceph udev rules which can remount.
439 foreach my $part (@$partitions_to_remove) {
440 $remove_partition->($part);
441 }
442 my @wipe_cmd = qw(/bin/dd if=/dev/zero bs=1M count=200 conv=fdatasync);
443 foreach my $devpath (keys %$disks_to_wipe) {
444 print "wipe disk: $devpath\n";
445 eval { run_command([@wipe_cmd, "of=${devpath}"]) };
446 warn $@ if $@;
447 }
448 }
449 };
450
451 return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
452 }});
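# Typical removal sequence sketch (OSD id 7 and NODENAME are placeholders; the
# handler above refuses OSDs that are still 'in' or 'up'):
#
#   pvesh create /nodes/NODENAME/ceph/osd/7/out     # mark out, wait for data migration
#   systemctl stop ceph-osd@7.service               # the DELETE below dies while the OSD is up
#   pvesh delete /nodes/NODENAME/ceph/osd/7 --cleanup 1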
453
454 __PACKAGE__->register_method ({
455 name => 'in',
456 path => '{osdid}/in',
457 method => 'POST',
458 description => "ceph osd in",
459 proxyto => 'node',
460 protected => 1,
461 permissions => {
462 check => ['perm', '/', [ 'Sys.Modify' ]],
463 },
464 parameters => {
465 additionalProperties => 0,
466 properties => {
467 node => get_standard_option('pve-node'),
468 osdid => {
469 description => 'OSD ID',
470 type => 'integer',
471 },
472 },
473 },
474 returns => { type => "null" },
475 code => sub {
476 my ($param) = @_;
477
478 PVE::CephTools::check_ceph_inited();
479
480 my $osdid = $param->{osdid};
481
482 my $rados = PVE::RADOS->new();
483
484 my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
485
486 my $osdsection = "osd.$osdid";
487
488 $rados->mon_command({ prefix => "osd in", ids => [ $osdsection ], format => 'plain' });
489
490 return undef;
491 }});
492
493 __PACKAGE__->register_method ({
494 name => 'out',
495 path => '{osdid}/out',
496 method => 'POST',
497 description => "ceph osd out",
498 proxyto => 'node',
499 protected => 1,
500 permissions => {
501 check => ['perm', '/', [ 'Sys.Modify' ]],
502 },
503 parameters => {
504 additionalProperties => 0,
505 properties => {
506 node => get_standard_option('pve-node'),
507 osdid => {
508 description => 'OSD ID',
509 type => 'integer',
510 },
511 },
512 },
513 returns => { type => "null" },
514 code => sub {
515 my ($param) = @_;
516
517 PVE::CephTools::check_ceph_inited();
518
519 my $osdid = $param->{osdid};
520
521 my $rados = PVE::RADOS->new();
522
523 my $osdstat = &$get_osd_status($rados, $osdid); # osd exists?
524
525 my $osdsection = "osd.$osdid";
526
527 $rados->mon_command({ prefix => "osd out", ids => [ $osdsection ], format => 'plain' });
528
529 return undef;
530 }});
531
532 package PVE::API2::Ceph;
533
534 use strict;
535 use warnings;
536
537 use File::Path;
538 use Net::IP;
539 use UUID;
540
541 use PVE::CephTools;
542 use PVE::Cluster;
543 use PVE::JSONSchema qw(get_standard_option);
544 use PVE::Network;
545 use PVE::RADOS;
546 use PVE::RESTHandler;
547 use PVE::RPCEnvironment;
548 use PVE::Storage;
549 use PVE::Tools qw(run_command file_get_contents file_set_contents);
550
551 use PVE::API2::Ceph::FS;
552 use PVE::API2::Ceph::MDS;
553 use PVE::API2::Storage::Config;
554
555 use base qw(PVE::RESTHandler);
556
557 my $pve_osd_default_journal_size = 1024*5;
558
559 __PACKAGE__->register_method ({
560 subclass => "PVE::API2::CephOSD",
561 path => 'osd',
562 });
563
564 __PACKAGE__->register_method ({
565 subclass => "PVE::API2::Ceph::MDS",
566 path => 'mds',
567 });
568
569 __PACKAGE__->register_method ({
570 subclass => "PVE::API2::Ceph::FS",
571 path => 'fs',
572 });
573
574 __PACKAGE__->register_method ({
575 name => 'index',
576 path => '',
577 method => 'GET',
578 description => "Directory index.",
580 permissions => {
581 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
582 },
583 parameters => {
584 additionalProperties => 0,
585 properties => {
586 node => get_standard_option('pve-node'),
587 },
588 },
589 returns => {
590 type => 'array',
591 items => {
592 type => "object",
593 properties => {},
594 },
595 links => [ { rel => 'child', href => "{name}" } ],
596 },
597 code => sub {
598 my ($param) = @_;
599
600 my $result = [
601 { name => 'init' },
602 { name => 'mon' },
603 { name => 'osd' },
604 { name => 'pools' },
605 { name => 'fs' },
606 { name => 'mds' },
607 { name => 'stop' },
608 { name => 'start' },
609 { name => 'status' },
610 { name => 'crush' },
611 { name => 'config' },
612 { name => 'log' },
613 { name => 'disks' },
614 { name => 'flags' },
615 { name => 'rules' },
616 ];
617
618 return $result;
619 }});
620
621 __PACKAGE__->register_method ({
622 name => 'disks',
623 path => 'disks',
624 method => 'GET',
625 description => "List local disks.",
626 proxyto => 'node',
627 protected => 1,
628 permissions => {
629 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
630 },
631 parameters => {
632 additionalProperties => 0,
633 properties => {
634 node => get_standard_option('pve-node'),
635 type => {
636 description => "Only list specific types of disks.",
637 type => 'string',
638 enum => ['unused', 'journal_disks'],
639 optional => 1,
640 },
641 },
642 },
643 returns => {
644 type => 'array',
645 items => {
646 type => "object",
647 properties => {
648 dev => { type => 'string' },
649 used => { type => 'string', optional => 1 },
650 gpt => { type => 'boolean' },
651 size => { type => 'integer' },
652 osdid => { type => 'integer' },
653 vendor => { type => 'string', optional => 1 },
654 model => { type => 'string', optional => 1 },
655 serial => { type => 'string', optional => 1 },
656 },
657 },
658 # links => [ { rel => 'child', href => "{}" } ],
659 },
660 code => sub {
661 my ($param) = @_;
662
663 PVE::CephTools::check_ceph_inited();
664
665 my $disks = PVE::Diskmanage::get_disks(undef, 1);
666
667 my $res = [];
668 foreach my $dev (keys %$disks) {
669 my $d = $disks->{$dev};
670 if ($param->{type}) {
671 if ($param->{type} eq 'journal_disks') {
672 next if $d->{osdid} >= 0;
673 next if !$d->{gpt};
674 } elsif ($param->{type} eq 'unused') {
675 next if $d->{used};
676 } else {
677 die "internal error"; # should not happen
678 }
679 }
680
681 $d->{dev} = "/dev/$dev";
682 push @$res, $d;
683 }
684
685 return $res;
686 }});
687
688 __PACKAGE__->register_method ({
689 name => 'config',
690 path => 'config',
691 method => 'GET',
692 permissions => {
693 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
694 },
695 description => "Get Ceph configuration.",
696 parameters => {
697 additionalProperties => 0,
698 properties => {
699 node => get_standard_option('pve-node'),
700 },
701 },
702 returns => { type => 'string' },
703 code => sub {
704 my ($param) = @_;
705
706 PVE::CephTools::check_ceph_inited();
707
708 my $path = PVE::CephTools::get_config('pve_ceph_cfgpath');
709 return file_get_contents($path);
710
711 }});
712
713 my $add_storage = sub {
714 my ($pool, $storeid) = @_;
715
716 my $storage_params = {
717 type => 'rbd',
718 pool => $pool,
719 storage => $storeid,
720 krbd => 0,
721 content => 'rootdir,images',
722 };
723
724 PVE::API2::Storage::Config->create($storage_params);
725 };
726
727 my $get_storages = sub {
728 my ($pool) = @_;
729
730 my $cfg = PVE::Storage::config();
731
732 my $storages = $cfg->{ids};
733 my $res = {};
734 foreach my $storeid (keys %$storages) {
735 my $curr = $storages->{$storeid};
736 $res->{$storeid} = $storages->{$storeid}
737 if $curr->{type} eq 'rbd' && $pool eq $curr->{pool};
738 }
739
740 return $res;
741 };
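# For reference, $add_storage above results in a storage.cfg entry roughly like
# the following (sketch only; the file is written by PVE::API2::Storage::Config):
#
#   rbd: mypool
#       pool mypool
#       content rootdir,images
#       krbd 0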
742
743 __PACKAGE__->register_method ({
744 name => 'listmon',
745 path => 'mon',
746 method => 'GET',
747 description => "Get Ceph monitor list.",
748 proxyto => 'node',
749 protected => 1,
750 permissions => {
751 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
752 },
753 parameters => {
754 additionalProperties => 0,
755 properties => {
756 node => get_standard_option('pve-node'),
757 },
758 },
759 returns => {
760 type => 'array',
761 items => {
762 type => "object",
763 properties => {
764 name => { type => 'string' },
765 addr => { type => 'string' },
766 },
767 },
768 links => [ { rel => 'child', href => "{name}" } ],
769 },
770 code => sub {
771 my ($param) = @_;
772
773 PVE::CephTools::check_ceph_inited();
774
775 my $res = [];
776
777 my $cfg = PVE::CephTools::parse_ceph_config();
778
779 my $monhash = {};
780 foreach my $section (keys %$cfg) {
781 my $d = $cfg->{$section};
782 if ($section =~ m/^mon\.(\S+)$/) {
783 my $monid = $1;
784 if ($d->{'mon addr'} && $d->{'host'}) {
785 $monhash->{$monid} = {
786 addr => $d->{'mon addr'},
787 host => $d->{'host'},
788 name => $monid,
789 }
790 }
791 }
792 }
793
794 eval {
795 my $rados = PVE::RADOS->new();
796 my $monstat = $rados->mon_command({ prefix => 'mon_status' });
797 my $mons = $monstat->{monmap}->{mons};
798 foreach my $d (@$mons) {
799 next if !defined($d->{name});
800 $monhash->{$d->{name}}->{rank} = $d->{rank};
801 $monhash->{$d->{name}}->{addr} = $d->{addr};
802 if (grep { $_ eq $d->{rank} } @{$monstat->{quorum}}) {
803 $monhash->{$d->{name}}->{quorum} = 1;
804 }
805 }
806 };
807 warn $@ if $@;
808
809 return PVE::RESTHandler::hash_to_array($monhash, 'name');
810 }});
811
812 __PACKAGE__->register_method ({
813 name => 'init',
814 path => 'init',
815 method => 'POST',
816 description => "Create initial ceph default configuration and setup symlinks.",
817 proxyto => 'node',
818 protected => 1,
819 permissions => {
820 check => ['perm', '/', [ 'Sys.Modify' ]],
821 },
822 parameters => {
823 additionalProperties => 0,
824 properties => {
825 node => get_standard_option('pve-node'),
826 network => {
827 description => "Use specific network for all ceph related traffic",
828 type => 'string', format => 'CIDR',
829 optional => 1,
830 maxLength => 128,
831 },
832 'cluster-network' => {
833 description => "Declare a separate cluster network, OSDs will route" .
834 "heartbeat, object replication and recovery traffic over it",
835 type => 'string', format => 'CIDR',
836 requires => 'network',
837 optional => 1,
838 maxLength => 128,
839 },
840 size => {
841 description => 'Targeted number of replicas per object',
842 type => 'integer',
843 default => 3,
844 optional => 1,
845 minimum => 1,
846 maximum => 7,
847 },
848 min_size => {
849 description => 'Minimum number of available replicas per object to allow I/O',
850 type => 'integer',
851 default => 2,
852 optional => 1,
853 minimum => 1,
854 maximum => 7,
855 },
856 pg_bits => {
857 description => "Placement group bits, used to specify the " .
858 "default number of placement groups.\n\nNOTE: 'osd pool " .
859 "default pg num' does not work for default pools.",
860 type => 'integer',
861 default => 6,
862 optional => 1,
863 minimum => 6,
864 maximum => 14,
865 },
866 disable_cephx => {
867 description => "Disable cephx authentification.\n\n" .
868 "WARNING: cephx is a security feature protecting against " .
869 "man-in-the-middle attacks. Only consider disabling cephx ".
870 "if your network is private!",
871 type => 'boolean',
872 optional => 1,
873 default => 0,
874 },
875 },
876 },
877 returns => { type => 'null' },
878 code => sub {
879 my ($param) = @_;
880
881 my $version = PVE::CephTools::get_local_version(1);
882
883 if (!$version || $version < 12) {
884 die "Ceph Luminous required - please run 'pveceph install'\n";
885 } else {
886 PVE::CephTools::check_ceph_installed('ceph_bin');
887 }
888
889 # simply load old config if it already exists
890 my $cfg = PVE::CephTools::parse_ceph_config();
891
892 if (!$cfg->{global}) {
893
894 my $fsid;
895 my $uuid;
896
897 UUID::generate($uuid);
898 UUID::unparse($uuid, $fsid);
899
900 my $auth = $param->{disable_cephx} ? 'none' : 'cephx';
901
902 $cfg->{global} = {
903 'fsid' => $fsid,
904 'auth cluster required' => $auth,
905 'auth service required' => $auth,
906 'auth client required' => $auth,
907 'osd journal size' => $pve_osd_default_journal_size,
908 'osd pool default size' => $param->{size} // 3,
909 'osd pool default min size' => $param->{min_size} // 2,
910 'mon allow pool delete' => 'true',
911 };
912
913 # this does not work for default pools
914 #'osd pool default pg num' => $pg_num,
915 #'osd pool default pgp num' => $pg_num,
916 }
917
918 $cfg->{global}->{keyring} = '/etc/pve/priv/$cluster.$name.keyring';
919 $cfg->{osd}->{keyring} = '/var/lib/ceph/osd/ceph-$id/keyring';
920
921 if ($param->{pg_bits}) {
922 $cfg->{global}->{'osd pg bits'} = $param->{pg_bits};
923 $cfg->{global}->{'osd pgp bits'} = $param->{pg_bits};
924 }
925
926 if ($param->{network}) {
927 $cfg->{global}->{'public network'} = $param->{network};
928 $cfg->{global}->{'cluster network'} = $param->{network};
929 }
930
931 if ($param->{'cluster-network'}) {
932 $cfg->{global}->{'cluster network'} = $param->{'cluster-network'};
933 }
934
935 PVE::CephTools::write_ceph_config($cfg);
936
937 PVE::CephTools::setup_pve_symlinks();
938
939 return undef;
940 }});
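# Example of the [global] section this handler writes to ceph.conf (values taken
# from the parameters above; fsid and the networks are placeholders):
#
#   [global]
#        fsid = 00000000-0000-0000-0000-000000000000
#        auth cluster required = cephx
#        auth service required = cephx
#        auth client required = cephx
#        osd journal size = 5120
#        osd pool default size = 3
#        osd pool default min size = 2
#        mon allow pool delete = true
#        public network = 192.0.2.0/24
#        cluster network = 198.51.100.0/24
#        keyring = /etc/pve/priv/$cluster.$name.keyring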
941
942 my $find_mon_ip = sub {
943 my ($pubnet, $node, $overwrite_ip) = @_;
944
945 if (!$pubnet) {
946 return $overwrite_ip // PVE::Cluster::remote_node_ip($node);
947 }
948
949 my $allowed_ips = PVE::Network::get_local_ip_from_cidr($pubnet);
950 die "No IP configured and up from ceph public network '$pubnet'\n"
951 if scalar(@$allowed_ips) < 1;
952
953 if (!$overwrite_ip) {
954 if (scalar(@$allowed_ips) == 1) {
955 return $allowed_ips->[0];
956 }
957 die "Multiple IPs for ceph public network '$pubnet' detected on $node:\n".
958 join("\n", @$allowed_ips) ."\nuse 'mon-address' to specify one of them.\n";
959 } else {
960 if (grep { $_ eq $overwrite_ip } @$allowed_ips) {
961 return $overwrite_ip;
962 }
963 die "Monitor IP '$overwrite_ip' not in ceph public network '$pubnet'\n"
964 if !PVE::Network::is_ip_in_cidr($overwrite_ip, $pubnet);
965
966 die "Specified monitor IP '$overwrite_ip' not configured or up on $node!\n";
967 }
968 };
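# In short: with no public network configured, the cluster IP (or the given
# override) is used; exactly one local address inside the public network is
# picked automatically; with several, the caller has to pass 'mon-address';
# an override is accepted only if it lies inside the public network and is
# configured and up on the node.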
969
970 my $create_mgr = sub {
971 my ($rados, $id) = @_;
972
973 my $clustername = PVE::CephTools::get_config('ccname');
974 my $mgrdir = "/var/lib/ceph/mgr/$clustername-$id";
975 my $mgrkeyring = "$mgrdir/keyring";
976 my $mgrname = "mgr.$id";
977
978 die "ceph manager directory '$mgrdir' already exists\n"
979 if -d $mgrdir;
980
981 print "creating manager directory '$mgrdir'\n";
982 mkdir $mgrdir;
983 print "creating keys for '$mgrname'\n";
984 my $output = $rados->mon_command({ prefix => 'auth get-or-create',
985 entity => $mgrname,
986 caps => [
987 mon => 'allow profile mgr',
988 osd => 'allow *',
989 mds => 'allow *',
990 ],
991 format => 'plain'});
992 file_set_contents($mgrkeyring, $output);
993
994 print "setting owner for directory\n";
995 run_command(["chown", 'ceph:ceph', '-R', $mgrdir]);
996
997 print "enabling service 'ceph-mgr\@$id.service'\n";
998 PVE::CephTools::ceph_service_cmd('enable', $mgrname);
999 print "starting service 'ceph-mgr\@$id.service'\n";
1000 PVE::CephTools::ceph_service_cmd('start', $mgrname);
1001 };
1002
1003 my $destroy_mgr = sub {
1004 my ($mgrid) = @_;
1005
1006 my $clustername = PVE::CephTools::get_config('ccname');
1007 my $mgrname = "mgr.$mgrid";
1008 my $mgrdir = "/var/lib/ceph/mgr/$clustername-$mgrid";
1009
1010 die "ceph manager directory '$mgrdir' not found\n"
1011 if ! -d $mgrdir;
1012
1013 print "disabling service 'ceph-mgr\@$mgrid.service'\n";
1014 PVE::CephTools::ceph_service_cmd('disable', $mgrname);
1015 print "stopping service 'ceph-mgr\@$mgrid.service'\n";
1016 PVE::CephTools::ceph_service_cmd('stop', $mgrname);
1017
1018 print "removing manager directory '$mgrdir'\n";
1019 File::Path::remove_tree($mgrdir);
1020 };
1021
1022 __PACKAGE__->register_method ({
1023 name => 'createmon',
1024 path => 'mon',
1025 method => 'POST',
1026 description => "Create Ceph Monitor and Manager",
1027 proxyto => 'node',
1028 protected => 1,
1029 permissions => {
1030 check => ['perm', '/', [ 'Sys.Modify' ]],
1031 },
1032 parameters => {
1033 additionalProperties => 0,
1034 properties => {
1035 node => get_standard_option('pve-node'),
1036 id => {
1037 type => 'string',
1038 optional => 1,
1039 pattern => '[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?',
1040 description => "The ID for the monitor, when omitted the same as the nodename",
1041 },
1042 'exclude-manager' => {
1043 type => 'boolean',
1044 optional => 1,
1045 default => 0,
1046 description => "When set, only a monitor will be created.",
1047 },
1048 'mon-address' => {
1049 description => 'Overwrites autodetected monitor IP address. ' .
1050 'Must be in the public network of ceph.',
1051 type => 'string', format => 'ip',
1052 optional => 1,
1053 },
1054 },
1055 },
1056 returns => { type => 'string' },
1057 code => sub {
1058 my ($param) = @_;
1059
1060 PVE::CephTools::check_ceph_installed('ceph_mon');
1061
1062 PVE::CephTools::check_ceph_installed('ceph_mgr')
1063 if (!$param->{'exclude-manager'});
1064
1065 PVE::CephTools::check_ceph_inited();
1066
1067 PVE::CephTools::setup_pve_symlinks();
1068
1069 my $rpcenv = PVE::RPCEnvironment::get();
1070
1071 my $authuser = $rpcenv->get_user();
1072
1073 my $cfg = PVE::CephTools::parse_ceph_config();
1074
1075 my $moncount = 0;
1076
1077 my $monaddrhash = {};
1078
1079 my $systemd_managed = PVE::CephTools::systemd_managed();
1080
1081 foreach my $section (keys %$cfg) {
1082 next if $section eq 'global';
1083 my $d = $cfg->{$section};
1084 if ($section =~ m/^mon\./) {
1085 $moncount++;
1086 if ($d->{'mon addr'}) {
1087 $monaddrhash->{$d->{'mon addr'}} = $section;
1088 }
1089 }
1090 }
1091
1092 my $monid = $param->{id} // $param->{node};
1093
1094 my $monsection = "mon.$monid";
1095 my $pubnet = $cfg->{global}->{'public network'};
1096 my $ip = $find_mon_ip->($pubnet, $param->{node}, $param->{'mon-address'});
1097
1098 my $monaddr = Net::IP::ip_is_ipv6($ip) ? "[$ip]:6789" : "$ip:6789";
1099 my $monname = $param->{node};
1100
1101 die "monitor '$monsection' already exists\n" if $cfg->{$monsection};
1102 die "monitor address '$monaddr' already in use by '$monaddrhash->{$monaddr}'\n"
1103 if $monaddrhash->{$monaddr};
1104
1105 my $worker = sub {
1106 my $upid = shift;
1107
1108 my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
1109
1110 if (! -f $pve_ckeyring_path) {
1111 run_command("ceph-authtool $pve_ckeyring_path --create-keyring " .
1112 "--gen-key -n client.admin");
1113 }
1114
1115 my $pve_mon_key_path = PVE::CephTools::get_config('pve_mon_key_path');
1116 if (! -f $pve_mon_key_path) {
1117 run_command("cp $pve_ckeyring_path $pve_mon_key_path.tmp");
1118 run_command("ceph-authtool $pve_mon_key_path.tmp -n client.admin --set-uid=0 " .
1119 "--cap mds 'allow' " .
1120 "--cap osd 'allow *' " .
1121 "--cap mgr 'allow *' " .
1122 "--cap mon 'allow *'");
1123 run_command("cp $pve_mon_key_path.tmp /etc/ceph/ceph.client.admin.keyring") if $systemd_managed;
1124 run_command("chown ceph:ceph /etc/ceph/ceph.client.admin.keyring") if $systemd_managed;
1125 run_command("ceph-authtool $pve_mon_key_path.tmp --gen-key -n mon. --cap mon 'allow *'");
1126 run_command("mv $pve_mon_key_path.tmp $pve_mon_key_path");
1127 }
1128
1129 my $ccname = PVE::CephTools::get_config('ccname');
1130
1131 my $mondir = "/var/lib/ceph/mon/$ccname-$monid";
1132 -d $mondir && die "monitor filesystem '$mondir' already exists\n";
1133
1134 my $monmap = "/tmp/monmap";
1135
1136 eval {
1137 mkdir $mondir;
1138
1139 run_command("chown ceph:ceph $mondir") if $systemd_managed;
1140
1141 if ($moncount > 0) {
1142 my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
1143 my $mapdata = $rados->mon_command({ prefix => 'mon getmap', format => 'plain' });
1144 file_set_contents($monmap, $mapdata);
1145 } else {
1146 run_command("monmaptool --create --clobber --add $monid $monaddr --print $monmap");
1147 }
1148
1149 run_command("ceph-mon --mkfs -i $monid --monmap $monmap --keyring $pve_mon_key_path");
1150 run_command("chown ceph:ceph -R $mondir") if $systemd_managed;
1151 };
1152 my $err = $@;
1153 unlink $monmap;
1154 if ($err) {
1155 File::Path::remove_tree($mondir);
1156 die $err;
1157 }
1158
1159 $cfg->{$monsection} = {
1160 'host' => $monname,
1161 'mon addr' => $monaddr,
1162 };
1163
1164 PVE::CephTools::write_ceph_config($cfg);
1165
1166 my $create_keys_pid = fork();
1167 if (!defined($create_keys_pid)) {
1168 die "Could not spawn ceph-create-keys to create bootstrap keys\n";
1169 } elsif ($create_keys_pid == 0) {
1170 exit PVE::Tools::run_command(['ceph-create-keys', '-i', $monid]);
1171 } else {
1172 PVE::CephTools::ceph_service_cmd('start', $monsection);
1173
1174 if ($systemd_managed) {
1175 #to ensure we have the correct startup order.
1176 eval { PVE::Tools::run_command(['/bin/systemctl', 'enable', "ceph-mon\@${monid}.service"]); };
1177 warn "Enable ceph-mon\@${monid}.service manually"if $@;
1178 }
1179 waitpid($create_keys_pid, 0);
1180 }
1181
1182 # create manager
1183 if (!$param->{'exclude-manager'}) {
1184 my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
1185 $create_mgr->($rados, $monid);
1186 }
1187 };
1188
1189 return $rpcenv->fork_worker('cephcreatemon', $monsection, $authuser, $worker);
1190 }});
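# Example (assuming the pvesh CLI; the monitor id defaults to the nodename and
# a manager is created alongside unless 'exclude-manager' is set):
#
#   pvesh create /nodes/NODENAME/ceph/mon
#   pvesh create /nodes/NODENAME/ceph/mon --mon-address 192.0.2.11   # placeholder IP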
1191
1192 __PACKAGE__->register_method ({
1193 name => 'destroymon',
1194 path => 'mon/{monid}',
1195 method => 'DELETE',
1196 description => "Destroy Ceph Monitor and Manager.",
1197 proxyto => 'node',
1198 protected => 1,
1199 permissions => {
1200 check => ['perm', '/', [ 'Sys.Modify' ]],
1201 },
1202 parameters => {
1203 additionalProperties => 0,
1204 properties => {
1205 node => get_standard_option('pve-node'),
1206 monid => {
1207 description => 'Monitor ID',
1208 type => 'string',
1209 pattern => '[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?',
1210 },
1211 'exclude-manager' => {
1212 type => 'boolean',
1213 default => 0,
1214 optional => 1,
1215 description => "When set, removes only the monitor, not the manager"
1216 }
1217 },
1218 },
1219 returns => { type => 'string' },
1220 code => sub {
1221 my ($param) = @_;
1222
1223 my $rpcenv = PVE::RPCEnvironment::get();
1224
1225 my $authuser = $rpcenv->get_user();
1226
1227 PVE::CephTools::check_ceph_inited();
1228
1229 my $cfg = PVE::CephTools::parse_ceph_config();
1230
1231 my $monid = $param->{monid};
1232 my $monsection = "mon.$monid";
1233
1234 my $rados = PVE::RADOS->new();
1235 my $monstat = $rados->mon_command({ prefix => 'mon_status' });
1236 my $monlist = $monstat->{monmap}->{mons};
1237
1238 die "no such monitor id '$monid'\n"
1239 if !defined($cfg->{$monsection});
1240
1241 my $ccname = PVE::CephTools::get_config('ccname');
1242
1243 my $mondir = "/var/lib/ceph/mon/$ccname-$monid";
1244 -d $mondir || die "monitor filesystem '$mondir' does not exist on this node\n";
1245
1246 die "can't remove last monitor\n" if scalar(@$monlist) <= 1;
1247
1248 my $worker = sub {
1249 my $upid = shift;
1250
1251 # reopen with longer timeout
1252 $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
1253
1254 $rados->mon_command({ prefix => "mon remove", name => $monid, format => 'plain' });
1255
1256 eval { PVE::CephTools::ceph_service_cmd('stop', $monsection); };
1257 warn $@ if $@;
1258
1259 delete $cfg->{$monsection};
1260 PVE::CephTools::write_ceph_config($cfg);
1261 File::Path::remove_tree($mondir);
1262
1263 # remove manager
1264 if (!$param->{'exclude-manager'}) {
1265 eval { $destroy_mgr->($monid); };
1266 warn $@ if $@;
1267 }
1268 };
1269
1270 return $rpcenv->fork_worker('cephdestroymon', $monsection, $authuser, $worker);
1271 }});
1272
1273 __PACKAGE__->register_method ({
1274 name => 'createmgr',
1275 path => 'mgr',
1276 method => 'POST',
1277 description => "Create Ceph Manager",
1278 proxyto => 'node',
1279 protected => 1,
1280 permissions => {
1281 check => ['perm', '/', [ 'Sys.Modify' ]],
1282 },
1283 parameters => {
1284 additionalProperties => 0,
1285 properties => {
1286 node => get_standard_option('pve-node'),
1287 id => {
1288 type => 'string',
1289 optional => 1,
1290 pattern => '[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?',
1291 description => "The ID for the manager, when omitted the same as the nodename",
1292 },
1293 },
1294 },
1295 returns => { type => 'string' },
1296 code => sub {
1297 my ($param) = @_;
1298
1299 PVE::CephTools::check_ceph_installed('ceph_mgr');
1300
1301 PVE::CephTools::check_ceph_inited();
1302
1303 my $rpcenv = PVE::RPCEnvironment::get();
1304
1305 my $authuser = $rpcenv->get_user();
1306
1307 my $mgrid = $param->{id} // $param->{node};
1308
1309 my $worker = sub {
1310 my $upid = shift;
1311
1312 my $rados = PVE::RADOS->new(timeout => PVE::CephTools::get_config('long_rados_timeout'));
1313
1314 $create_mgr->($rados, $mgrid);
1315 };
1316
1317 return $rpcenv->fork_worker('cephcreatemgr', "mgr.$mgrid", $authuser, $worker);
1318 }});
1319
1320 __PACKAGE__->register_method ({
1321 name => 'destroymgr',
1322 path => 'mgr/{id}',
1323 method => 'DELETE',
1324 description => "Destroy Ceph Manager.",
1325 proxyto => 'node',
1326 protected => 1,
1327 permissions => {
1328 check => ['perm', '/', [ 'Sys.Modify' ]],
1329 },
1330 parameters => {
1331 additionalProperties => 0,
1332 properties => {
1333 node => get_standard_option('pve-node'),
1334 id => {
1335 description => 'The ID of the manager',
1336 type => 'string',
1337 pattern => '[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?',
1338 },
1339 },
1340 },
1341 returns => { type => 'string' },
1342 code => sub {
1343 my ($param) = @_;
1344
1345 my $rpcenv = PVE::RPCEnvironment::get();
1346
1347 my $authuser = $rpcenv->get_user();
1348
1349 PVE::CephTools::check_ceph_inited();
1350
1351 my $mgrid = $param->{id};
1352
1353 my $worker = sub {
1354 my $upid = shift;
1355
1356 $destroy_mgr->($mgrid);
1357 };
1358
1359 return $rpcenv->fork_worker('cephdestroymgr', "mgr.$mgrid", $authuser, $worker);
1360 }});
1361
1362 __PACKAGE__->register_method ({
1363 name => 'stop',
1364 path => 'stop',
1365 method => 'POST',
1366 description => "Stop ceph services.",
1367 proxyto => 'node',
1368 protected => 1,
1369 permissions => {
1370 check => ['perm', '/', [ 'Sys.Modify' ]],
1371 },
1372 parameters => {
1373 additionalProperties => 0,
1374 properties => {
1375 node => get_standard_option('pve-node'),
1376 service => {
1377 description => 'Ceph service name.',
1378 type => 'string',
1379 optional => 1,
1380 default => 'ceph.target',
1381 pattern => '(mon|mds|osd|mgr)\.[A-Za-z0-9\-]{1,32}',
1382 },
1383 },
1384 },
1385 returns => { type => 'string' },
1386 code => sub {
1387 my ($param) = @_;
1388
1389 my $rpcenv = PVE::RPCEnvironment::get();
1390
1391 my $authuser = $rpcenv->get_user();
1392
1393 PVE::CephTools::check_ceph_inited();
1394
1395 my $cfg = PVE::CephTools::parse_ceph_config();
1396 scalar(keys %$cfg) || die "no configuration\n";
1397
1398 my $worker = sub {
1399 my $upid = shift;
1400
1401 my $cmd = ['stop'];
1402 if ($param->{service}) {
1403 push @$cmd, $param->{service};
1404 }
1405
1406 PVE::CephTools::ceph_service_cmd(@$cmd);
1407 };
1408
1409 return $rpcenv->fork_worker('srvstop', $param->{service} || 'ceph',
1410 $authuser, $worker);
1411 }});
1412
1413 __PACKAGE__->register_method ({
1414 name => 'start',
1415 path => 'start',
1416 method => 'POST',
1417 description => "Start ceph services.",
1418 proxyto => 'node',
1419 protected => 1,
1420 permissions => {
1421 check => ['perm', '/', [ 'Sys.Modify' ]],
1422 },
1423 parameters => {
1424 additionalProperties => 0,
1425 properties => {
1426 node => get_standard_option('pve-node'),
1427 service => {
1428 description => 'Ceph service name.',
1429 type => 'string',
1430 optional => 1,
1431 default => 'ceph.target',
1432 pattern => '(mon|mds|osd|mgr)\.[A-Za-z0-9\-]{1,32}',
1433 },
1434 },
1435 },
1436 returns => { type => 'string' },
1437 code => sub {
1438 my ($param) = @_;
1439
1440 my $rpcenv = PVE::RPCEnvironment::get();
1441
1442 my $authuser = $rpcenv->get_user();
1443
1444 PVE::CephTools::check_ceph_inited();
1445
1446 my $cfg = PVE::CephTools::parse_ceph_config();
1447 scalar(keys %$cfg) || die "no configuration\n";
1448
1449 my $worker = sub {
1450 my $upid = shift;
1451
1452 my $cmd = ['start'];
1453 if ($param->{service}) {
1454 push @$cmd, $param->{service};
1455 }
1456
1457 PVE::CephTools::ceph_service_cmd(@$cmd);
1458 };
1459
1460 return $rpcenv->fork_worker('srvstart', $param->{service} || 'ceph',
1461 $authuser, $worker);
1462 }});
1463
1464 __PACKAGE__->register_method ({
1465 name => 'restart',
1466 path => 'restart',
1467 method => 'POST',
1468 description => "Restart ceph services.",
1469 proxyto => 'node',
1470 protected => 1,
1471 permissions => {
1472 check => ['perm', '/', [ 'Sys.Modify' ]],
1473 },
1474 parameters => {
1475 additionalProperties => 0,
1476 properties => {
1477 node => get_standard_option('pve-node'),
1478 service => {
1479 description => 'Ceph service name.',
1480 type => 'string',
1481 optional => 1,
1482 default => 'ceph.target',
1483 pattern => '(mon|mds|osd|mgr)\.[A-Za-z0-9\-]{1,32}',
1484 },
1485 },
1486 },
1487 returns => { type => 'string' },
1488 code => sub {
1489 my ($param) = @_;
1490
1491 my $rpcenv = PVE::RPCEnvironment::get();
1492
1493 my $authuser = $rpcenv->get_user();
1494
1495 PVE::CephTools::check_ceph_inited();
1496
1497 my $cfg = PVE::CephTools::parse_ceph_config();
1498 scalar(keys %$cfg) || die "no configuration\n";
1499
1500 my $worker = sub {
1501 my $upid = shift;
1502
1503 my $cmd = ['restart'];
1504 if ($param->{service}) {
1505 push @$cmd, $param->{service};
1506 }
1507
1508 PVE::CephTools::ceph_service_cmd(@$cmd);
1509 };
1510
1511 return $rpcenv->fork_worker('srvrestart', $param->{service} || 'ceph',
1512 $authuser, $worker);
1513 }});
1514
1515 __PACKAGE__->register_method ({
1516 name => 'status',
1517 path => 'status',
1518 method => 'GET',
1519 description => "Get ceph status.",
1520 proxyto => 'node',
1521 protected => 1,
1522 permissions => {
1523 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
1524 },
1525 parameters => {
1526 additionalProperties => 0,
1527 properties => {
1528 node => get_standard_option('pve-node'),
1529 },
1530 },
1531 returns => { type => 'object' },
1532 code => sub {
1533 my ($param) = @_;
1534
1535 PVE::CephTools::check_ceph_enabled();
1536
1537 my $rados = PVE::RADOS->new();
1538 my $status = $rados->mon_command({ prefix => 'status' });
1539 $status->{health} = $rados->mon_command({ prefix => 'health', detail => 'detail' });
1540 return $status;
1541 }});
1542
1543 __PACKAGE__->register_method ({
1544 name => 'lspools',
1545 path => 'pools',
1546 method => 'GET',
1547 description => "List all pools.",
1548 proxyto => 'node',
1549 protected => 1,
1550 permissions => {
1551 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
1552 },
1553 parameters => {
1554 additionalProperties => 0,
1555 properties => {
1556 node => get_standard_option('pve-node'),
1557 },
1558 },
1559 returns => {
1560 type => 'array',
1561 items => {
1562 type => "object",
1563 properties => {
1564 pool => { type => 'integer' },
1565 pool_name => { type => 'string' },
1566 size => { type => 'integer' },
1567 },
1568 },
1569 links => [ { rel => 'child', href => "{pool_name}" } ],
1570 },
1571 code => sub {
1572 my ($param) = @_;
1573
1574 PVE::CephTools::check_ceph_inited();
1575
1576 my $rados = PVE::RADOS->new();
1577
1578 my $stats = {};
1579 my $res = $rados->mon_command({ prefix => 'df' });
1580
1581 foreach my $d (@{$res->{pools}}) {
1582 next if !$d->{stats};
1583 next if !defined($d->{id});
1584 $stats->{$d->{id}} = $d->{stats};
1585 }
1586
1587 $res = $rados->mon_command({ prefix => 'osd dump' });
1588 my $rulestmp = $rados->mon_command({ prefix => 'osd crush rule dump'});
1589
1590 my $rules = {};
1591 for my $rule (@$rulestmp) {
1592 $rules->{$rule->{rule_id}} = $rule->{rule_name};
1593 }
1594
1595 my $data = [];
1596 foreach my $e (@{$res->{pools}}) {
1597 my $d = {};
1598 foreach my $attr (qw(pool pool_name size min_size pg_num crush_rule)) {
1599 $d->{$attr} = $e->{$attr} if defined($e->{$attr});
1600 }
1601
1602 if (defined($d->{crush_rule}) && defined($rules->{$d->{crush_rule}})) {
1603 $d->{crush_rule_name} = $rules->{$d->{crush_rule}};
1604 }
1605
1606 if (my $s = $stats->{$d->{pool}}) {
1607 $d->{bytes_used} = $s->{bytes_used};
1608 $d->{percent_used} = $s->{percent_used};
1609 }
1610 push @$data, $d;
1611 }
1612
1613
1614 return $data;
1615 }});
1616
1617 __PACKAGE__->register_method ({
1618 name => 'createpool',
1619 path => 'pools',
1620 method => 'POST',
1621 description => "Create POOL",
1622 proxyto => 'node',
1623 protected => 1,
1624 permissions => {
1625 check => ['perm', '/', [ 'Sys.Modify' ]],
1626 },
1627 parameters => {
1628 additionalProperties => 0,
1629 properties => {
1630 node => get_standard_option('pve-node'),
1631 name => {
1632 description => "The name of the pool. It must be unique.",
1633 type => 'string',
1634 },
1635 size => {
1636 description => 'Number of replicas per object',
1637 type => 'integer',
1638 default => 3,
1639 optional => 1,
1640 minimum => 1,
1641 maximum => 7,
1642 },
1643 min_size => {
1644 description => 'Minimum number of replicas per object',
1645 type => 'integer',
1646 default => 2,
1647 optional => 1,
1648 minimum => 1,
1649 maximum => 7,
1650 },
1651 pg_num => {
1652 description => "Number of placement groups.",
1653 type => 'integer',
1654 default => 64,
1655 optional => 1,
1656 minimum => 8,
1657 maximum => 32768,
1658 },
1659 crush_rule => {
1660 description => "The rule to use for mapping object placement in the cluster.",
1661 type => 'string',
1662 optional => 1,
1663 },
1664 application => {
1665 description => "The application of the pool, 'rbd' by default.",
1666 type => 'string',
1667 enum => ['rbd', 'cephfs', 'rgw'],
1668 optional => 1,
1669 },
1670 add_storages => {
1671 description => "Configure VM and CT storage using the new pool.",
1672 type => 'boolean',
1673 optional => 1,
1674 },
1675 },
1676 },
1677 returns => { type => 'string' },
1678 code => sub {
1679 my ($param) = @_;
1680
1681 PVE::Cluster::check_cfs_quorum();
1682 PVE::CephTools::check_ceph_inited();
1683
1684 my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
1685
1686 die "not fully configured - missing '$pve_ckeyring_path'\n"
1687 if ! -f $pve_ckeyring_path;
1688
1689 my $pool = $param->{name};
1690 my $rpcenv = PVE::RPCEnvironment::get();
1691 my $user = $rpcenv->get_user();
1692
1693 if ($param->{add_storages}) {
1694 $rpcenv->check($user, '/storage', ['Datastore.Allocate']);
1695 die "pool name contains characters which are illegal for storage naming\n"
1696 if !PVE::JSONSchema::parse_storage_id($pool);
1697 }
1698
1699 my $pg_num = $param->{pg_num} || 64;
1700 my $size = $param->{size} || 3;
1701 my $min_size = $param->{min_size} || 2;
1702 my $application = $param->{application} // 'rbd';
1703
1704 my $worker = sub {
1705
1706 PVE::CephTools::create_pool($pool, $param);
1707
1708 if ($param->{add_storages}) {
1709 my $err;
1710 eval { $add_storage->($pool, "${pool}"); };
1711 if ($@) {
1712 warn "failed to add storage: $@";
1713 $err = 1;
1714 }
1715 die "adding storage for pool '$pool' failed, check log and add manually!\n"
1716 if $err;
1717 }
1718 };
1719
1720 return $rpcenv->fork_worker('cephcreatepool', $pool, $user, $worker);
1721 }});
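# Example (sketch; 'mypool' and the numbers are placeholders):
#
#   pvesh create /nodes/NODENAME/ceph/pools --name mypool --size 3 --min_size 2 \
#       --pg_num 128 --add_storages 1
#
# With add_storages set, an RBD storage entry named after the pool is added to
# the cluster-wide storage configuration (see $add_storage above).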
1722
1723 __PACKAGE__->register_method ({
1724 name => 'get_flags',
1725 path => 'flags',
1726 method => 'GET',
1727 description => "get all set ceph flags",
1728 proxyto => 'node',
1729 protected => 1,
1730 permissions => {
1731 check => ['perm', '/', [ 'Sys.Audit' ]],
1732 },
1733 parameters => {
1734 additionalProperties => 0,
1735 properties => {
1736 node => get_standard_option('pve-node'),
1737 },
1738 },
1739 returns => { type => 'string' },
1740 code => sub {
1741 my ($param) = @_;
1742
1743 PVE::CephTools::check_ceph_inited();
1744
1745 my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
1746
1747 die "not fully configured - missing '$pve_ckeyring_path'\n"
1748 if ! -f $pve_ckeyring_path;
1749
1750 my $rados = PVE::RADOS->new();
1751
1752 my $stat = $rados->mon_command({ prefix => 'osd dump' });
1753
1754 return $stat->{flags} // '';
1755 }});
1756
1757 __PACKAGE__->register_method ({
1758 name => 'set_flag',
1759 path => 'flags/{flag}',
1760 method => 'POST',
1761 description => "Set a ceph flag",
1762 proxyto => 'node',
1763 protected => 1,
1764 permissions => {
1765 check => ['perm', '/', [ 'Sys.Modify' ]],
1766 },
1767 parameters => {
1768 additionalProperties => 0,
1769 properties => {
1770 node => get_standard_option('pve-node'),
1771 flag => {
1772 description => 'The ceph flag to set/unset',
1773 type => 'string',
1774 enum => [ 'full', 'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'notieragent'],
1775 },
1776 },
1777 },
1778 returns => { type => 'null' },
1779 code => sub {
1780 my ($param) = @_;
1781
1782 PVE::CephTools::check_ceph_inited();
1783
1784 my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
1785
1786 die "not fully configured - missing '$pve_ckeyring_path'\n"
1787 if ! -f $pve_ckeyring_path;
1788
1789 my $set = $param->{set} // !$param->{unset};
1790 my $rados = PVE::RADOS->new();
1791
1792 $rados->mon_command({
1793 prefix => "osd set",
1794 key => $param->{flag},
1795 });
1796
1797 return undef;
1798 }});
1799
1800 __PACKAGE__->register_method ({
1801 name => 'unset_flag',
1802 path => 'flags/{flag}',
1803 method => 'DELETE',
1804 description => "Unset a ceph flag",
1805 proxyto => 'node',
1806 protected => 1,
1807 permissions => {
1808 check => ['perm', '/', [ 'Sys.Modify' ]],
1809 },
1810 parameters => {
1811 additionalProperties => 0,
1812 properties => {
1813 node => get_standard_option('pve-node'),
1814 flag => {
1815 description => 'The ceph flag to set/unset',
1816 type => 'string',
1817 enum => [ 'full', 'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'notieragent'],
1818 },
1819 },
1820 },
1821 returns => { type => 'null' },
1822 code => sub {
1823 my ($param) = @_;
1824
1825 PVE::CephTools::check_ceph_inited();
1826
1827 my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
1828
1829 die "not fully configured - missing '$pve_ckeyring_path'\n"
1830 if ! -f $pve_ckeyring_path;
1831
1832 my $set = $param->{set} // !$param->{unset};
1833 my $rados = PVE::RADOS->new();
1834
1835 $rados->mon_command({
1836 prefix => "osd unset",
1837 key => $param->{flag},
1838 });
1839
1840 return undef;
1841 }});
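# Example: set and clear the 'noout' flag around node maintenance (assuming the
# pvesh CLI; equivalent to 'ceph osd set noout' / 'ceph osd unset noout'):
#
#   pvesh create /nodes/NODENAME/ceph/flags/noout
#   pvesh delete /nodes/NODENAME/ceph/flags/noout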
1842
1843 __PACKAGE__->register_method ({
1844 name => 'destroypool',
1845 path => 'pools/{name}',
1846 method => 'DELETE',
1847 description => "Destroy pool",
1848 proxyto => 'node',
1849 protected => 1,
1850 permissions => {
1851 check => ['perm', '/', [ 'Sys.Modify' ]],
1852 },
1853 parameters => {
1854 additionalProperties => 0,
1855 properties => {
1856 node => get_standard_option('pve-node'),
1857 name => {
1858 description => "The name of the pool. It must be unique.",
1859 type => 'string',
1860 },
1861 force => {
1862 description => "If true, destroys pool even if in use",
1863 type => 'boolean',
1864 optional => 1,
1865 default => 0,
1866 },
1867 remove_storages => {
1868 description => "Remove all pveceph-managed storages configured for this pool",
1869 type => 'boolean',
1870 optional => 1,
1871 default => 0,
1872 },
1873 },
1874 },
1875 returns => { type => 'string' },
1876 code => sub {
1877 my ($param) = @_;
1878
1879 PVE::CephTools::check_ceph_inited();
1880
1881 my $rpcenv = PVE::RPCEnvironment::get();
1882 my $user = $rpcenv->get_user();
1883 $rpcenv->check($user, '/storage', ['Datastore.Allocate'])
1884 if $param->{remove_storages};
1885
1886 my $pool = $param->{name};
1887
1888 my $worker = sub {
1889 my $storages = $get_storages->($pool);
1890
1891 # if not forced, destroy ceph pool only when no
1892 # vm disks are on it anymore
1893 if (!$param->{force}) {
1894 my $storagecfg = PVE::Storage::config();
1895 foreach my $storeid (keys %$storages) {
1896 my $storage = $storages->{$storeid};
1897
1898 # check if any vm disks are on the pool
1899 print "checking storage '$storeid' for RBD images..\n";
1900 my $res = PVE::Storage::vdisk_list($storagecfg, $storeid);
1901 die "ceph pool '$pool' still in use by storage '$storeid'\n"
1902 if @{$res->{$storeid}} != 0;
1903 }
1904 }
1905
1906 PVE::CephTools::destroy_pool($pool);
1907
1908 if ($param->{remove_storages}) {
1909 my $err;
1910 foreach my $storeid (keys %$storages) {
1911 # skip external clusters, not managed by pveceph
1912 next if $storages->{$storeid}->{monhost};
1913 eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
1914 if ($@) {
1915 warn "failed to remove storage '$storeid': $@\n";
1916 $err = 1;
1917 }
1918 }
1919 die "failed to remove (some) storages - check log and remove manually!\n"
1920 if $err;
1921 }
1922 };
1923 return $rpcenv->fork_worker('cephdestroypool', $pool, $user, $worker);
1924 }});
1925
1926
1927 __PACKAGE__->register_method ({
1928 name => 'crush',
1929 path => 'crush',
1930 method => 'GET',
1931 description => "Get OSD crush map",
1932 proxyto => 'node',
1933 protected => 1,
1934 permissions => {
1935 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
1936 },
1937 parameters => {
1938 additionalProperties => 0,
1939 properties => {
1940 node => get_standard_option('pve-node'),
1941 },
1942 },
1943 returns => { type => 'string' },
1944 code => sub {
1945 my ($param) = @_;
1946
1947 PVE::CephTools::check_ceph_inited();
1948
1949 # this produces JSON (difficult to read for the user)
1950 # my $txt = &$run_ceph_cmd_text(['osd', 'crush', 'dump'], quiet => 1);
1951
1952 my $txt = '';
1953
1954 my $mapfile = "/var/tmp/ceph-crush.map.$$";
1955 my $mapdata = "/var/tmp/ceph-crush.txt.$$";
1956
1957 my $rados = PVE::RADOS->new();
1958
1959 eval {
1960 my $bindata = $rados->mon_command({ prefix => 'osd getcrushmap', format => 'plain' });
1961 file_set_contents($mapfile, $bindata);
1962 run_command(['crushtool', '-d', $mapfile, '-o', $mapdata]);
1963 $txt = file_get_contents($mapdata);
1964 };
1965 my $err = $@;
1966
1967 unlink $mapfile;
1968 unlink $mapdata;
1969
1970 die $err if $err;
1971
1972 return $txt;
1973 }});
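# Equivalent manual steps (sketch): dump the binary map with
# 'ceph osd getcrushmap -o map.bin' and decompile it with
# 'crushtool -d map.bin -o map.txt'; the handler above does the same through
# librados and returns the decompiled text.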
1974
1975 __PACKAGE__->register_method({
1976 name => 'log',
1977 path => 'log',
1978 method => 'GET',
1979 description => "Read ceph log",
1980 proxyto => 'node',
1981 permissions => {
1982 check => ['perm', '/nodes/{node}', [ 'Sys.Syslog' ]],
1983 },
1984 protected => 1,
1985 parameters => {
1986 additionalProperties => 0,
1987 properties => {
1988 node => get_standard_option('pve-node'),
1989 start => {
1990 type => 'integer',
1991 minimum => 0,
1992 optional => 1,
1993 },
1994 limit => {
1995 type => 'integer',
1996 minimum => 0,
1997 optional => 1,
1998 },
1999 },
2000 },
2001 returns => {
2002 type => 'array',
2003 items => {
2004 type => "object",
2005 properties => {
2006 n => {
2007 description=> "Line number",
2008 type=> 'integer',
2009 },
2010 t => {
2011 description=> "Line text",
2012 type => 'string',
2013 }
2014 }
2015 }
2016 },
2017 code => sub {
2018 my ($param) = @_;
2019
2020 my $rpcenv = PVE::RPCEnvironment::get();
2021 my $user = $rpcenv->get_user();
2022 my $node = $param->{node};
2023
2024 my $logfile = "/var/log/ceph/ceph.log";
2025 my ($count, $lines) = PVE::Tools::dump_logfile($logfile, $param->{start}, $param->{limit});
2026
2027 $rpcenv->set_result_attrib('total', $count);
2028
2029 return $lines;
2030 }});
2031
2032 __PACKAGE__->register_method ({
2033 name => 'rules',
2034 path => 'rules',
2035 method => 'GET',
2036 description => "List ceph rules.",
2037 proxyto => 'node',
2038 protected => 1,
2039 permissions => {
2040 check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
2041 },
2042 parameters => {
2043 additionalProperties => 0,
2044 properties => {
2045 node => get_standard_option('pve-node'),
2046 },
2047 },
2048 returns => {
2049 type => 'array',
2050 items => {
2051 type => "object",
2052 properties => {},
2053 },
2054 links => [ { rel => 'child', href => "{name}" } ],
2055 },
2056 code => sub {
2057 my ($param) = @_;
2058
2059 PVE::CephTools::check_ceph_inited();
2060
2061 my $rados = PVE::RADOS->new();
2062
2063 my $rules = $rados->mon_command({ prefix => 'osd crush rule ls' });
2064
2065 my $res = [];
2066
2067 foreach my $rule (@$rules) {
2068 push @$res, { name => $rule };
2069 }
2070
2071 return $res;
2072 }});