package PVE::API2::Ceph::OSD;

use strict;
use warnings;

use Cwd qw(abs_path);
use IO::File;
use UUID;

use PVE::Ceph::Tools;
use PVE::Ceph::Services;
use PVE::CephConfig;
use PVE::Cluster qw(cfs_read_file cfs_write_file);
use PVE::Diskmanage;
use PVE::Storage::LVMPlugin;
use PVE::Exception qw(raise_param_exc);
use PVE::JSONSchema qw(get_standard_option);
use PVE::INotify;
use PVE::RADOS;
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::Tools qw(run_command file_set_contents);
use PVE::ProcFSTools;
use PVE::Network;

use base qw(PVE::RESTHandler);

my $nodename = PVE::INotify::nodename();

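# Fetch OSD state from the monitors' 'osd dump'. For a given $osdid, return
# that OSD's status hash (or die); otherwise return all OSDs keyed by id and,
# in list context, the OSD map flags as well.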
my $get_osd_status = sub {
    my ($rados, $osdid) = @_;

    my $stat = $rados->mon_command({ prefix => 'osd dump' });

    my $osdlist = $stat->{osds} || [];

    my $flags = $stat->{flags} || undef;

    my $osdstat;
    foreach my $d (@$osdlist) {
        $osdstat->{$d->{osd}} = $d if defined($d->{osd});
    }
    if (defined($osdid)) {
        die "no such OSD '$osdid'\n" if !$osdstat->{$osdid};
        return $osdstat->{$osdid};
    }

    return wantarray ? ($osdstat, $flags) : $osdstat;
};

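# Collect per-OSD usage statistics from 'pg dump osds' into a hash keyed by
# OSD id; handles both the pre- and post-Nautilus result formats.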
my $get_osd_usage = sub {
    my ($rados) = @_;

    my $osdlist = $rados->mon_command({ prefix => 'pg dump', dumpcontents => [ 'osds' ]});
    if (!($osdlist && ref($osdlist))) {
        warn "got unknown result format for 'pg dump osds' command\n";
        return [];
    }

    if (ref($osdlist) eq "HASH") { # since nautilus
        $osdlist = $osdlist->{osd_stats};
    }

    my $osdstat = {};
    for my $d (@$osdlist) {
        $osdstat->{$d->{osd}} = $d if defined($d->{osd});
    }

    return $osdstat;
};

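# GET handler (index): returns the CRUSH tree as nested objects, the format
# the web UI's ExtJS tree panel consumes. Example query via pvesh, assuming
# the module's usual mount point and a hypothetical node name:
#   pvesh get /nodes/pve1/ceph/osd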
__PACKAGE__->register_method ({
    name => 'index',
    path => '',
    method => 'GET',
    description => "Get Ceph osd list/tree.",
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
        },
    },
    # fixme: return a list instead of extjs tree format ?
    returns => {
        type => "object",
    },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $rados = PVE::RADOS->new();
        my $res = $rados->mon_command({ prefix => 'osd tree' });

        die "no tree nodes found\n" if !($res && $res->{nodes});

        my ($osdhash, $flags) = $get_osd_status->($rados);

        my $osd_usage = $get_osd_usage->($rados);

        my $osdmetadata_res = $rados->mon_command({ prefix => 'osd metadata' });
        my $osdmetadata = { map { $_->{id} => $_ } @$osdmetadata_res };

        my $hostversions = PVE::Ceph::Services::get_ceph_versions();

        my $nodes = {};
        my $newnodes = {};
        foreach my $e (@{$res->{nodes}}) {
            my ($id, $name) = $e->@{qw(id name)};

            $nodes->{$id} = $e;

            my $new = {
                id => $id,
                name => $name,
                type => $e->{type}
            };

            foreach my $opt (qw(status crush_weight reweight device_class)) {
                $new->{$opt} = $e->{$opt} if defined($e->{$opt});
            }

            if (my $stat = $osdhash->{$id}) {
                $new->{in} = $stat->{in} if defined($stat->{in});
            }

            if (my $stat = $osd_usage->{$id}) {
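                # kb/kb_used are reported in KiB; default the total to 1 to
                # avoid a division by zero in the percentage below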
                $new->{total_space} = ($stat->{kb} || 1) * 1024;
                $new->{bytes_used} = ($stat->{kb_used} || 0) * 1024;
                $new->{percent_used} = ($new->{bytes_used}*100)/$new->{total_space};
                if (my $d = $stat->{perf_stat}) {
                    $new->{commit_latency_ms} = $d->{commit_latency_ms};
                    $new->{apply_latency_ms} = $d->{apply_latency_ms};
                }
            }

            my $osdmd = $osdmetadata->{$id};
            if ($e->{type} eq 'osd' && $osdmd) {
                if ($osdmd->{bluefs}) {
                    $new->{osdtype} = 'bluestore';
                    $new->{blfsdev} = $osdmd->{bluestore_bdev_dev_node};
                    $new->{dbdev} = $osdmd->{bluefs_db_dev_node};
                    $new->{waldev} = $osdmd->{bluefs_wal_dev_node};
                } else {
                    $new->{osdtype} = 'filestore';
                }
                for my $field (qw(ceph_version ceph_version_short)) {
                    $new->{$field} = $osdmd->{$field} if $osdmd->{$field};
                }
            }

            $newnodes->{$id} = $new;
        }

        foreach my $e (@{$res->{nodes}}) {
            my ($id, $name) = $e->@{qw(id name)};
            my $new = $newnodes->{$id};

            if ($e->{children} && scalar(@{$e->{children}})) {
                $new->{children} = [];
                $new->{leaf} = 0;
                foreach my $cid (@{$e->{children}}) {
                    $nodes->{$cid}->{parent} = $id;
                    if ($nodes->{$cid}->{type} eq 'osd' && $e->{type} eq 'host') {
                        $newnodes->{$cid}->{host} = $name;
                    }
                    push @{$new->{children}}, $newnodes->{$cid};
                }
            } else {
                $new->{leaf} = ($id >= 0) ? 1 : 0;
            }

            if ($name && $e->{type} eq 'host') {
                $new->{version} = $hostversions->{$name}->{version}->{str};
            }
        }

        my $realroots = [];
        foreach my $e (@{$res->{nodes}}) {
            my $id = $e->{id};
            if (!$nodes->{$id}->{parent}) {
                push @$realroots, $newnodes->{$id};
            }
        }

        die "no root node\n" if scalar(@$realroots) < 1;

        my $data = {
            root => {
                leaf => 0,
                children => $realroots
            },
        };

        $data->{flags} = $flags if $flags; # we want this for the noout flag

        return $data;
    }});

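# POST handler: create a new (bluestore) OSD on the given device via
# 'ceph-volume lvm create', optionally with separate block.db/block.wal
# devices and dm-crypt encryption.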
__PACKAGE__->register_method ({
    name => 'createosd',
    path => '',
    method => 'POST',
    description => "Create OSD",
    proxyto => 'node',
    protected => 1,
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            dev => {
                description => "Block device name.",
                type => 'string',
            },
            db_dev => {
                description => "Block device name for block.db.",
                optional => 1,
                type => 'string',
            },
            db_dev_size => {
                description => "Size in GiB for block.db.",
                verbose_description => "If a block.db is requested but the size is not given, ".
                    "the size is determined automatically from bluestore_block_db_size in the ".
                    "ceph configuration database (osd or global section) or the config file ".
                    "(osd or global section), in that order. If none of these is available, ".
                    "it defaults to 10% of the OSD device size. Fails if the available space ".
                    "is not sufficient.",
                optional => 1,
                type => 'number',
                default => 'bluestore_block_db_size or 10% of OSD size',
                requires => 'db_dev',
                minimum => 1.0,
            },
            wal_dev => {
                description => "Block device name for block.wal.",
                optional => 1,
                type => 'string',
            },
            wal_dev_size => {
                description => "Size in GiB for block.wal.",
                verbose_description => "If a block.wal is requested but the size is not given, ".
                    "the size is determined automatically from bluestore_block_wal_size in the ".
                    "ceph configuration database (osd or global section) or the config file ".
                    "(osd or global section), in that order. If none of these is available, ".
                    "it defaults to 1% of the OSD device size. Fails if the available space ".
                    "is not sufficient.",
                optional => 1,
                minimum => 0.5,
                default => 'bluestore_block_wal_size or 1% of OSD size',
                requires => 'wal_dev',
                type => 'number',
            },
            encrypted => {
                type => 'boolean',
                optional => 1,
                default => 0,
                description => "Enables encryption of the OSD."
            },
            'crush-device-class' => {
                optional => 1,
                type => 'string',
                description => "Set the device class of the OSD in crush."
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        # test basic requirements
        PVE::Ceph::Tools::check_ceph_inited();
        PVE::Ceph::Tools::setup_pve_symlinks();
        PVE::Ceph::Tools::check_ceph_installed('ceph_osd');
        PVE::Ceph::Tools::check_ceph_installed('ceph_volume');

        # extract parameter info and fail if a device is set more than once
        my $devs = {};

        my $ceph_conf = cfs_read_file('ceph.conf');

        my $osd_network = $ceph_conf->{global}->{cluster_network};
        $osd_network //= $ceph_conf->{global}->{public_network}; # fallback

        if ($osd_network) { # check only if something is configured
            my $cluster_net_ips = PVE::Network::get_local_ip_from_cidr($osd_network);
            if (scalar(@$cluster_net_ips) < 1) {
                my $osd_net_obj = PVE::Network::IP_from_cidr($osd_network);
                my $osd_base_cidr = $osd_net_obj->{ip} . "/" . $osd_net_obj->{prefixlen};

                die "No address from ceph cluster network (${osd_base_cidr}) found on node '$nodename'. ".
                    "Check your network config.\n";
            }
        }

        for my $type ( qw(dev db_dev wal_dev) ) {
            next if !$param->{$type};

            my $type_dev = PVE::Diskmanage::verify_blockdev_path($param->{$type});
            (my $type_devname = $type_dev) =~ s|/dev/||;

            raise_param_exc({ $type => "cannot choose '$type_dev' for more than one type." })
                if grep { $_->{name} eq $type_devname } values %$devs;

            $devs->{$type} = {
                dev => $type_dev,
                name => $type_devname,
            };

            if (my $size = $param->{"${type}_size"}) {
                $devs->{$type}->{size} = PVE::Tools::convert_size($size, 'gb' => 'b');
            }
        }

        my $test_disk_requirements = sub {
            my ($disklist) = @_;

            my $dev = $devs->{dev}->{dev};
            my $devname = $devs->{dev}->{name};
            die "unable to get device info for '$dev'\n" if !$disklist->{$devname};
            die "device '$dev' is already in use\n" if $disklist->{$devname}->{used};

            for my $type ( qw(db_dev wal_dev) ) {
                my $d = $devs->{$type};
                next if !$d;
                my $name = $d->{name};
                my $info = $disklist->{$name};
                die "unable to get device info for '$d->{dev}' for type $type\n" if !$disklist->{$name};
                if (my $usage = $info->{used}) {
                    if ($usage eq 'partitions') {
                        die "device '$d->{dev}' is not GPT partitioned\n" if !$info->{gpt};
                    } elsif ($usage ne 'LVM') {
                        die "device '$d->{dev}' is already in use and has no LVM on it\n";
                    }
                }
            }
        };


        # test disk requirements early
        my $devlist = [ map { $_->{name} } values %$devs ];
        my $disklist = PVE::Diskmanage::get_disks($devlist, 1, 1);
        $test_disk_requirements->($disklist);

        # get necessary ceph infos
        my $rados = PVE::RADOS->new();
        my $monstat = $rados->mon_command({ prefix => 'quorum_status' });

        die "unable to get fsid\n" if !$monstat->{monmap} || !$monstat->{monmap}->{fsid};
        my $fsid = $monstat->{monmap}->{fsid};
        $fsid = $1 if $fsid =~ m/^([0-9a-f\-]+)$/;

        my $ceph_bootstrap_osd_keyring = PVE::Ceph::Tools::get_config('ceph_bootstrap_osd_keyring');

        if (! -f $ceph_bootstrap_osd_keyring && $ceph_conf->{global}->{auth_client_required} eq 'cephx') {
            my $bindata = $rados->mon_command({
                prefix => 'auth get-or-create',
                entity => 'client.bootstrap-osd',
                caps => [
                    'mon' => 'allow profile bootstrap-osd'
                ],
                format => 'plain',
            });
            file_set_contents($ceph_bootstrap_osd_keyring, $bindata);
        };

        # See FIXME below
        my @udev_trigger_devs = ();

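        # Allocate a block.db/block.wal volume of the requested size (in bytes)
        # on $dev: a new LV on an unused disk or an existing ceph VG, or a GPT
        # partition appended to an already partitioned disk. Returns "vg/lv" or
        # the partition path.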
        my $create_part_or_lv = sub {
            my ($dev, $size, $type) = @_;

            $size =~ m/^(\d+)$/ or die "invalid size '$size'\n";
            $size = $1;

            die "'$dev->{devpath}' is smaller than requested size '$size' bytes\n"
                if $dev->{size} < $size;

            # sgdisk and lvcreate can only handle sizes divisible by 512b,
            # so we round down to the nearest KiB
            $size = PVE::Tools::convert_size($size, 'b' => 'kb', 1);

            if (!$dev->{used}) {
                # create pv,vg,lv

                my $vg = "ceph-" . UUID::uuid();
                my $lv = $type . "-" . UUID::uuid();

                PVE::Storage::LVMPlugin::lvm_create_volume_group($dev->{devpath}, $vg);
                PVE::Storage::LVMPlugin::lvcreate($vg, $lv, "${size}k");

                if (PVE::Diskmanage::is_partition($dev->{devpath})) {
                    eval { PVE::Diskmanage::change_parttype($dev->{devpath}, '8E00'); };
                    warn $@ if $@;
                }

                push @udev_trigger_devs, $dev->{devpath};

                return "$vg/$lv";

            } elsif ($dev->{used} eq 'LVM') {
                # check pv/vg and create lv

                my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
                my $vg;
                for my $vgname ( sort keys %$vgs ) {
                    next if $vgname !~ /^ceph-/;

                    for my $pv ( @{$vgs->{$vgname}->{pvs}} ) {
                        next if $pv->{name} ne $dev->{devpath};
                        $vg = $vgname;
                        last;
                    }
                    last if $vg;
                }

                die "no ceph vg found on '$dev->{devpath}'\n" if !$vg;
424 die "vg '$vg' has not enough free space\n" if $vgs->{$vg}->{free} < $size;
425
                my $lv = $type . "-" . UUID::uuid();

                PVE::Storage::LVMPlugin::lvcreate($vg, $lv, "${size}k");

                return "$vg/$lv";

            } elsif ($dev->{used} eq 'partitions' && $dev->{gpt}) {
                # create new partition at the end
                my $parttypes = {
                    'osd-db' => '30CD0809-C2B2-499C-8879-2D6B78529876',
                    'osd-wal' => '5CE17FCE-4087-4169-B7FF-056CC58473F9',
                };

                my $part = PVE::Diskmanage::append_partition($dev->{devpath}, $size * 1024);

                if (my $parttype = $parttypes->{$type}) {
                    eval { PVE::Diskmanage::change_parttype($part, $parttype); };
                    warn $@ if $@;
                }

                push @udev_trigger_devs, $part;
                return $part;
            }

            die "cannot use '$dev->{devpath}' for '$type'\n";
        };

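        # Worker: runs under the host's disk lock; re-checks the devices, then
        # assembles and runs the 'ceph-volume lvm create' command.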
        my $worker = sub {
            my $upid = shift;

            PVE::Diskmanage::locked_disk_action(sub {
                # update disklist and re-test requirements
                $disklist = PVE::Diskmanage::get_disks($devlist, 1, 1);
                $test_disk_requirements->($disklist);

                my $dev_class = $param->{'crush-device-class'};
                my $cmd = ['ceph-volume', 'lvm', 'create', '--cluster-fsid', $fsid ];
                push @$cmd, '--crush-device-class', $dev_class if $dev_class;

                my $devname = $devs->{dev}->{name};
                my $devpath = $disklist->{$devname}->{devpath};
                print "create OSD on $devpath (bluestore)\n";

                push @udev_trigger_devs, $devpath;

                my $osd_size = $disklist->{$devname}->{size};
                my $size_map = {
                    db => int($osd_size / 10), # 10% of OSD
                    wal => int($osd_size / 100), # 1% of OSD
                };

                my $sizes;
                foreach my $type ( qw(db wal) ) {
                    my $fallback_size = $size_map->{$type};
                    my $d = $devs->{"${type}_dev"};
                    next if !$d;

                    # size was not set via the API; take it from the config or fall back
                    if (!defined($d->{size})) {
                        $sizes = PVE::Ceph::Tools::get_db_wal_sizes() if !$sizes;
                        $d->{size} = $sizes->{$type} // $fallback_size;
                    }
                    print "creating block.$type on '$d->{dev}'\n";
                    my $name = $d->{name};
                    my $part_or_lv = $create_part_or_lv->($disklist->{$name}, $d->{size}, "osd-$type");

                    print "using '$part_or_lv' for block.$type\n";
                    push @$cmd, "--block.$type", $part_or_lv;
                }

                push @$cmd, '--data', $devpath;
                push @$cmd, '--dmcrypt' if $param->{encrypted};

                PVE::Diskmanage::wipe_blockdev($devpath);

                if (PVE::Diskmanage::is_partition($devpath)) {
                    eval { PVE::Diskmanage::change_parttype($devpath, '8E00'); };
                    warn $@ if $@;
                }

                run_command($cmd);

                # FIXME: Remove once we depend on systemd >= v249.
                # Work around udev bug https://github.com/systemd/systemd/issues/18525 to ensure the
                # udev database is updated.
                eval { run_command(['udevadm', 'trigger', @udev_trigger_devs]); };
                warn $@ if $@;
            });
        };

        return $rpcenv->fork_worker('cephcreateosd', $devs->{dev}->{name}, $authuser, $worker);
    }});

# Check if $osdid belongs to $nodename
# $tree ... rados osd tree (passing the tree makes it easy to test)
sub osd_belongs_to_node {
    my ($tree, $nodename, $osdid) = @_;
    return 0 if !($tree && $tree->{nodes});

    my $node_map = {};
    for my $el (grep { defined($_->{type}) && $_->{type} eq 'host' } @{$tree->{nodes}}) {
        my $name = $el->{name};
        die "internal error: duplicate host name found '$name'\n" if $node_map->{$name};
        $node_map->{$name} = $el;
    }

    my $osds = $node_map->{$nodename}->{children};
    return 0 if !$osds;

    return grep($_ == $osdid, @$osds);
}
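
# Example tree fragment (abridged 'osd tree' output) for illustration:
#   { nodes => [
#       { id => -2, name => 'pve1', type => 'host', children => [ 0 ] },
#       { id => 0, name => 'osd.0', type => 'osd' },
#   ] }
# Here osd_belongs_to_node($tree, 'pve1', 0) is true.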
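
# DELETE handler: destroy an OSD. The OSD must already be stopped and marked
# 'out'; with 'cleanup' set, the backing LVs/partitions are wiped as well.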
__PACKAGE__->register_method ({
    name => 'destroyosd',
    path => '{osdid}',
    method => 'DELETE',
    description => "Destroy OSD",
    proxyto => 'node',
    protected => 1,
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            osdid => {
                description => 'OSD ID',
                type => 'integer',
            },
            cleanup => {
                description => "If set, we remove partition table entries.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();

        my $authuser = $rpcenv->get_user();

        PVE::Ceph::Tools::check_ceph_inited();

        my $osdid = $param->{osdid};
        my $cleanup = $param->{cleanup};

        my $rados = PVE::RADOS->new();

        my $osd_belongs_to_node = osd_belongs_to_node(
            $rados->mon_command({ prefix => 'osd tree' }),
            $param->{node},
            $osdid,
        );
581 die "OSD osd.$osdid does not belong to node $param->{node}!"
582 if !$osd_belongs_to_node;
583
        # dies if osdid is unknown
        my $osdstat = $get_osd_status->($rados, $osdid);

        die "osd is in use (in == 1)\n" if $osdstat->{in};
        #&$run_ceph_cmd(['osd', 'out', $osdid]);

        die "osd is still running (up == 1)\n" if $osdstat->{up};

        my $osdsection = "osd.$osdid";

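        # Worker: stop and disable the OSD service, remove the OSD from the
        # CRUSH map, auth database and OSD map, then unmount and optionally
        # clean up the backing disks.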
        my $worker = sub {
            my $upid = shift;

            # reopen with longer timeout
            $rados = PVE::RADOS->new(timeout => PVE::Ceph::Tools::get_config('long_rados_timeout'));

            print "destroy OSD $osdsection\n";

            eval {
                PVE::Ceph::Services::ceph_service_cmd('stop', $osdsection);
                PVE::Ceph::Services::ceph_service_cmd('disable', $osdsection);
            };
            warn $@ if $@;

            print "Remove $osdsection from the CRUSH map\n";
            $rados->mon_command({ prefix => "osd crush remove", name => $osdsection, format => 'plain' });

            print "Remove the $osdsection authentication key.\n";
            $rados->mon_command({ prefix => "auth del", entity => $osdsection, format => 'plain' });

            print "Remove OSD $osdsection\n";
            $rados->mon_command({ prefix => "osd rm", ids => [ $osdsection ], format => 'plain' });

            # try to unmount from standard mount point
            my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";

            # See FIXME below
            my $udev_trigger_devs = {};

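            # Wipe a partition and delete it from its disk's GPT; remember the
            # parent device for the udev workaround below.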
            my $remove_partition = sub {
                my ($part) = @_;

                return if !$part || (! -b $part );
                my $partnum = PVE::Diskmanage::get_partnum($part);
                my $devpath = PVE::Diskmanage::get_blockdev($part);

                $udev_trigger_devs->{$devpath} = 1;

                PVE::Diskmanage::wipe_blockdev($part);
                print "remove partition $part (disk '${devpath}', partnum $partnum)\n";
                eval { run_command(['/sbin/sgdisk', '-d', $partnum, "${devpath}"]); };
                warn $@ if $@;
            };

            my $osd_list = PVE::Ceph::Tools::ceph_volume_list();

            if ($osd_list->{$osdid}) { # ceph-volume managed

                eval { PVE::Ceph::Tools::ceph_volume_zap($osdid, $cleanup) };
                warn $@ if $@;

                if ($cleanup) {
                    # try to remove pvs, but do not fail if it does not work
                    for my $osd_part (@{$osd_list->{$osdid}}) {
                        for my $dev (@{$osd_part->{devices}}) {
                            ($dev) = ($dev =~ m|^(/dev/[-_.a-zA-Z0-9\/]+)$|); # untaint

                            eval { run_command(['/sbin/pvremove', $dev], errfunc => sub {}) };
                            warn $@ if $@;

                            $udev_trigger_devs->{$dev} = 1;
                        }
                    }
                }
            } else {
                my $partitions_to_remove = [];
                if ($cleanup) {
                    if (my $mp = PVE::ProcFSTools::parse_proc_mounts()) {
                        foreach my $line (@$mp) {
                            my ($dev, $path, $fstype) = @$line;
                            next if !($dev && $path && $fstype);
                            next if $dev !~ m|^/dev/|;

                            if ($path eq $mountpoint) {
                                abs_path($dev) =~ m|^(/.+)| or die "invalid dev: $dev\n";
                                push @$partitions_to_remove, $1;
                                last;
                            }
                        }
                    }

                    foreach my $path (qw(journal block block.db block.wal)) {
                        abs_path("$mountpoint/$path") =~ m|^(/.+)| or die "invalid path: $path\n";
                        push @$partitions_to_remove, $1;
                    }
                }

                print "Unmount OSD $osdsection from $mountpoint\n";
                eval { run_command(['/bin/umount', $mountpoint]); };
                if (my $err = $@) {
                    warn $err;
                } elsif ($cleanup) {
                    # be aware of the ceph udev rules which can remount
                    foreach my $part (@$partitions_to_remove) {
                        $remove_partition->($part);
                    }
                }
            }

            # FIXME: Remove once we depend on systemd >= v249.
            # Work around udev bug https://github.com/systemd/systemd/issues/18525 to ensure the
            # udev database is updated.
            if ($cleanup) {
                eval { run_command(['udevadm', 'trigger', keys $udev_trigger_devs->%*]); };
                warn $@ if $@;
            }
        };

        return $rpcenv->fork_worker('cephdestroyosd', $osdsection, $authuser, $worker);
    }});

__PACKAGE__->register_method ({
    name => 'in',
    path => '{osdid}/in',
    method => 'POST',
    description => "ceph osd in",
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', [ 'Sys.Modify' ]],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            osdid => {
                description => 'OSD ID',
                type => 'integer',
            },
        },
    },
    returns => { type => "null" },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $osdid = $param->{osdid};

        my $rados = PVE::RADOS->new();

        $get_osd_status->($rados, $osdid); # osd exists?

        my $osdsection = "osd.$osdid";

        $rados->mon_command({ prefix => "osd in", ids => [ $osdsection ], format => 'plain' });

        return undef;
    }});

__PACKAGE__->register_method ({
    name => 'out',
    path => '{osdid}/out',
    method => 'POST',
    description => "ceph osd out",
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', [ 'Sys.Modify' ]],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            osdid => {
                description => 'OSD ID',
                type => 'integer',
            },
        },
    },
    returns => { type => "null" },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $osdid = $param->{osdid};

        my $rados = PVE::RADOS->new();

        $get_osd_status->($rados, $osdid); # osd exists?

        my $osdsection = "osd.$osdid";

        $rados->mon_command({ prefix => "osd out", ids => [ $osdsection ], format => 'plain' });

        return undef;
    }});

__PACKAGE__->register_method ({
    name => 'scrub',
    path => '{osdid}/scrub',
    method => 'POST',
    description => "Instruct the OSD to scrub.",
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', [ 'Sys.Modify' ]],
    },
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            osdid => {
                description => 'OSD ID',
                type => 'integer',
            },
            deep => {
                description => 'If set, instructs a deep scrub instead of a normal one.',
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => "null" },
    code => sub {
        my ($param) = @_;

        PVE::Ceph::Tools::check_ceph_inited();

        my $osdid = $param->{osdid};
        my $deep = $param->{deep} // 0;

        my $rados = PVE::RADOS->new();

        $get_osd_status->($rados, $osdid); # osd exists?

        my $prefix = $deep ? 'osd deep-scrub' : 'osd scrub';
        $rados->mon_command({ prefix => $prefix, who => $osdid });

        return undef;
    }});

1;