pve-storage.git: PVE/API2/Disks/LVMThin.pm (blob 038310a4864c6e82234ee7dc75fbaa2a7ce6b1de)
package PVE::API2::Disks::LVMThin;

use strict;
use warnings;

use PVE::Storage::LvmThinPlugin;
use PVE::Diskmanage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::API2::Storage::Config;
use PVE::Storage;
use PVE::Tools qw(run_command lock_file);

use PVE::RPCEnvironment;
use PVE::RESTHandler;

use base qw(PVE::RESTHandler);

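# Node-scoped API for LVM thin pools: list the pools present on a node, create a
# new pool on an unused block device (optionally registering it as a storage),
# and remove a pool again, with optional config and disk cleanup.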
__PACKAGE__->register_method ({
    name => 'index',
    path => '',
    method => 'GET',
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
    },
    description => "List LVM thinpools",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
        },
    },
    returns => {
        type => 'array',
        items => {
            type => 'object',
            properties => {
                lv => {
                    type => 'string',
                    description => 'The name of the thinpool.',
                },
                vg => {
                    type => 'string',
                    description => 'The associated volume group.',
                },
                lv_size => {
                    type => 'integer',
                    description => 'The size of the thinpool in bytes.',
                },
                used => {
                    type => 'integer',
                    description => 'The used bytes of the thinpool.',
                },
                metadata_size => {
                    type => 'integer',
                    description => 'The size of the metadata lv in bytes.',
                },
                metadata_used => {
                    type => 'integer',
                    description => 'The used bytes of the metadata lv.',
                },
            },
        },
    },
    code => sub {
        my ($param) = @_;
        return PVE::Storage::LvmThinPlugin::list_thinpools(undef);
    }});

__PACKAGE__->register_method ({
    name => 'create',
    path => '',
    method => 'POST',
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
    },
    description => "Create an LVM thinpool",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            name => get_standard_option('pve-storage-id'),
            device => {
                type => 'string',
                description => 'The block device you want to create the thinpool on.',
            },
            add_storage => {
                description => "Configure storage using the thinpool.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();
        my $user = $rpcenv->get_user();

        my $name = $param->{name};
        my $dev = $param->{device};
        my $node = $param->{node};

        $dev = PVE::Diskmanage::verify_blockdev_path($dev);
        PVE::Diskmanage::assert_disk_unused($dev);

        my $storage_params = {
            type => 'lvmthin',
            vgname => $name,
            thinpool => $name,
            storage => $name,
            content => 'rootdir,images',
            nodes => $node,
        };
        my $verify_params = [qw(vgname thinpool)];
        if ($param->{add_storage}) {
            PVE::API2::Storage::Config->create_or_update(
                $name,
                $node,
                $storage_params,
                $verify_params,
                1,
            );
        }

        my $worker = sub {
            PVE::Diskmanage::locked_disk_action(sub {
                PVE::Diskmanage::assert_disk_unused($dev);

                die "volume group with name '${name}' already exists on node '${node}'\n"
                    if PVE::Storage::LVMPlugin::lvm_vgs()->{$name};

                if (PVE::Diskmanage::is_partition($dev)) {
                    eval { PVE::Diskmanage::change_parttype($dev, '8E00'); };
                    warn $@ if $@;
                }

                PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
                my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev);
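                # the PV size is reported in KiB, so all size calculations below are in
                # KiB as well (note the 'K' suffix passed to lvcreate)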
                # keep some free space just in case
                my $datasize = $pv->{size} - 128*1024;
                # default to 1% for metadata
                my $metadatasize = $datasize/100;
                # but at least 1G, as recommended in lvmthin man
                $metadatasize = 1024*1024 if $metadatasize < 1024*1024;
                # but at most 16G, which is the current lvm max
                $metadatasize = 16*1024*1024 if $metadatasize > 16*1024*1024;
                # shrink data by needed amount for metadata
                $datasize -= 2*$metadatasize;

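                # create the thin pool LV; both the volume group and the pool LV are
                # named after the requested storage ID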
                run_command([
                    '/sbin/lvcreate',
                    '--type', 'thin-pool',
                    "-L${datasize}K",
                    '--poolmetadatasize', "${metadatasize}K",
                    '-n', $name,
                    $name
                ]);

                PVE::Diskmanage::udevadm_trigger($dev);

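                # now that the pool exists, actually add (or extend) the storage
                # definition; the dry run before the worker already ruled out conflicts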
                if ($param->{add_storage}) {
                    PVE::API2::Storage::Config->create_or_update(
                        $name,
                        $node,
                        $storage_params,
                        $verify_params,
                    );
                }
            });
        };

        return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker);
    }});

__PACKAGE__->register_method ({
    name => 'delete',
    path => '{name}',
    method => 'DELETE',
    proxyto => 'node',
    protected => 1,
    permissions => {
        check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
    },
    description => "Remove an LVM thin pool.",
    parameters => {
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
            name => get_standard_option('pve-storage-id'),
            'volume-group' => get_standard_option('pve-storage-id'),
            'cleanup-config' => {
                description => "Marks associated storage(s) as not available on this node anymore ".
                    "or removes them from the configuration (if configured for this node only).",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
            'cleanup-disks' => {
                description => "Also wipe disks so they can be repurposed afterwards.",
                type => 'boolean',
                optional => 1,
                default => 0,
            },
        },
    },
    returns => { type => 'string' },
    code => sub {
        my ($param) = @_;

        my $rpcenv = PVE::RPCEnvironment::get();
        my $user = $rpcenv->get_user();

        my $vg = $param->{'volume-group'};
        my $lv = $param->{name};
        my $node = $param->{node};

        my $worker = sub {
            PVE::Diskmanage::locked_disk_action(sub {
                my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools();

                die "no such thin pool ${vg}/${lv}\n"
                    if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*;

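                # remove the thin pool LV itself; '-y' skips the interactive confirmation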
                run_command(['lvremove', '-y', "${vg}/${lv}"]);

                my $config_err;
                if ($param->{'cleanup-config'}) {
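                    # match storage entries of type 'lvmthin' that reference exactly this
                    # volume group and thin pool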
                    my $match = sub {
                        my ($scfg) = @_;
                        return $scfg->{type} eq 'lvmthin'
                            && $scfg->{vgname} eq $vg
                            && $scfg->{thinpool} eq $lv;
                    };
                    eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
                    warn $config_err = $@ if $@;
                }

                if ($param->{'cleanup-disks'}) {
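                    # lvm_vgs(1) also returns the physical volumes of each volume group;
                    # only wipe them if the VG has no remaining LVs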
                    my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);

                    die "no such volume group '$vg'\n" if !$vgs->{$vg};
                    die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0;

                    my $wiped = [];
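                    # wipe each PV, remembering which devices were already wiped so udev
                    # can be re-triggered for them even if a later wipe fails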
                    eval {
                        for my $pv ($vgs->{$vg}->{pvs}->@*) {
                            my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
                            PVE::Diskmanage::wipe_blockdev($dev);
                            push $wiped->@*, $dev;
                        }
                    };
                    my $err = $@;
                    PVE::Diskmanage::udevadm_trigger($wiped->@*);
                    die "cleanup failed - $err" if $err;
                }

                die "config cleanup failed - $config_err" if $config_err;
            });
        };

        return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker);
    }});

1;