# PVE/API2/Disks/LVMThin.pm — API endpoints for managing LVM thin pools
# (origin: pve-storage.git on git.proxmox.com)
package PVE::API2::Disks::LVMThin;

use strict;
use warnings;

# NOTE(review): this span was reconstructed from a scrape-garbled copy; the
# PVE::Diskmanage and PVE::Storage loads are required by the method bodies
# below (fell into a scrape gap) — verify the full list against upstream.
use PVE::Storage::LvmThinPlugin;
use PVE::Diskmanage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::API2::Storage::Config;
use PVE::Storage;
use PVE::Tools qw(run_command lock_file);

use PVE::RPCEnvironment;
use PVE::RESTHandler;

use base qw(PVE::RESTHandler);
# GET handler: list all LVM thin pools on this node.
# NOTE(review): the schema scaffolding (name/path/method/proxyto/protected and
# the per-property type wrappers) fell into a scrape gap and was reconstructed
# from the visible descriptions — confirm against upstream pve-storage.
__PACKAGE__->register_method ({
    name => 'index',
    path => '',
    method => 'GET',
    proxyto => 'node',
    protected => 1,
    permissions => {
	check => ['perm', '/', ['Sys.Audit', 'Datastore.Audit'], any => 1],
    },
    description => "List LVM thinpools",
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	},
    },
    returns => {
	type => 'array',
	items => {
	    type => 'object',
	    properties => {
		lv => {
		    type => 'string',
		    description => 'The name of the thinpool.',
		},
		vg => {
		    type => 'string',
		    description => 'The associated volume group.',
		},
		lv_size => {
		    type => 'integer',
		    description => 'The size of the thinpool in bytes.',
		},
		used => {
		    type => 'integer',
		    description => 'The used bytes of the thinpool.',
		},
		metadata_size => {
		    type => 'integer',
		    description => 'The size of the metadata lv in bytes.',
		},
		metadata_used => {
		    type => 'integer',
		    description => 'The used bytes of the metadata lv.',
		},
	    },
	},
    },
    code => sub {
	my ($param) = @_;

	# undef storage config: list every thinpool, not just configured ones
	return PVE::Storage::LvmThinPlugin::list_thinpools(undef);
    }});
# POST handler: create a thin pool (and its backing VG) on a blank block
# device, optionally registering it as a PVE storage. Runs as a fork_worker
# task; returns the worker UPID string.
# NOTE(review): schema header, the lvcreate argument list and the
# $storage_params hash were partially lost in the scrape and reconstructed —
# verify against upstream pve-storage before relying on exact values.
__PACKAGE__->register_method ({
    name => 'create',
    path => '',
    method => 'POST',
    proxyto => 'node',
    protected => 1,
    permissions => {
	check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
    },
    description => "Create an LVM thinpool",
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    name => get_standard_option('pve-storage-id'),
	    device => {
		type => 'string',
		description => 'The block device you want to create the thinpool on.',
	    },
	    add_storage => {
		description => "Configure storage using the thinpool.",
		type => 'boolean',
		optional => 1,
		default => 0,
	    },
	},
    },
    returns => { type => 'string' },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $user = $rpcenv->get_user();

	my $name = $param->{name};
	my $dev = $param->{device};
	my $node = $param->{node};

	$dev = PVE::Diskmanage::verify_blockdev_path($dev);
	# check before forking the worker so obvious errors fail fast
	PVE::Diskmanage::assert_disk_unused($dev);
	PVE::Storage::assert_sid_unused($name) if $param->{add_storage};

	my $worker = sub {
	    PVE::Diskmanage::locked_disk_action(sub {
		# re-check under the lock: state may have changed since the
		# pre-fork assertion above
		PVE::Diskmanage::assert_disk_unused($dev);

		if (PVE::Diskmanage::is_partition($dev)) {
		    # 8E00 = GPT partition type "Linux LVM"; best effort only
		    eval { PVE::Diskmanage::change_parttype($dev, '8E00'); };
		    warn $@ if $@;
		}

		PVE::Storage::LVMPlugin::lvm_create_volume_group($dev, $name);
		my $pv = PVE::Storage::LVMPlugin::lvm_pv_info($dev);
		# keep some free space just in case
		my $datasize = $pv->{size} - 128*1024;
		# default to 1% for metadata
		my $metadatasize = $datasize/100;
		# but at least 1G, as recommended in lvmthin man
		$metadatasize = 1024*1024 if $metadatasize < 1024*1024;
		# but at most 16G, which is the current lvm max
		$metadatasize = 16*1024*1024 if $metadatasize > 16*1024*1024;
		# shrink data by needed amount for metadata
		$datasize -= 2*$metadatasize;

		run_command([
		    '/sbin/lvcreate',
		    '--type', 'thin-pool',
		    "-L${datasize}K",
		    '--poolmetadatasize', "${metadatasize}K",
		    '-n', $name,
		    $name,
		]);

		PVE::Diskmanage::udevadm_trigger($dev);

		if ($param->{add_storage}) {
		    my $storage_params = {
			type => 'lvmthin',
			vgname => $name,
			thinpool => $name,
			storage => $name,
			content => 'rootdir,images',
			nodes => $node,
		    };

		    PVE::API2::Storage::Config->create($storage_params);
		}
	    });
	};

	return $rpcenv->fork_worker('lvmthincreate', $name, $user, $worker);
    }});
# DELETE handler: remove a thin pool, optionally cleaning up matching storage
# config entries and wiping the backing disks. Runs as a fork_worker task;
# returns the worker UPID string.
# NOTE(review): schema header and the worker/eval scaffolding were partially
# lost in the scrape and reconstructed — verify against upstream pve-storage.
__PACKAGE__->register_method ({
    name => 'delete',
    path => '{name}',
    method => 'DELETE',
    proxyto => 'node',
    protected => 1,
    permissions => {
	check => ['perm', '/', ['Sys.Modify', 'Datastore.Allocate']],
    },
    description => "Remove an LVM thin pool.",
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    name => get_standard_option('pve-storage-id'),
	    'volume-group' => get_standard_option('pve-storage-id'),
	    'cleanup-config' => {
		description => "Marks associated storage(s) as not available on this node anymore ".
		    "or removes them from the configuration (if configured for this node only).",
		type => 'boolean',
		optional => 1,
		default => 0,
	    },
	    'cleanup-disks' => {
		description => "Also wipe disks so they can be repurposed afterwards.",
		type => 'boolean',
		optional => 1,
		default => 0,
	    },
	},
    },
    returns => { type => 'string' },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $user = $rpcenv->get_user();

	my $vg = $param->{'volume-group'};
	my $lv = $param->{name};
	my $node = $param->{node};

	my $worker = sub {
	    PVE::Diskmanage::locked_disk_action(sub {
		my $thinpools = PVE::Storage::LvmThinPlugin::list_thinpools();

		die "no such thin pool ${vg}/${lv}\n"
		    if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*;

		run_command(['lvremove', '-y', "${vg}/${lv}"]);

		my $config_err;
		if ($param->{'cleanup-config'}) {
		    my $match = sub {
			my ($scfg) = @_;
			return $scfg->{type} eq 'lvmthin'
			    && $scfg->{vgname} eq $vg
			    && $scfg->{thinpool} eq $lv;
		    };
		    eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
		    # config cleanup failure must not abort disk cleanup;
		    # re-raised at the end instead
		    warn $config_err = $@ if $@;
		}

		if ($param->{'cleanup-disks'}) {
		    my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);

		    die "no such volume group '$vg'\n" if !$vgs->{$vg};
		    die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0;

		    my $wiped = [];
		    eval {
			for my $pv ($vgs->{$vg}->{pvs}->@*) {
			    my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
			    PVE::Diskmanage::wipe_blockdev($dev);
			    push $wiped->@*, $dev;
			}
		    };
		    my $err = $@;
		    # always re-trigger udev for the devices already wiped
		    PVE::Diskmanage::udevadm_trigger($wiped->@*);
		    die "cleanup failed - $err" if $err;
		}

		die "config cleanup failed - $config_err" if $config_err;
	    });
	};

	return $rpcenv->fork_worker('lvmthinremove', "${vg}-${lv}", $user, $worker);
    }});