]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/node/disks/zfs.rs
src/api2/node/disks/zfs.rs: add zpool_details api
[proxmox-backup.git] / src / api2 / node / disks / zfs.rs
1 use anyhow::{bail, Error};
2 use serde_json::{json, Value};
3 use ::serde::{Deserialize, Serialize};
4
5 use proxmox::api::{
6 api, Permission, RpcEnvironment, RpcEnvironmentType,
7 schema::{
8 Schema,
9 StringSchema,
10 ArraySchema,
11 IntegerSchema,
12 ApiStringFormat,
13 parse_property_string,
14 },
15 };
16 use proxmox::api::router::Router;
17
18 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
19 use crate::tools::disks::{
20 zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
21 DiskUsageType,
22 };
23
24 use crate::server::WorkerTask;
25
26 use crate::api2::types::*;
27
/// Schema for an array of block device names (e.g. "sda"); used as the
/// element schema behind [`DISK_LIST_SCHEMA`] property strings.
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
    .schema();
31
/// Schema for a comma-separated list of disk names, transmitted as a
/// single property string and parsed against [`DISK_ARRAY_SCHEMA`].
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of disk names, comma separated.")
    .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
    .schema();
36
/// Schema for the zpool `ashift` property (sector size as a power of two).
/// Allowed range 9..=16; default 12 (i.e. 4 KiB sectors).
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
    "Pool sector size exponent.")
    .minimum(9)
    .maximum(16)
    .default(12)
    .schema();
43
44
// NOTE: the `///` doc comments below double as API schema descriptions via
// the #[api] macro; `default: "On"` marks `On` as the schema default, and
// serde serializes variants in lowercase (e.g. "lz4") on the wire.
#[api(
    default: "On",
)]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
    /// Gnu Zip
    Gzip,
    /// LZ4
    Lz4,
    /// LZJB
    Lzjb,
    /// ZLE
    Zle,
    /// Enable compression using the default algorithm.
    On,
    /// Disable compression.
    Off,
}
65
// NOTE: the `///` doc comments below double as API schema descriptions via
// the #[api] macro; serde serializes variants in lowercase (e.g. "raidz2")
// on the wire. `create_zpool` maps these to `zpool create` vdev specs.
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
    /// Single Disk
    Single,
    /// Mirror
    Mirror,
    /// Raid10
    Raid10,
    /// RaidZ
    RaidZ,
    /// RaidZ2
    RaidZ2,
    /// RaidZ3
    RaidZ3,
}
84
85
86 #[api()]
87 #[derive(Debug, Serialize, Deserialize)]
88 #[serde(rename_all="kebab-case")]
89 /// zpool list item
90 pub struct ZpoolListItem {
91 /// zpool name
92 pub name: String,
93 /// Health
94 pub health: String,
95 /// Total size
96 pub size: u64,
97 /// Used size
98 pub alloc: u64,
99 /// Free space
100 pub free: u64,
101 /// ZFS fragnentation level
102 pub frag: u64,
103 /// ZFS deduplication ratio
104 pub dedup: f64,
105 }
106
107
108 #[api(
109 protected: true,
110 input: {
111 properties: {
112 node: {
113 schema: NODE_SCHEMA,
114 },
115 },
116 },
117 returns: {
118 description: "List of zpools.",
119 type: Array,
120 items: {
121 type: ZpoolListItem,
122 },
123 },
124 access: {
125 permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
126 },
127 )]
128 /// List zfs pools.
129 pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
130
131 let data = zpool_list(None, false)?;
132
133 let mut list = Vec::new();
134
135 for item in data {
136 if let Some(usage) = item.usage {
137 list.push(ZpoolListItem {
138 name: item.name,
139 health: item.health,
140 size: usage.size,
141 alloc: usage.alloc,
142 free: usage.free,
143 frag: usage.frag,
144 dedup: usage.dedup,
145 });
146 }
147 }
148
149 Ok(list)
150 }
151
152 #[api(
153 protected: true,
154 input: {
155 properties: {
156 node: {
157 schema: NODE_SCHEMA,
158 },
159 name: {
160 schema: DATASTORE_SCHEMA,
161 },
162 },
163 },
164 returns: {
165 description: "zpool vdev tree with status",
166 properties: {
167
168 },
169 },
170 access: {
171 permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
172 },
173 )]
174 /// Get zpool status details.
175 pub fn zpool_details(
176 name: String,
177 ) -> Result<Value, Error> {
178
179 let key_value_list = zpool_status(&name)?;
180
181 let config = match key_value_list.iter().find(|(k, _)| k == "config") {
182 Some((_, v)) => v,
183 None => bail!("got zpool status without config key"),
184 };
185
186 let vdev_list = parse_zpool_status_config_tree(config)?;
187 let mut tree = vdev_list_to_tree(&vdev_list);
188
189 for (k, v) in key_value_list {
190 if k != "config" {
191 tree[k] = v.into();
192 }
193 }
194
195 tree["name"] = tree.as_object_mut().unwrap()
196 .remove("pool")
197 .unwrap_or(Value::Null);
198
199
200 Ok(tree)
201 }
202
203 #[api(
204 protected: true,
205 input: {
206 properties: {
207 node: {
208 schema: NODE_SCHEMA,
209 },
210 name: {
211 schema: DATASTORE_SCHEMA,
212 },
213 devices: {
214 schema: DISK_LIST_SCHEMA,
215 },
216 raidlevel: {
217 type: ZfsRaidLevel,
218 },
219 ashift: {
220 schema: ZFS_ASHIFT_SCHEMA,
221 optional: true,
222 },
223 compression: {
224 type: ZfsCompressionType,
225 optional: true,
226 },
227 "add-datastore": {
228 description: "Configure a datastore using the zpool.",
229 type: bool,
230 optional: true,
231 },
232 },
233 },
234 returns: {
235 schema: UPID_SCHEMA,
236 },
237 access: {
238 permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
239 },
240 )]
241 /// Create a new ZFS pool.
242 pub fn create_zpool(
243 name: String,
244 devices: String,
245 raidlevel: ZfsRaidLevel,
246 compression: Option<String>,
247 ashift: Option<usize>,
248 add_datastore: Option<bool>,
249 rpcenv: &mut dyn RpcEnvironment,
250 ) -> Result<String, Error> {
251
252 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
253
254 let username = rpcenv.get_user().unwrap();
255
256 let add_datastore = add_datastore.unwrap_or(false);
257
258 let ashift = ashift.unwrap_or(12);
259
260 let devices_text = devices.clone();
261 let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
262 let devices: Vec<String> = devices.as_array().unwrap().iter()
263 .map(|v| v.as_str().unwrap().to_string()).collect();
264
265 let disk_map = crate::tools::disks::get_disks(None, true)?;
266 for disk in devices.iter() {
267 match disk_map.get(disk) {
268 Some(info) => {
269 if info.used != DiskUsageType::Unused {
270 bail!("disk '{}' is already in use.", disk);
271 }
272 }
273 None => {
274 bail!("no such disk '{}'", disk);
275 }
276 }
277 }
278
279 let min_disks = match raidlevel {
280 ZfsRaidLevel::Single => 1,
281 ZfsRaidLevel::Mirror => 2,
282 ZfsRaidLevel::Raid10 => 4,
283 ZfsRaidLevel::RaidZ => 3,
284 ZfsRaidLevel::RaidZ2 => 4,
285 ZfsRaidLevel::RaidZ3 => 5,
286 };
287
288 // Sanity checks
289 if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
290 bail!("Raid10 needs an even number of disks.");
291 }
292
293 if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
294 bail!("Please give only one disk for single disk mode.");
295 }
296
297 if devices.len() < min_disks {
298 bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
299 }
300
301 let upid_str = WorkerTask::new_thread(
302 "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
303 {
304 worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
305
306
307 let mut command = std::process::Command::new("zpool");
308 command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);
309
310 match raidlevel {
311 ZfsRaidLevel::Single => {
312 command.arg(&devices[0]);
313 }
314 ZfsRaidLevel::Mirror => {
315 command.arg("mirror");
316 command.args(devices);
317 }
318 ZfsRaidLevel::Raid10 => {
319 devices.chunks(2).for_each(|pair| {
320 command.arg("mirror");
321 command.args(pair);
322 });
323 }
324 ZfsRaidLevel::RaidZ => {
325 command.arg("raidz");
326 command.args(devices);
327 }
328 ZfsRaidLevel::RaidZ2 => {
329 command.arg("raidz2");
330 command.args(devices);
331 }
332 ZfsRaidLevel::RaidZ3 => {
333 command.arg("raidz3");
334 command.args(devices);
335 }
336 }
337
338 worker.log(format!("# {:?}", command));
339
340 let output = crate::tools::run_command(command, None)?;
341 worker.log(output);
342
343 if let Some(compression) = compression {
344 let mut command = std::process::Command::new("zfs");
345 command.args(&["set", &format!("compression={}", compression), &name]);
346 worker.log(format!("# {:?}", command));
347 let output = crate::tools::run_command(command, None)?;
348 worker.log(output);
349 }
350
351 if add_datastore {
352 let mount_point = format!("/{}", name);
353 crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
354 }
355
356 Ok(())
357 })?;
358
359 Ok(upid_str)
360 }
361
/// Router for a single zpool sub-path: GET returns detailed status.
pub const POOL_ROUTER: Router = Router::new()
    .get(&API_METHOD_ZPOOL_DETAILS);
364
/// Router for the zpool collection: GET lists pools, POST creates a new
/// pool, and `{name}` sub-paths are dispatched to `POOL_ROUTER`.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)
    .post(&API_METHOD_CREATE_ZPOOL)
    .match_all("name", &POOL_ROUTER);