]>
Commit | Line | Data |
---|---|---|
929a13b3 | 1 | use anyhow::{bail, Error}; |
026dc1d1 | 2 | use serde_json::{json, Value}; |
929a13b3 DM |
3 | use ::serde::{Deserialize, Serialize}; |
4 | ||
5 | use proxmox::api::{ | |
6 | api, Permission, RpcEnvironment, RpcEnvironmentType, | |
7 | schema::{ | |
8 | Schema, | |
9 | StringSchema, | |
10 | ArraySchema, | |
11 | IntegerSchema, | |
12 | ApiStringFormat, | |
13 | parse_property_string, | |
14 | }, | |
15 | }; | |
16 | use proxmox::api::router::Router; | |
17 | ||
18 | use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY}; | |
19 | use crate::tools::disks::{ | |
026dc1d1 | 20 | zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree, |
929a13b3 DM |
21 | DiskUsageType, |
22 | }; | |
23 | ||
24 | use crate::server::WorkerTask; | |
25 | ||
26 | use crate::api2::types::*; | |
27 | ||
905147a5 SI |
28 | use crate::tools::systemd; |
29 | ||
929a13b3 DM |
/// Schema for an array of block device names.
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
    .schema();

/// Schema for a comma separated list of disk names, encoded as a
/// property string of `DISK_ARRAY_SCHEMA` elements.
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of disk names, comma separated.")
    .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
    .schema();
38 | ||
/// Schema for the zpool `ashift` property (sector size exponent,
/// i.e. sector size = 2^ashift bytes); range 9..=16, default 12.
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
    "Pool sector size exponent.")
    .minimum(9)
    .maximum(16)
    .default(12)
    .schema();
45 | ||
7957fabf DC |
/// Schema for a ZFS pool name, validated against `ZPOOL_NAME_REGEX`.
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
    .schema();
929a13b3 DM |
49 | |
// NOTE(review): variants serialize in lowercase ("gzip", "lz4", ...);
// the api default is "On". The variant doc comments presumably double
// as the generated API schema descriptions — keep them user facing.
#[api(
    default: "On",
)]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
    /// Gnu Zip
    Gzip,
    /// LZ4
    Lz4,
    /// LZJB
    Lzjb,
    /// ZLE
    Zle,
    /// Enable compression using the default algorithm.
    On,
    /// Disable compression.
    Off,
}
70 | ||
// NOTE(review): serialized in lowercase ("single", "mirror", ...).
// Minimum disk counts and device layout per level are enforced in
// `create_zpool` below.
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
    /// Single Disk
    Single,
    /// Mirror
    Mirror,
    /// Raid10
    Raid10,
    /// RaidZ
    RaidZ,
    /// RaidZ2
    RaidZ2,
    /// RaidZ3
    RaidZ3,
}
89 | ||
90 | ||
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
    /// zpool name
    pub name: String,
    /// Health
    pub health: String,
    /// Total size
    pub size: u64,
    /// Used size
    pub alloc: u64,
    /// Free space
    pub free: u64,
    /// ZFS fragmentation level
    pub frag: u64,
    /// ZFS deduplication ratio
    pub dedup: f64,
}
111 | ||
112 | ||
113 | #[api( | |
114 | protected: true, | |
115 | input: { | |
116 | properties: { | |
117 | node: { | |
118 | schema: NODE_SCHEMA, | |
119 | }, | |
120 | }, | |
121 | }, | |
122 | returns: { | |
123 | description: "List of zpools.", | |
124 | type: Array, | |
125 | items: { | |
126 | type: ZpoolListItem, | |
127 | }, | |
128 | }, | |
129 | access: { | |
130 | permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false), | |
131 | }, | |
132 | )] | |
133 | /// List zfs pools. | |
134 | pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> { | |
135 | ||
66af7f51 | 136 | let data = zpool_list(None, false)?; |
929a13b3 DM |
137 | |
138 | let mut list = Vec::new(); | |
139 | ||
140 | for item in data { | |
141 | if let Some(usage) = item.usage { | |
142 | list.push(ZpoolListItem { | |
143 | name: item.name, | |
144 | health: item.health, | |
145 | size: usage.size, | |
146 | alloc: usage.alloc, | |
147 | free: usage.free, | |
148 | frag: usage.frag, | |
149 | dedup: usage.dedup, | |
150 | }); | |
151 | } | |
152 | } | |
153 | ||
154 | Ok(list) | |
155 | } | |
156 | ||
026dc1d1 DM |
157 | #[api( |
158 | protected: true, | |
159 | input: { | |
160 | properties: { | |
161 | node: { | |
162 | schema: NODE_SCHEMA, | |
163 | }, | |
164 | name: { | |
7957fabf | 165 | schema: ZPOOL_NAME_SCHEMA, |
026dc1d1 DM |
166 | }, |
167 | }, | |
168 | }, | |
169 | returns: { | |
170 | description: "zpool vdev tree with status", | |
171 | properties: { | |
172 | ||
173 | }, | |
174 | }, | |
175 | access: { | |
176 | permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false), | |
177 | }, | |
178 | )] | |
179 | /// Get zpool status details. | |
180 | pub fn zpool_details( | |
181 | name: String, | |
182 | ) -> Result<Value, Error> { | |
183 | ||
184 | let key_value_list = zpool_status(&name)?; | |
185 | ||
186 | let config = match key_value_list.iter().find(|(k, _)| k == "config") { | |
187 | Some((_, v)) => v, | |
188 | None => bail!("got zpool status without config key"), | |
189 | }; | |
190 | ||
191 | let vdev_list = parse_zpool_status_config_tree(config)?; | |
4e37d9ce | 192 | let mut tree = vdev_list_to_tree(&vdev_list)?; |
026dc1d1 DM |
193 | |
194 | for (k, v) in key_value_list { | |
195 | if k != "config" { | |
196 | tree[k] = v.into(); | |
197 | } | |
198 | } | |
199 | ||
200 | tree["name"] = tree.as_object_mut().unwrap() | |
201 | .remove("pool") | |
42d19fdf | 202 | .unwrap_or_else(|| name.into()); |
026dc1d1 DM |
203 | |
204 | ||
205 | Ok(tree) | |
206 | } | |
207 | ||
929a13b3 DM |
208 | #[api( |
209 | protected: true, | |
210 | input: { | |
211 | properties: { | |
212 | node: { | |
213 | schema: NODE_SCHEMA, | |
214 | }, | |
215 | name: { | |
216 | schema: DATASTORE_SCHEMA, | |
217 | }, | |
218 | devices: { | |
219 | schema: DISK_LIST_SCHEMA, | |
220 | }, | |
221 | raidlevel: { | |
222 | type: ZfsRaidLevel, | |
223 | }, | |
224 | ashift: { | |
225 | schema: ZFS_ASHIFT_SCHEMA, | |
226 | optional: true, | |
227 | }, | |
228 | compression: { | |
229 | type: ZfsCompressionType, | |
230 | optional: true, | |
231 | }, | |
232 | "add-datastore": { | |
233 | description: "Configure a datastore using the zpool.", | |
234 | type: bool, | |
235 | optional: true, | |
236 | }, | |
237 | }, | |
238 | }, | |
239 | returns: { | |
240 | schema: UPID_SCHEMA, | |
241 | }, | |
242 | access: { | |
243 | permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false), | |
244 | }, | |
245 | )] | |
246 | /// Create a new ZFS pool. | |
247 | pub fn create_zpool( | |
248 | name: String, | |
249 | devices: String, | |
250 | raidlevel: ZfsRaidLevel, | |
251 | compression: Option<String>, | |
252 | ashift: Option<usize>, | |
253 | add_datastore: Option<bool>, | |
254 | rpcenv: &mut dyn RpcEnvironment, | |
255 | ) -> Result<String, Error> { | |
256 | ||
257 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; | |
258 | ||
e7cb4dc5 | 259 | let userid: Userid = rpcenv.get_user().unwrap().parse()?; |
929a13b3 DM |
260 | |
261 | let add_datastore = add_datastore.unwrap_or(false); | |
262 | ||
263 | let ashift = ashift.unwrap_or(12); | |
264 | ||
265 | let devices_text = devices.clone(); | |
266 | let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?; | |
267 | let devices: Vec<String> = devices.as_array().unwrap().iter() | |
268 | .map(|v| v.as_str().unwrap().to_string()).collect(); | |
269 | ||
270 | let disk_map = crate::tools::disks::get_disks(None, true)?; | |
271 | for disk in devices.iter() { | |
272 | match disk_map.get(disk) { | |
273 | Some(info) => { | |
274 | if info.used != DiskUsageType::Unused { | |
275 | bail!("disk '{}' is already in use.", disk); | |
276 | } | |
277 | } | |
278 | None => { | |
279 | bail!("no such disk '{}'", disk); | |
280 | } | |
281 | } | |
282 | } | |
283 | ||
284 | let min_disks = match raidlevel { | |
285 | ZfsRaidLevel::Single => 1, | |
286 | ZfsRaidLevel::Mirror => 2, | |
287 | ZfsRaidLevel::Raid10 => 4, | |
288 | ZfsRaidLevel::RaidZ => 3, | |
289 | ZfsRaidLevel::RaidZ2 => 4, | |
290 | ZfsRaidLevel::RaidZ3 => 5, | |
291 | }; | |
292 | ||
293 | // Sanity checks | |
294 | if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 { | |
295 | bail!("Raid10 needs an even number of disks."); | |
296 | } | |
297 | ||
298 | if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 { | |
299 | bail!("Please give only one disk for single disk mode."); | |
300 | } | |
301 | ||
302 | if devices.len() < min_disks { | |
303 | bail!("{:?} needs at least {} disks.", raidlevel, min_disks); | |
304 | } | |
305 | ||
b91b7d9f DC |
306 | // check if the default path does exist already and bail if it does |
307 | // otherwise we get an error on mounting | |
308 | let mut default_path = std::path::PathBuf::from("/"); | |
309 | default_path.push(&name); | |
310 | ||
311 | match std::fs::metadata(&default_path) { | |
312 | Err(_) => {}, // path does not exist | |
313 | Ok(_) => { | |
314 | bail!("path {:?} already exists", default_path); | |
315 | } | |
316 | } | |
317 | ||
929a13b3 | 318 | let upid_str = WorkerTask::new_thread( |
e7cb4dc5 | 319 | "zfscreate", Some(name.clone()), userid, to_stdout, move |worker| |
929a13b3 DM |
320 | { |
321 | worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text)); | |
322 | ||
323 | ||
324 | let mut command = std::process::Command::new("zpool"); | |
325 | command.args(&["create", "-o", &format!("ashift={}", ashift), &name]); | |
326 | ||
327 | match raidlevel { | |
328 | ZfsRaidLevel::Single => { | |
329 | command.arg(&devices[0]); | |
330 | } | |
331 | ZfsRaidLevel::Mirror => { | |
332 | command.arg("mirror"); | |
333 | command.args(devices); | |
334 | } | |
335 | ZfsRaidLevel::Raid10 => { | |
336 | devices.chunks(2).for_each(|pair| { | |
337 | command.arg("mirror"); | |
338 | command.args(pair); | |
339 | }); | |
340 | } | |
341 | ZfsRaidLevel::RaidZ => { | |
342 | command.arg("raidz"); | |
343 | command.args(devices); | |
344 | } | |
345 | ZfsRaidLevel::RaidZ2 => { | |
346 | command.arg("raidz2"); | |
347 | command.args(devices); | |
348 | } | |
349 | ZfsRaidLevel::RaidZ3 => { | |
350 | command.arg("raidz3"); | |
351 | command.args(devices); | |
352 | } | |
353 | } | |
354 | ||
355 | worker.log(format!("# {:?}", command)); | |
356 | ||
357 | let output = crate::tools::run_command(command, None)?; | |
358 | worker.log(output); | |
359 | ||
905147a5 SI |
360 | let import_unit = format!("zfs-import@{}.service", systemd::escape_unit(&name, false)); |
361 | systemd::enable_unit(&import_unit)?; | |
362 | ||
929a13b3 DM |
363 | if let Some(compression) = compression { |
364 | let mut command = std::process::Command::new("zfs"); | |
365 | command.args(&["set", &format!("compression={}", compression), &name]); | |
366 | worker.log(format!("# {:?}", command)); | |
367 | let output = crate::tools::run_command(command, None)?; | |
368 | worker.log(output); | |
369 | } | |
370 | ||
371 | if add_datastore { | |
372 | let mount_point = format!("/{}", name); | |
373 | crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))? | |
374 | } | |
375 | ||
376 | Ok(()) | |
377 | })?; | |
378 | ||
379 | Ok(upid_str) | |
380 | } | |
381 | ||
026dc1d1 DM |
/// Router for a single zpool: GET returns the status details.
pub const POOL_ROUTER: Router = Router::new()
    .get(&API_METHOD_ZPOOL_DETAILS);
384 | ||
929a13b3 DM |
/// Top-level router: GET lists pools, POST creates one, and
/// `.../{name}` is delegated to `POOL_ROUTER`.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)
    .post(&API_METHOD_CREATE_ZPOOL)
    .match_all("name", &POOL_ROUTER);