]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/node/disks/zfs.rs
c9a6ac72d9ba3cb877f3ce2edb7abe676d87ef2a
[proxmox-backup.git] / src / api2 / node / disks / zfs.rs
1 use anyhow::{bail, Error};
2 use serde_json::{json, Value};
3
4 use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
5 use proxmox_schema::{api, parse_property_string};
6 use proxmox_sys::task_log;
7
8 use pbs_api_types::{
9 ZpoolListItem, ZfsRaidLevel, ZfsCompressionType, DataStoreConfig,
10 NODE_SCHEMA, ZPOOL_NAME_SCHEMA, DATASTORE_SCHEMA, DISK_ARRAY_SCHEMA,
11 DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
12 PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
13 };
14
15 use crate::tools::disks::{
16 zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
17 DiskUsageType,
18 };
19
20 use proxmox_rest_server::WorkerTask;
21
22
23 #[api(
24 protected: true,
25 input: {
26 properties: {
27 node: {
28 schema: NODE_SCHEMA,
29 },
30 },
31 },
32 returns: {
33 description: "List of zpools.",
34 type: Array,
35 items: {
36 type: ZpoolListItem,
37 },
38 },
39 access: {
40 permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
41 },
42 )]
43 /// List zfs pools.
44 pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
45
46 let data = zpool_list(None, false)?;
47
48 let mut list = Vec::new();
49
50 for item in data {
51 if let Some(usage) = item.usage {
52 list.push(ZpoolListItem {
53 name: item.name,
54 health: item.health,
55 size: usage.size,
56 alloc: usage.alloc,
57 free: usage.free,
58 frag: usage.frag,
59 dedup: usage.dedup,
60 });
61 }
62 }
63
64 Ok(list)
65 }
66
67 #[api(
68 protected: true,
69 input: {
70 properties: {
71 node: {
72 schema: NODE_SCHEMA,
73 },
74 name: {
75 schema: ZPOOL_NAME_SCHEMA,
76 },
77 },
78 },
79 returns: {
80 description: "zpool vdev tree with status",
81 properties: {
82
83 },
84 },
85 access: {
86 permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
87 },
88 )]
89 /// Get zpool status details.
90 pub fn zpool_details(
91 name: String,
92 ) -> Result<Value, Error> {
93
94 let key_value_list = zpool_status(&name)?;
95
96 let config = match key_value_list.iter().find(|(k, _)| k == "config") {
97 Some((_, v)) => v,
98 None => bail!("got zpool status without config key"),
99 };
100
101 let vdev_list = parse_zpool_status_config_tree(config)?;
102 let mut tree = vdev_list_to_tree(&vdev_list)?;
103
104 for (k, v) in key_value_list {
105 if k != "config" {
106 tree[k] = v.into();
107 }
108 }
109
110 tree["name"] = tree.as_object_mut().unwrap()
111 .remove("pool")
112 .unwrap_or_else(|| name.into());
113
114
115 Ok(tree)
116 }
117
#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
            devices: {
                schema: DISK_LIST_SCHEMA,
            },
            raidlevel: {
                type: ZfsRaidLevel,
            },
            ashift: {
                schema: ZFS_ASHIFT_SCHEMA,
                optional: true,
            },
            compression: {
                type: ZfsCompressionType,
                optional: true,
            },
            "add-datastore": {
                description: "Configure a datastore using the zpool.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create a new ZFS pool. Will be mounted under '/mnt/datastore/<name>'.
///
/// Validates the requested devices and raid layout, then runs `zpool create`
/// (plus optional compression setup and datastore registration) inside a
/// background worker task. Returns the worker's UPID string.
pub fn create_zpool(
    name: String,
    devices: String,
    raidlevel: ZfsRaidLevel,
    compression: Option<String>,
    ashift: Option<usize>,
    add_datastore: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    // When invoked from the CLI, stream the worker log to stdout.
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let auth_id = rpcenv.get_auth_id().unwrap();

    let add_datastore = add_datastore.unwrap_or(false);

    // Default ashift=12 (4 KiB sectors) when the caller gave none.
    let ashift = ashift.unwrap_or(12);

    // Keep the raw string for the worker log message; parse it into the
    // actual device list below.
    let devices_text = devices.clone();
    let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
    let devices: Vec<String> = devices.as_array().unwrap().iter()
        .map(|v| v.as_str().unwrap().to_string()).collect();

    // Refuse disks that are unknown to the system or already in use.
    let disk_map = crate::tools::disks::get_disks(None, true)?;
    for disk in devices.iter() {
        match disk_map.get(disk) {
            Some(info) => {
                if info.used != DiskUsageType::Unused {
                    bail!("disk '{}' is already in use.", disk);
                }
            }
            None => {
                bail!("no such disk '{}'", disk);
            }
        }
    }

    // Minimum member-disk count required by each raid level.
    let min_disks = match raidlevel {
        ZfsRaidLevel::Single => 1,
        ZfsRaidLevel::Mirror => 2,
        ZfsRaidLevel::Raid10 => 4,
        ZfsRaidLevel::RaidZ => 3,
        ZfsRaidLevel::RaidZ2 => 4,
        ZfsRaidLevel::RaidZ3 => 5,
    };

    // Sanity checks
    if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
        bail!("Raid10 needs an even number of disks.");
    }

    if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
        bail!("Please give only one disk for single disk mode.");
    }

    if devices.len() < min_disks {
        bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
    }

    let mount_point = format!("/mnt/datastore/{}", &name);

    // check if the default path does exist already and bail if it does
    // otherwise 'zpool create' aborts after partitioning, but before creating the pool
    let default_path = std::path::PathBuf::from(&mount_point);

    match std::fs::metadata(&default_path) {
        Err(_) => {}, // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

    // All remaining work happens asynchronously in a worker task; the UPID
    // is returned to the caller right away.
    let upid_str = WorkerTask::new_thread(
        "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
    {
        task_log!(worker, "create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text);


        let mut command = std::process::Command::new("zpool");
        command.args(&["create", "-o", &format!("ashift={}", ashift), "-m", &mount_point, &name]);

        // Append the vdev layout matching the requested raid level.
        match raidlevel {
            ZfsRaidLevel::Single => {
                // Exactly one device (enforced by the sanity checks above).
                command.arg(&devices[0]);
            }
            ZfsRaidLevel::Mirror => {
                command.arg("mirror");
                command.args(devices);
            }
            ZfsRaidLevel::Raid10 => {
                // One mirror vdev per disk pair (even count checked above).
                devices.chunks(2).for_each(|pair| {
                    command.arg("mirror");
                    command.args(pair);
                });
            }
            ZfsRaidLevel::RaidZ => {
                command.arg("raidz");
                command.args(devices);
            }
            ZfsRaidLevel::RaidZ2 => {
                command.arg("raidz2");
                command.args(devices);
            }
            ZfsRaidLevel::RaidZ3 => {
                command.arg("raidz3");
                command.args(devices);
            }
        }

        task_log!(worker, "# {:?}", command);

        let output = pbs_tools::run_command(command, None)?;
        task_log!(worker, "{}", output);

        // If the systemd zfs-import template unit exists, enable an instance
        // for this pool so it is imported again on boot.
        if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
            let import_unit = format!("zfs-import@{}.service", proxmox::tools::systemd::escape_unit(&name, false));
            crate::tools::systemd::enable_unit(&import_unit)?;
        }

        // Optionally set the requested compression algorithm on the new pool.
        if let Some(compression) = compression {
            let mut command = std::process::Command::new("zfs");
            command.args(&["set", &format!("compression={}", compression), &name]);
            task_log!(worker, "# {:?}", command);
            let output = pbs_tools::run_command(command, None)?;
            task_log!(worker, "{}", output);
        }

        // Optionally register a datastore on the pool's mount point, holding
        // the datastore config lock to avoid racing concurrent edits.
        if add_datastore {
            let lock = pbs_config::datastore::lock_config()?;
            let datastore: DataStoreConfig =
                serde_json::from_value(json!({ "name": name, "path": mount_point }))?;

            let (config, _digest) = pbs_config::datastore::config()?;

            if config.sections.get(&datastore.name).is_some() {
                bail!("datastore '{}' already exists.", datastore.name);
            }

            crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
        }

        Ok(())
    })?;

    Ok(upid_str)
}
303
/// Router for a single named pool: GET returns detailed status (`zpool_details`).
pub const POOL_ROUTER: Router = Router::new()
    .get(&API_METHOD_ZPOOL_DETAILS);

/// Top-level zfs router: GET lists pools, POST creates one, and requests
/// with a pool name are dispatched to `POOL_ROUTER`.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)
    .post(&API_METHOD_CREATE_ZPOOL)
    .match_all("name", &POOL_ROUTER);