// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

#define NVDIMM_BASE_KEY         0
#define NVDIMM_NEW_KEY          1

static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");

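/*
 * An all-zero passphrase of NVDIMM_PASSPHRASE_LEN bytes. It stands in for
 * "no passphrase provisioned": a key id of 0 from userspace and a missing
 * kernel key both resolve to this buffer.
 */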
static const char zero_key[NVDIMM_PASSPHRASE_LEN];

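/*
 * Return the decrypted passphrase material of an encrypted key. The caller
 * must already hold key->sem for read (see nvdimm_request_key() and
 * nvdimm_lookup_user_key()).
 */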
static void *key_data(struct key *key)
{
        struct encrypted_key_payload *epayload = dereference_key_locked(key);

        lockdep_assert_held_read(&key->sem);

        return epayload->decrypted_data;
}

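/*
 * Release a key obtained from nvdimm_request_key() or
 * nvdimm_lookup_user_key(): drop the read lock on key->sem and put the
 * reference. A NULL key is tolerated so callers can put unconditionally.
 */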
static void nvdimm_put_key(struct key *key)
{
        if (!key)
                return;

        up_read(&key->sem);
        key_put(key);
}

/*
 * Retrieve the kernel key for the DIMM, requesting it from user space if
 * necessary. Returns a key held for read; it must be put with
 * nvdimm_put_key() before the usage goes out of scope.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
        struct key *key = NULL;
        static const char NVDIMM_PREFIX[] = "nvdimm:";
        char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
        struct device *dev = &nvdimm->dev;

        sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
        key = request_key(&key_type_encrypted, desc, "");
        if (IS_ERR(key)) {
                if (PTR_ERR(key) == -ENOKEY)
                        dev_dbg(dev, "request_key() found no key\n");
                else
                        dev_dbg(dev, "request_key() upcall failed\n");
                key = NULL;
        } else {
                struct encrypted_key_payload *epayload;

                down_read(&key->sem);
                epayload = dereference_key_locked(key);
                if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
                        up_read(&key->sem);
                        key_put(key);
                        key = NULL;
                }
        }

        return key;
}

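/*
 * Return the passphrase payload for the DIMM's kernel key, or zero_key if
 * no suitable key could be found. On success *key holds the read-locked
 * key and must be released with nvdimm_put_key(); when zero_key is
 * returned, *key is NULL and nvdimm_put_key() is a no-op.
 */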
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
                struct key **key)
{
        *key = nvdimm_request_key(nvdimm);
        if (!*key)
                return zero_key;

        return key_data(*key);
}

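/*
 * Look up an encrypted key by the serial number that userspace passed in.
 * The key is returned read locked; the lock is taken with the given lockdep
 * subclass so that holding both the current and the new passphrase key at
 * the same time (see security_update()) does not trip lockdep. Returns NULL
 * if the key cannot be found, is not of the encrypted type, or has an
 * unexpected payload length.
 */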
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
                key_serial_t id, int subclass)
{
        key_ref_t keyref;
        struct key *key;
        struct encrypted_key_payload *epayload;
        struct device *dev = &nvdimm->dev;

        keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
        if (IS_ERR(keyref))
                return NULL;

        key = key_ref_to_ptr(keyref);
        if (key->type != &key_type_encrypted) {
                key_put(key);
                return NULL;
        }

        dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

        down_read_nested(&key->sem, subclass);
        epayload = dereference_key_locked(key);
        if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
                up_read(&key->sem);
                key_put(key);
                key = NULL;
        }
        return key;
}

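/*
 * Resolve a userspace-provided key id to its passphrase payload. A key id
 * of 0 means "no passphrase": for the current (base) key this maps to
 * zero_key, while a new key of 0 is rejected. On success with a non-zero
 * id, *key holds the read-locked key and must be released with
 * nvdimm_put_key().
 */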
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
                key_serial_t id, int subclass, struct key **key)
{
        *key = NULL;
        if (id == 0) {
                if (subclass == NVDIMM_BASE_KEY)
                        return zero_key;
                else
                        return NULL;
        }

        *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
        if (!*key)
                return NULL;

        return key_data(*key);
}

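/*
 * Confirm that the kernel's copy of the passphrase still matches what the
 * hardware expects. Used when the DIMM is already unlocked (typically by
 * pre-OS firmware) and the key_revalidate module parameter is set.
 */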
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
        struct key *key;
        int rc;
        const void *data;

        if (!nvdimm->sec.ops->change_key)
                return -EOPNOTSUPP;

        data = nvdimm_get_key_payload(nvdimm, &key);

        /*
         * Send the same key to the hardware as both the new and the old
         * key to verify that the key is good.
         */
        rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
        if (rc < 0) {
                nvdimm_put_key(key);
                return rc;
        }

        nvdimm_put_key(key);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
        return 0;
}

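/*
 * Unlock the DIMM with the passphrase from the kernel keyring. The caller
 * must hold the nvdimm bus lock; nvdimm_security_unlock() below is the
 * locked wrapper.
 */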
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key;
        const void *data;
        int rc;

        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
                        || !nvdimm->sec.flags)
                return -EIO;

        /* No need to go further if security is disabled */
        if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
                return 0;

        if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                dev_dbg(dev, "Security operation in progress.\n");
                return -EBUSY;
        }

        /*
         * If the pre-OS has unlocked the DIMM, attempt to send the key
         * from request_key() to the hardware for verification. Failure
         * to revalidate the key against the hardware results in a
         * freeze of the security configuration. I.e. if the OS does not
         * have the key, security is being managed pre-OS.
         */
        if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
                if (!key_revalidate)
                        return 0;

                return nvdimm_key_revalidate(nvdimm);
        } else
                data = nvdimm_get_key_payload(nvdimm, &key);

        rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");

        nvdimm_put_key(key);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
        return rc;
}

int nvdimm_security_unlock(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int rc;

        nvdimm_bus_lock(dev);
        rc = __nvdimm_security_unlock(nvdimm);
        nvdimm_bus_unlock(dev);
        return rc;
}

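/*
 * Common precondition check for key-changing operations: refuse to proceed
 * if the security state is frozen or an overwrite is already in flight.
 */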
static int check_security_state(struct nvdimm *nvdimm)
{
        struct device *dev = &nvdimm->dev;

        if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
                dev_dbg(dev, "Incorrect security state: %#lx\n",
                                nvdimm->sec.flags);
                return -EIO;
        }

        if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                dev_dbg(dev, "Security operation in progress.\n");
                return -EBUSY;
        }

        return 0;
}

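/*
 * Disable passphrase protection on the DIMM, authenticating with the
 * current passphrase identified by @keyid (0 selects the zero key).
 */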
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key;
        int rc;
        const void *data;

        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
                        || !nvdimm->sec.flags)
                return -EOPNOTSUPP;

        rc = check_security_state(nvdimm);
        if (rc)
                return rc;

        data = nvdimm_get_user_key_payload(nvdimm, keyid,
                        NVDIMM_BASE_KEY, &key);
        if (!data)
                return -ENOKEY;

        rc = nvdimm->sec.ops->disable(nvdimm, data);
        dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");

        nvdimm_put_key(key);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
        return rc;
}

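/*
 * Change the passphrase: authenticate with the key named by @keyid
 * (0 selects the zero key) and install the payload of @new_keyid as the
 * new passphrase. @pass_type selects the user or master passphrase.
 */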
static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
                unsigned int new_keyid,
                enum nvdimm_passphrase_type pass_type)
{
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key, *newkey;
        int rc;
        const void *data, *newdata;

        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
                        || !nvdimm->sec.flags)
                return -EOPNOTSUPP;

        rc = check_security_state(nvdimm);
        if (rc)
                return rc;

        data = nvdimm_get_user_key_payload(nvdimm, keyid,
                        NVDIMM_BASE_KEY, &key);
        if (!data)
                return -ENOKEY;

        newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
                        NVDIMM_NEW_KEY, &newkey);
        if (!newdata) {
                nvdimm_put_key(key);
                return -ENOKEY;
        }

        rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
        dev_dbg(dev, "key: %d %d update%s: %s\n",
                        key_serial(key), key_serial(newkey),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");

        nvdimm_put_key(newkey);
        nvdimm_put_key(key);
        if (pass_type == NVDIMM_MASTER)
                nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
                                NVDIMM_MASTER);
        else
                nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
                                NVDIMM_USER);
        return rc;
}

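/*
 * Perform a secure erase of the DIMM, authenticating with the passphrase
 * named by @keyid. @pass_type selects the user or master passphrase; a
 * master-passphrase erase is only allowed while the master security state
 * reports unlocked.
 */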
static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
                enum nvdimm_passphrase_type pass_type)
{
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key = NULL;
        int rc;
        const void *data;

        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
                        || !nvdimm->sec.flags)
                return -EOPNOTSUPP;

        rc = check_security_state(nvdimm);
        if (rc)
                return rc;

        if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
                        && pass_type == NVDIMM_MASTER) {
                dev_dbg(dev,
                        "Attempt to secure erase in wrong master state.\n");
                return -EOPNOTSUPP;
        }

        data = nvdimm_get_user_key_payload(nvdimm, keyid,
                        NVDIMM_BASE_KEY, &key);
        if (!data)
                return -ENOKEY;

        rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");

        nvdimm_put_key(key);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
        return rc;
}

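/*
 * Kick off an overwrite of the DIMM media. The operation runs in the
 * background on the hardware, so on successful submission the overwrite
 * and work-pending flags are set and a delayed work item is queued to poll
 * for completion (see __nvdimm_security_overwrite_query()).
 */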
static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key = NULL;
        int rc;
        const void *data;

        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
                        || !nvdimm->sec.flags)
                return -EOPNOTSUPP;

        if (dev->driver == NULL) {
                dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
                return -EINVAL;
        }

        rc = check_security_state(nvdimm);
        if (rc)
                return rc;

        data = nvdimm_get_user_key_payload(nvdimm, keyid,
                        NVDIMM_BASE_KEY, &key);
        if (!data)
                return -ENOKEY;

        rc = nvdimm->sec.ops->overwrite(nvdimm, data);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");

        nvdimm_put_key(key);
        if (rc == 0) {
                set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
                set_bit(NDD_WORK_PENDING, &nvdimm->flags);
                set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
                /*
                 * Make sure we don't lose the device while the overwrite
                 * query work is pending.
                 */
                get_device(dev);
                queue_delayed_work(system_wq, &nvdimm->dwork, 0);
        }

        return rc;
}

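/*
 * Poll the hardware for overwrite completion. Re-queues itself with a
 * growing delay while the operation is still in progress; once it finishes
 * (or fails) the overwrite flags are cleared, the security flags are
 * refreshed and userspace is notified via sysfs.
 */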
void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
        int rc;
        unsigned int tmo;

        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

        /*
         * Abort and release the device if we no longer have the overwrite
         * work pending. It means the work has been canceled.
         */
        if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
                return;

        tmo = nvdimm->sec.overwrite_tmo;

        if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
                        || !nvdimm->sec.flags)
                return;

        rc = nvdimm->sec.ops->query_overwrite(nvdimm);
        if (rc == -EBUSY) {
                /* setup delayed work again */
                tmo += 10;
                queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
                nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
                return;
        }

        if (rc < 0)
                dev_dbg(&nvdimm->dev, "overwrite failed\n");
        else
                dev_dbg(&nvdimm->dev, "overwrite completed\n");

        /*
         * Mark the overwrite work done and update the dimm security flags,
         * then send a sysfs event notification so that userspace poll
         * threads wake up and pick up the changed state.
         */
        nvdimm->sec.overwrite_tmo = 0;
        clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
        clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
        nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
        if (nvdimm->sec.overwrite_state)
                sysfs_notify_dirent(nvdimm->sec.overwrite_state);
        put_device(&nvdimm->dev);
}

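/* Delayed work handler: take the bus lock and poll the overwrite status. */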
void nvdimm_security_overwrite_query(struct work_struct *work)
{
        struct nvdimm *nvdimm =
                container_of(work, typeof(*nvdimm), dwork.work);

        nvdimm_bus_lock(&nvdimm->dev);
        __nvdimm_security_overwrite_query(nvdimm);
        nvdimm_bus_unlock(&nvdimm->dev);
}

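/*
 * Table of sysfs security commands, built with an X-macro so that the enum
 * of op ids and the name/argument-count table below stay in sync. The
 * argument count includes the command word itself, e.g. "update" takes the
 * command plus two key ids for a total of 3.
 */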
#define OPS \
        C( OP_FREEZE,           "freeze",               1), \
        C( OP_DISABLE,          "disable",              2), \
        C( OP_UPDATE,           "update",               3), \
        C( OP_ERASE,            "erase",                2), \
        C( OP_OVERWRITE,        "overwrite",            2), \
        C( OP_MASTER_UPDATE,    "master_update",        3), \
        C( OP_MASTER_ERASE,     "master_erase",         2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
        const char *name;
        int args;
} ops[] = { OPS };
#undef C

#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10

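/*
 * Parse and dispatch a security command written to the DIMM's sysfs
 * interface. The expected format is "<op> [<keyid> [<new keyid>]]", where
 * the key ids are serial numbers of encrypted keys already loaded into the
 * kernel keyring (0 selects the zero key).
 *
 * Illustrative example, assuming the command is written to the usual
 * "security" attribute of an nmem device (device names and key serials
 * will differ per system):
 *
 *   echo "update 231489510 912305731" > /sys/bus/nd/devices/nmem0/security
 */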
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        ssize_t rc;
        char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
                nkeystr[KEY_ID_SIZE+1];
        unsigned int key, newkey;
        int i;

        rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
                        " %"__stringify(KEY_ID_SIZE)"s"
                        " %"__stringify(KEY_ID_SIZE)"s",
                        cmd, keystr, nkeystr);
        if (rc < 1)
                return -EINVAL;
        for (i = 0; i < ARRAY_SIZE(ops); i++)
                if (sysfs_streq(cmd, ops[i].name))
                        break;
        if (i >= ARRAY_SIZE(ops))
                return -EINVAL;
        if (ops[i].args > 1)
                rc = kstrtouint(keystr, 0, &key);
        if (rc >= 0 && ops[i].args > 2)
                rc = kstrtouint(nkeystr, 0, &newkey);
        if (rc < 0)
                return rc;

        if (i == OP_FREEZE) {
                dev_dbg(dev, "freeze\n");
                rc = nvdimm_security_freeze(nvdimm);
        } else if (i == OP_DISABLE) {
                dev_dbg(dev, "disable %u\n", key);
                rc = security_disable(nvdimm, key);
        } else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
                dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
                rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
                                ? NVDIMM_USER : NVDIMM_MASTER);
        } else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
                dev_dbg(dev, "%s %u\n", ops[i].name, key);
                if (atomic_read(&nvdimm->busy)) {
                        dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
                        return -EBUSY;
                }
                rc = security_erase(nvdimm, key, i == OP_ERASE
                                ? NVDIMM_USER : NVDIMM_MASTER);
        } else if (i == OP_OVERWRITE) {
                dev_dbg(dev, "overwrite %u\n", key);
                if (atomic_read(&nvdimm->busy)) {
                        dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
                        return -EBUSY;
                }
                rc = security_overwrite(nvdimm, key);
        } else
                return -EINVAL;

        if (rc == 0)
                rc = len;
        return rc;
}