/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

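/*
 * Get Log Page length: NUMD is a 0's based dword count split across the
 * NUMDU (upper 16 bits) and NUMDL (lower 16 bits) fields of the command.
 * For example, numdu = 0x0001 and numdl = 0x0000 give numd = 0x10000,
 * i.e. a transfer of (0x10000 + 1) * 4 bytes.
 */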
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

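/*
 * The error log is kept as a circular buffer of NVMET_ERROR_LOG_SLOTS
 * entries; walk it backwards from the slot of the most recent error so
 * the newest entries are reported first, wrapping around at slot 0.
 */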
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_SUCCESS;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot));
                if (status)
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, status);
}

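/*
 * Per-namespace SMART data is synthesized from the block layer's
 * partition statistics: host_reads/host_writes come from the I/O
 * counters and data_units_read/written from the sector counters.
 * File backed namespaces return all zeroes.
 */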
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        struct nvmet_ns *ns;
        u64 host_reads, host_writes, data_units_read, data_units_written;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
        if (!ns) {
                pr_err("Could not find namespace id: %d\n",
                       le32_to_cpu(req->cmd->get_log_page.nsid));
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                return NVME_SC_INVALID_NS;
        }

        /* we don't have the right data for file backed ns */
        if (!ns->bdev)
                goto out;

        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
        data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
        data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
        nvmet_put_namespace(ns);

        return NVME_SC_SUCCESS;
}

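/*
 * Controller-wide SMART data (NSID 0xffffffff): sum the same block
 * layer statistics across every block device backed namespace, walking
 * the subsystem namespace list under RCU protection.
 */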
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;

        ctrl = req->sq->ctrl;

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
                data_units_read +=
                        part_stat_read(ns->bdev->bd_part, sectors[READ]);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
                data_units_written +=
                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
        }
        rcu_read_unlock();

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->data_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

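/*
 * Commands Supported and Effects log: bit 0 of each entry simply
 * advertises that the corresponding admin or I/O opcode is supported;
 * no additional command effects are reported.
 */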
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        u16 status = NVME_SC_INTERNAL;
        struct nvme_effects_log *log;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

        kfree(log);
out:
        nvmet_req_complete(req, status);
}

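/*
 * Changed Namespace List log: nr_changed_ns == U32_MAX marks an
 * overflowed list, in which case only the first entry is returned (the
 * core sets it to the "all namespaces" NSID when it overflows the
 * list); the remainder of the page is zero filled either way.
 */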
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->data_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}

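/*
 * Format one ANA group descriptor.  If the host set the RGO bit
 * (Return Groups Only) in the command, the NSID list is omitted and
 * only the group header is returned.
 */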
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                rcu_read_lock();
                list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
                rcu_read_unlock();
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
                        NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;
        const char model[] = "Linux";

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memset(id->sn, ' ', sizeof(id->sn));
        bin2hex(id->sn, &ctrl->subsys->serial,
                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
        memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                        NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

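        /*
         * SQES/CQES encode the maximum (upper nibble) and required
         * minimum (lower nibble) entry sizes as powers of two:
         * (0x6 << 4) | 0x6 is a fixed 2^6 = 64 byte SQE, and
         * (0x4 << 4) | 0x4 a fixed 2^4 = 16 byte CQE.
         */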
        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* maxcmd is a soft limit we don't enforce - pick an arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /* Max command capsule size is sqe + single page of in-capsule data */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                  req->port->inline_data_size) / 16);
        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

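/*
 * Identify Namespace.  An inactive NSID returns an all-zeroes data
 * structure rather than an error; for namespaces whose ANA group is
 * inaccessible or in persistent loss, nuse is left at zero.
 */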
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns)
                goto done;

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
        switch (req->port->ana_state[ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
        id->anagrpid = cpu_to_le32(ns->anagrpid);

        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = ns->blksize_shift;

        if (ns->readonly)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
done:
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
out:
        nvmet_req_complete(req, status);
}

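/*
 * Active Namespace ID list: return the NSIDs of active namespaces
 * greater than the NSID given in the command, up to 1024 entries (one
 * 4KB Identify buffer); unused entries remain zero.
 */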
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
        rcu_read_unlock();

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

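/*
 * Emit a single Namespace Identification Descriptor: a (type, length)
 * header followed by the identifier payload, advancing *off past both.
 */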
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

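/*
 * Namespace Identification Descriptor list: emit a UUID and/or NGUID
 * descriptor when the namespace has one configured, then zero fill the
 * rest of the 4KB buffer, which also terminates the descriptor list.
 */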
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        u16 status = 0;
        off_t off = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &ns->uuid, &off);
                if (status)
                        goto out_put_ns;
        }
        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &ns->nguid, &off);
                if (status)
                        goto out_put_ns;
        }

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                           off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so we don't even wait for the command to be executed
 * and immediately report that the command to abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

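/*
 * Set Features, Write Protect: mark the namespace read-only and flush
 * it so that queued writes reach stable storage; if the flush fails,
 * the write protect state is rolled back.
 */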
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return status;
        }

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

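/*
 * The KATO value in cdw11 is specified in milliseconds; kato is kept
 * internally in seconds, rounded up.
 */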
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 result;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
        if (!req->ns) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

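/*
 * Asynchronous Event Requests are parked until an event fires: the
 * controller holds at most NVMET_ASYNC_EVENTS outstanding AERs and
 * rejects any further ones with an Async Event Limit Exceeded status.
 */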
void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                 ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}

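/*
 * Admin command dispatcher: validate the controller state, then set up
 * the expected transfer length and the execute handler for the opcode.
 */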
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->data_len = nvmet_get_log_page_len(cmd);

                switch (cmd->get_log_page.lid) {
                case NVME_LOG_ERROR:
                        req->execute = nvmet_execute_get_log_page_error;
                        return 0;
                case NVME_LOG_SMART:
                        req->execute = nvmet_execute_get_log_page_smart;
                        return 0;
                case NVME_LOG_FW_SLOT:
                        /*
                         * We only support a single firmware slot which always
                         * is active, so we can zero out the whole firmware slot
                         * log and still claim to fully implement this mandatory
                         * log page.
                         */
                        req->execute = nvmet_execute_get_log_page_noop;
                        return 0;
                case NVME_LOG_CHANGED_NS:
                        req->execute = nvmet_execute_get_log_changed_ns;
                        return 0;
                case NVME_LOG_CMD_EFFECTS:
                        req->execute = nvmet_execute_get_log_cmd_effects_ns;
                        return 0;
                case NVME_LOG_ANA:
                        req->execute = nvmet_execute_get_log_page_ana;
                        return 0;
                }
                break;
        case nvme_admin_identify:
                req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_execute_identify_ns;
                        return 0;
                case NVME_ID_CNS_CTRL:
                        req->execute = nvmet_execute_identify_ctrl;
                        return 0;
                case NVME_ID_CNS_NS_ACTIVE_LIST:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
                case NVME_ID_CNS_NS_DESC_LIST:
                        req->execute = nvmet_execute_identify_desclist;
                        return 0;
                }
                break;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                req->data_len = 0;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                req->data_len = 0;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                req->data_len = 0;
                return 0;
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        req->error_loc = offsetof(struct nvme_common_command, opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}