// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

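/*
 * ANA-based multipathing is used when the module option is enabled and the
 * subsystem reports ANA support (CMIC bit 3).
 */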
inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}

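/*
 * Fail over a failed multipath request: steal its bios onto the head's
 * requeue list, complete the original request, and decide from the status
 * code whether switching paths is enough or the controller needs a reset.
 */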
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but due to the fact
		 * that the admin and I/O queues are not serialized that is
		 * fundamentally racy.  So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/*
		 * Reset the controller for any non-ANA error as we don't know
		 * what caused the error.
		 */
		nvme_reset_ctrl(ns->ctrl);
		break;
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

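/*
 * Schedule the requeue work for every namespace head on this controller that
 * has a multipath node, so parked bios get resubmitted.
 */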
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

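/*
 * Drop this namespace from every per-node cached path so the next submission
 * has to select a path from scratch.
 */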
void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int node;

	if (!head)
		return;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node]))
			rcu_assign_pointer(head->current_path[node], NULL);
	}
}

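/*
 * Select and cache the best path for @node: the closest (by NUMA distance
 * when the "numa" iopolicy is active) live optimized path, falling back to
 * the closest non-optimized one.
 */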
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state != NVME_CTRL_LIVE ||
		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

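/* Advance to the next sibling path, wrapping around the head's list. */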
static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

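/*
 * Round-robin selection: starting after @old, pick the first live optimized
 * path, or a non-optimized one if no optimized path is usable.
 */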
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found, *fallback = NULL;

	if (list_is_singular(&head->list))
		return old;

	for (ns = nvme_next_ns(head, old);
	     ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (ns->ctrl->state != NVME_CTRL_LIVE ||
		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			fallback = ns;
	}

	if (!fallback)
		return NULL;
	found = fallback;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

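/*
 * Return the cached path for this node if it is still optimized; otherwise
 * (or for the round-robin iopolicy) select a new one.  The caller must hold
 * head->srcu.
 */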
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
		ns = nvme_round_robin_path(head, node, ns);
	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;
}

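/*
 * make_request handler for the multipath node: route the bio to the current
 * best path, park it on the requeue list if no path is usable right now, or
 * fail it if the head has no paths at all.
 */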
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might
	 * be moved to a different queue via blk_steal_bios(),
	 * so we need to use the bio_split pool from the original
	 * queue to allocate the bvecs from.
	 */
	blk_queue_split(q, &bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

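/*
 * Allocate the request queue and gendisk for the subsystem-level multipath
 * node.  The disk is only registered once the first path goes live, in
 * nvme_mpath_set_live().
 */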
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

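/*
 * Called when a path becomes usable: register the multipath gendisk on first
 * use, pre-populate the per-node path caches if this path is optimized, and
 * kick the requeue work to resubmit parked bios.
 */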
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	lockdep_assert_held(&ns->head->lock);

	if (!head->disk)
		return;

	if (!(head->disk->flags & GENHD_FL_UP))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

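/*
 * Walk the ANA log page group descriptor by group descriptor, sanity checking
 * each entry and invoking @cb on it.  Returns the first error encountered.
 */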
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			  void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	mutex_lock(&ns->head->lock);
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
	mutex_unlock(&ns->head->lock);
}

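/*
 * nvme_parse_ana_log() callback: count groups in the "change" state and apply
 * the descriptor's state to every namespace that belongs to the group.
 */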
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
			continue;
		nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_write(&ctrl->namespaces_rwsem);
	WARN_ON_ONCE(n < nr_nsids);
	return 0;
}

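/*
 * Fetch the ANA log page (descriptors only, without NSID lists, if
 * @groups_only) and apply it, arming the ANATT timer while any group is
 * still in the "change" state.
 */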
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
			groups_only ? NVME_ANA_LOG_RGO : 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl, false);
}

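/*
 * Fires when an ANA group has stayed in the "change" state longer than the
 * (doubled) ANATT value the controller advertised; reset to recover.
 */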
static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store) \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

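/* sysfs show/store handlers for the subsystem-wide "iopolicy" attribute */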
static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sprintf(buf, "%s\n",
			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
	       nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

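/*
 * nvme_parse_ana_log() callback used at scan time: apply the descriptor that
 * matches the namespace's ANA group and stop the walk.
 */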
static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ns *ns = data;

	if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
		nvme_update_ns_ana_state(desc, ns);
		return -ENXIO; /* just break out of the loop */
	}

	return 0;
}

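/*
 * A new namespace was scanned: with ANA, look up its group state in the
 * current ANA log; without ANA, treat the path as optimized and set it live
 * immediately.
 */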
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
		mutex_unlock(&ns->ctrl->ana_lock);
	} else {
		mutex_lock(&ns->head->lock);
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
		mutex_unlock(&ns->head->lock);
	}
}

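/*
 * Tear down the multipath node: unregister the gendisk if it was live and
 * flush the requeue machinery so no parked bios survive.
 */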
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

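/*
 * Read the ANA capabilities from Identify Controller, size and allocate the
 * log buffer (header, one descriptor per group, one NSID entry per
 * namespace), and do an initial groups-only read of the ANA log.
 */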
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	if (!nvme_ctrl_use_ana(ctrl))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf) {
		error = -ENOMEM;
		goto out;
	}

	error = nvme_read_ana_log(ctrl, true);
	if (error)
		goto out_free_ana_log_buf;
	return 0;
out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
out:
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}