1 /*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/delay.h>
16 #include <linux/file.h>
17 #include <linux/syscalls.h>
18 #include <misc/cxl.h>
19 #include <asm/unaligned.h>
20
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_host.h>
23 #include <scsi/scsi_cmnd.h>
24 #include <scsi/scsi_eh.h>
25 #include <uapi/scsi/cxlflash_ioctl.h>
26
27 #include "sislite.h"
28 #include "common.h"
29 #include "vlun.h"
30 #include "superpipe.h"
31
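/* Global state shared by all cxlflash adapter instances (e.g. the error notification page and its mutex) */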
32 struct cxlflash_global global;
33
34 /**
35 * marshal_rele_to_resize() - translate release to resize structure
36 * @release: Source structure from which to translate/copy.
37 * @resize: Destination structure for the translate/copy.
38 */
39 static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
40 struct dk_cxlflash_resize *resize)
41 {
42 resize->hdr = release->hdr;
43 resize->context_id = release->context_id;
44 resize->rsrc_handle = release->rsrc_handle;
45 }
46
47 /**
48 * marshal_det_to_rele() - translate detach to release structure
49 * @detach: Source structure from which to translate/copy.
50 * @release: Destination structure for the translate/copy.
51 */
52 static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
53 struct dk_cxlflash_release *release)
54 {
55 release->hdr = detach->hdr;
56 release->context_id = detach->context_id;
57 }
58
59 /**
60 * marshal_udir_to_rele() - translate udirect to release structure
61 * @udirect: Source structure from which to translate/copy.
62 * @release: Destination structure for the translate/copy.
63 */
64 static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
65 struct dk_cxlflash_release *release)
66 {
67 release->hdr = udirect->hdr;
68 release->context_id = udirect->context_id;
69 release->rsrc_handle = udirect->rsrc_handle;
70 }
71
72 /**
73 * cxlflash_free_errpage() - frees resources associated with global error page
74 */
75 void cxlflash_free_errpage(void)
76 {
77
78 mutex_lock(&global.mutex);
79 if (global.err_page) {
80 __free_page(global.err_page);
81 global.err_page = NULL;
82 }
83 mutex_unlock(&global.mutex);
84 }
85
86 /**
87 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
88 * @cfg: Internal structure associated with the host.
89 *
90 * When the host needs to go down, all users must be quiesced and their
91 * memory freed. This is accomplished by putting the contexts in error
92 * state which will notify the user and let them 'drive' the tear down.
93 * Meanwhile, this routine camps until all user contexts have been removed.
94 *
95 * Note that the main loop in this routine will always execute at least once
96 * to flush the reset_waitq.
97 */
98 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
99 {
100 struct device *dev = &cfg->dev->dev;
101 int i, found = true;
102
103 cxlflash_mark_contexts_error(cfg);
104
105 while (true) {
106 for (i = 0; i < MAX_CONTEXT; i++)
107 if (cfg->ctx_tbl[i]) {
108 found = true;
109 break;
110 }
111
112 if (!found && list_empty(&cfg->ctx_err_recovery))
113 return;
114
115 dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
116 __func__);
117 wake_up_all(&cfg->reset_waitq);
118 ssleep(1);
119 found = false;
120 }
121 }
122
123 /**
124 * find_error_context() - locates a context by cookie on the error recovery list
125 * @cfg: Internal structure associated with the host.
126 * @rctxid: Desired context by id.
127 * @file: Desired context by file.
128 *
129 * Return: Found context on success, NULL on failure
130 */
131 static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
132 struct file *file)
133 {
134 struct ctx_info *ctxi;
135
136 list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
137 if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
138 return ctxi;
139
140 return NULL;
141 }
142
143 /**
144 * get_context() - obtains a validated and locked context reference
145 * @cfg: Internal structure associated with the host.
146 * @rctxid: Desired context (raw, un-decoded format).
147 * @arg: LUN information or file associated with request.
148 * @ctx_ctrl: Control information to 'steer' desired lookup.
149 *
150 * NOTE: despite the name pid, in Linux, current->pid actually refers
151 * to the lightweight process id (tid) and can change if the process is
152 * multithreaded. The tgid remains constant for the process and only changes
153 * when the process forks. For all intents and purposes, think of tgid
154 * as a pid in the traditional sense.
155 *
156 * Return: Validated context on success, NULL on failure
157 */
158 struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
159 void *arg, enum ctx_ctrl ctx_ctrl)
160 {
161 struct device *dev = &cfg->dev->dev;
162 struct ctx_info *ctxi = NULL;
163 struct lun_access *lun_access = NULL;
164 struct file *file = NULL;
165 struct llun_info *lli = arg;
166 u64 ctxid = DECODE_CTXID(rctxid);
167 int rc;
168 pid_t pid = task_tgid_nr(current), ctxpid = 0;
169
170 if (ctx_ctrl & CTX_CTRL_FILE) {
171 lli = NULL;
172 file = (struct file *)arg;
173 }
174
175 if (ctx_ctrl & CTX_CTRL_CLONE)
176 pid = task_ppid_nr(current);
177
178 if (likely(ctxid < MAX_CONTEXT)) {
179 while (true) {
180 mutex_lock(&cfg->ctx_tbl_list_mutex);
181 ctxi = cfg->ctx_tbl[ctxid];
182 if (ctxi)
183 if ((file && (ctxi->file != file)) ||
184 (!file && (ctxi->ctxid != rctxid)))
185 ctxi = NULL;
186
187 if ((ctx_ctrl & CTX_CTRL_ERR) ||
188 (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
189 ctxi = find_error_context(cfg, rctxid, file);
190 if (!ctxi) {
191 mutex_unlock(&cfg->ctx_tbl_list_mutex);
192 goto out;
193 }
194
195 /*
196 * Need to acquire ownership of the context while still
197 * under the table/list lock to serialize with a remove
198 * thread. Use the 'try' to avoid stalling the
199 * table/list lock for a single context.
200 *
201 * Note that the lock order is:
202 *
203 * cfg->ctx_tbl_list_mutex -> ctxi->mutex
204 *
205 * Therefore release ctx_tbl_list_mutex before retrying.
206 */
207 rc = mutex_trylock(&ctxi->mutex);
208 mutex_unlock(&cfg->ctx_tbl_list_mutex);
209 if (rc)
210 break; /* got the context's lock! */
211 }
212
213 if (ctxi->unavail)
214 goto denied;
215
216 ctxpid = ctxi->pid;
217 if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
218 if (pid != ctxpid)
219 goto denied;
220
221 if (lli) {
222 list_for_each_entry(lun_access, &ctxi->luns, list)
223 if (lun_access->lli == lli)
224 goto out;
225 goto denied;
226 }
227 }
228
229 out:
230 dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
231 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
232 ctx_ctrl);
233
234 return ctxi;
235
236 denied:
237 mutex_unlock(&ctxi->mutex);
238 ctxi = NULL;
239 goto out;
240 }
241
242 /**
243 * put_context() - release a context that was retrieved from get_context()
244 * @ctxi: Context to release.
245 *
246 * For now, releasing the context equates to unlocking its mutex.
247 */
248 void put_context(struct ctx_info *ctxi)
249 {
250 mutex_unlock(&ctxi->mutex);
251 }
252
253 /**
254 * afu_attach() - attach a context to the AFU
255 * @cfg: Internal structure associated with the host.
256 * @ctxi: Context to attach.
257 *
258 * Upon setting the context capabilities, they must be confirmed with
259 * a read back operation as the context might have been closed since
260 * the mailbox was unlocked. When this occurs, registration is failed.
261 *
262 * Return: 0 on success, -errno on failure
263 */
264 static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
265 {
266 struct device *dev = &cfg->dev->dev;
267 struct afu *afu = cfg->afu;
268 struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
269 int rc = 0;
270 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
271 u64 val;
272
273 /* Unlock cap and restrict user to read/write cmds in translated mode */
274 readq_be(&ctrl_map->mbox_r);
275 val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
276 writeq_be(val, &ctrl_map->ctx_cap);
277 val = readq_be(&ctrl_map->ctx_cap);
278 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
279 dev_err(dev, "%s: ctx may be closed val=%016llx\n",
280 __func__, val);
281 rc = -EAGAIN;
282 goto out;
283 }
284
285 /* Set up MMIO registers pointing to the RHT */
286 writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
287 val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
288 writeq_be(val, &ctrl_map->rht_cnt_id);
289 out:
290 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
291 return rc;
292 }
293
294 /**
295 * read_cap16() - issues a SCSI READ_CAP16 command
296 * @sdev: SCSI device associated with LUN.
297 * @lli: LUN destined for capacity request.
298 *
299 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
300 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
301 * recovery, the handler drains all currently running ioctls, waiting until they
302 * have completed before proceeding with a reset. As this routine is used on the
303 * ioctl path, this can create a condition where the EEH handler becomes stuck,
304 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
305 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
306 * This will allow the EEH handler to proceed with a recovery while this thread
307 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
308 * semaphore and check the adapter state in case it changed while inside of
309 * scsi_execute(). The state check will wait if the adapter is still being
310 * recovered or return a failure if the recovery failed. In the event that the
311 * adapter reset failed, simply return the failure as the ioctl would be unable
312 * to continue.
313 *
314 * Note that the above puts a requirement on this routine to only be called on
315 * an ioctl thread.
316 *
317 * Return: 0 on success, -errno on failure
318 */
319 static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
320 {
321 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
322 struct device *dev = &cfg->dev->dev;
323 struct glun_info *gli = lli->parent;
324 struct scsi_sense_hdr sshdr;
325 u8 *cmd_buf = NULL;
326 u8 *scsi_cmd = NULL;
327 u8 *sense_buf = NULL;
328 int rc = 0;
329 int result = 0;
330 int retry_cnt = 0;
331 u32 to = CMD_TIMEOUT * HZ;
332
333 retry:
334 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
335 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
336 sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
337 if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
338 rc = -ENOMEM;
339 goto out;
340 }
341
342 scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
343 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
344 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
345
346 dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
347 retry_cnt ? "re" : "", scsi_cmd[0]);
348
349 /* Drop the ioctl read semaphore across the lengthy call */
350 up_read(&cfg->ioctl_rwsem);
351 result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
352 CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES,
353 0, 0, NULL);
354 down_read(&cfg->ioctl_rwsem);
355 rc = check_state(cfg);
356 if (rc) {
357 dev_err(dev, "%s: Failed state result=%08x\n",
358 __func__, result);
359 rc = -ENODEV;
360 goto out;
361 }
362
363 if (driver_byte(result) == DRIVER_SENSE) {
364 result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
365 if (result & SAM_STAT_CHECK_CONDITION) {
366 switch (sshdr.sense_key) {
367 case NO_SENSE:
368 case RECOVERED_ERROR:
369 /* fall through */
370 case NOT_READY:
371 result &= ~SAM_STAT_CHECK_CONDITION;
372 break;
373 case UNIT_ATTENTION:
374 switch (sshdr.asc) {
375 case 0x29: /* Power on Reset or Device Reset */
376 /* fall through */
377 case 0x2A: /* Device capacity changed */
378 case 0x3F: /* Report LUNs changed */
379 /* Retry the command once more */
380 if (retry_cnt++ < 1) {
381 kfree(cmd_buf);
382 kfree(scsi_cmd);
383 kfree(sense_buf);
384 goto retry;
385 }
386 }
387 break;
388 default:
389 break;
390 }
391 }
392 }
393
394 if (result) {
395 dev_err(dev, "%s: command failed, result=%08x\n",
396 __func__, result);
397 rc = -EIO;
398 goto out;
399 }
400
401 /*
402 * Read cap was successful, grab values from the buffer;
403 * note that we don't need to worry about unaligned access
404 * as the buffer is allocated on an aligned boundary.
405 */
406 mutex_lock(&gli->mutex);
407 gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
408 gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
409 mutex_unlock(&gli->mutex);
410
411 out:
412 kfree(cmd_buf);
413 kfree(scsi_cmd);
414 kfree(sense_buf);
415
416 dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
417 __func__, gli->max_lba, gli->blk_len, rc);
418 return rc;
419 }
420
421 /**
422 * get_rhte() - obtains validated resource handle table entry reference
423 * @ctxi: Context owning the resource handle.
424 * @rhndl: Resource handle associated with entry.
425 * @lli: LUN associated with request.
426 *
427 * Return: Validated RHTE on success, NULL on failure
428 */
429 struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
430 struct llun_info *lli)
431 {
432 struct cxlflash_cfg *cfg = ctxi->cfg;
433 struct device *dev = &cfg->dev->dev;
434 struct sisl_rht_entry *rhte = NULL;
435
436 if (unlikely(!ctxi->rht_start)) {
437 dev_dbg(dev, "%s: Context does not have allocated RHT\n",
438 __func__);
439 goto out;
440 }
441
442 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
443 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
444 __func__, rhndl);
445 goto out;
446 }
447
448 if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
449 dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
450 __func__, rhndl);
451 goto out;
452 }
453
454 rhte = &ctxi->rht_start[rhndl];
455 if (unlikely(rhte->nmask == 0)) {
456 dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
457 __func__, rhndl);
458 rhte = NULL;
459 goto out;
460 }
461
462 out:
463 return rhte;
464 }
465
466 /**
467 * rhte_checkout() - obtains free/empty resource handle table entry
468 * @ctxi: Context owning the resource handle.
469 * @lli: LUN associated with request.
470 *
471 * Return: Free RHTE on success, NULL on failure
472 */
473 struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
474 struct llun_info *lli)
475 {
476 struct cxlflash_cfg *cfg = ctxi->cfg;
477 struct device *dev = &cfg->dev->dev;
478 struct sisl_rht_entry *rhte = NULL;
479 int i;
480
481 /* Find a free RHT entry */
482 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
483 if (ctxi->rht_start[i].nmask == 0) {
484 rhte = &ctxi->rht_start[i];
485 ctxi->rht_out++;
486 break;
487 }
488
489 if (likely(rhte))
490 ctxi->rht_lun[i] = lli;
491
492 dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
493 return rhte;
494 }
495
496 /**
497 * rhte_checkin() - releases a resource handle table entry
498 * @ctxi: Context owning the resource handle.
499 * @rhte: RHTE to release.
500 */
501 void rhte_checkin(struct ctx_info *ctxi,
502 struct sisl_rht_entry *rhte)
503 {
504 u32 rsrc_handle = rhte - ctxi->rht_start;
505
506 rhte->nmask = 0;
507 rhte->fp = 0;
508 ctxi->rht_out--;
509 ctxi->rht_lun[rsrc_handle] = NULL;
510 ctxi->rht_needs_ws[rsrc_handle] = false;
511 }
512
513 /**
514 * rht_format1() - populates an RHTE for format 1
515 * @rhte: RHTE to populate.
516 * @lun_id: LUN ID of LUN associated with RHTE.
517 * @perm: Desired permissions for RHTE.
518 * @port_sel: Port selection mask.
519 */
520 static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
521 u32 port_sel)
522 {
523 /*
524 * Populate the Format 1 RHT entry for direct access (physical
525 * LUN) using the synchronization sequence defined in the
526 * SISLite specification.
527 */
528 struct sisl_rht_entry_f1 dummy = { 0 };
529 struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
530
531 memset(rhte_f1, 0, sizeof(*rhte_f1));
532 rhte_f1->fp = SISL_RHT_FP(1U, 0);
533 dma_wmb(); /* Make setting of format bit visible */
534
535 rhte_f1->lun_id = lun_id;
536 dma_wmb(); /* Make setting of LUN id visible */
537
538 /*
539 * Use a dummy RHT Format 1 entry to build the second dword
540 * of the entry that must be populated in a single write when
541 * enabled (valid bit set to TRUE).
542 */
543 dummy.valid = 0x80;
544 dummy.fp = SISL_RHT_FP(1U, perm);
545 dummy.port_sel = port_sel;
546 rhte_f1->dw = dummy.dw;
547
548 dma_wmb(); /* Make remaining RHT entry fields visible */
549 }
550
551 /**
552 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
553 * @gli: LUN to attach.
554 * @mode: Desired mode of the LUN.
555 * @locked: Mutex status on current thread.
556 *
557 * Return: 0 on success, -errno on failure
558 */
559 int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
560 {
561 int rc = 0;
562
563 if (!locked)
564 mutex_lock(&gli->mutex);
565
566 if (gli->mode == MODE_NONE)
567 gli->mode = mode;
568 else if (gli->mode != mode) {
569 pr_debug("%s: gli_mode=%d requested_mode=%d\n",
570 __func__, gli->mode, mode);
571 rc = -EINVAL;
572 goto out;
573 }
574
575 gli->users++;
576 WARN_ON(gli->users <= 0);
577 out:
578 pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
579 __func__, rc, gli->mode, gli->users);
580 if (!locked)
581 mutex_unlock(&gli->mutex);
582 return rc;
583 }
584
585 /**
586 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
587 * @gli: LUN to detach.
588 *
589 * When resetting the mode, terminate block allocation resources as they
590 * are no longer required (service is safe to call even when block allocation
591 * resources were not present - such as when transitioning from physical mode).
592 * These resources will be reallocated when needed (subsequent transition to
593 * virtual mode).
594 */
595 void cxlflash_lun_detach(struct glun_info *gli)
596 {
597 mutex_lock(&gli->mutex);
598 WARN_ON(gli->mode == MODE_NONE);
599 if (--gli->users == 0) {
600 gli->mode = MODE_NONE;
601 cxlflash_ba_terminate(&gli->blka.ba_lun);
602 }
603 pr_debug("%s: gli->users=%u\n", __func__, gli->users);
604 WARN_ON(gli->users < 0);
605 mutex_unlock(&gli->mutex);
606 }
607
608 /**
609 * _cxlflash_disk_release() - releases the specified resource entry
610 * @sdev: SCSI device associated with LUN.
611 * @ctxi: Context owning resources.
612 * @release: Release ioctl data structure.
613 *
614 * For LUNs in virtual mode, the virtual LUN associated with the specified
615 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
616 * AFU sync should _not_ be performed when the context is sitting on the error
617 * recovery list. A context on the error recovery list is not known to the AFU
618 * due to reset. When the context is recovered, it will be reattached and made
619 * known again to the AFU.
620 *
621 * Return: 0 on success, -errno on failure
622 */
623 int _cxlflash_disk_release(struct scsi_device *sdev,
624 struct ctx_info *ctxi,
625 struct dk_cxlflash_release *release)
626 {
627 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
628 struct device *dev = &cfg->dev->dev;
629 struct llun_info *lli = sdev->hostdata;
630 struct glun_info *gli = lli->parent;
631 struct afu *afu = cfg->afu;
632 bool put_ctx = false;
633
634 struct dk_cxlflash_resize size;
635 res_hndl_t rhndl = release->rsrc_handle;
636
637 int rc = 0;
638 int rcr = 0;
639 u64 ctxid = DECODE_CTXID(release->context_id),
640 rctxid = release->context_id;
641
642 struct sisl_rht_entry *rhte;
643 struct sisl_rht_entry_f1 *rhte_f1;
644
645 dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
646 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
647
648 if (!ctxi) {
649 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
650 if (unlikely(!ctxi)) {
651 dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
652 __func__, ctxid);
653 rc = -EINVAL;
654 goto out;
655 }
656
657 put_ctx = true;
658 }
659
660 rhte = get_rhte(ctxi, rhndl, lli);
661 if (unlikely(!rhte)) {
662 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
663 __func__, rhndl);
664 rc = -EINVAL;
665 goto out;
666 }
667
668 /*
669 * For virtual LUNs, resize to 0. This clears the LXT_START
670 * and LXT_CNT fields in the RHT entry and properly syncs
671 * with the AFU.
672 *
673 * Afterwards we clear the remaining fields.
674 */
675 switch (gli->mode) {
676 case MODE_VIRTUAL:
677 marshal_rele_to_resize(release, &size);
678 size.req_size = 0;
679 rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
680 if (rc) {
681 dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
682 goto out;
683 }
684
685 break;
686 case MODE_PHYSICAL:
687 /*
688 * Clear the Format 1 RHT entry for direct access
689 * (physical LUN) using the synchronization sequence
690 * defined in the SISLite specification.
691 */
692 rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
693
694 rhte_f1->valid = 0;
695 dma_wmb(); /* Make revocation of RHT entry visible */
696
697 rhte_f1->lun_id = 0;
698 dma_wmb(); /* Make clearing of LUN id visible */
699
700 rhte_f1->dw = 0;
701 dma_wmb(); /* Make RHT entry bottom-half clearing visible */
702
703 if (!ctxi->err_recovery_active) {
704 rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
705 if (unlikely(rcr))
706 dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
707 __func__, rcr);
708 }
709 break;
710 default:
711 WARN(1, "Unsupported LUN mode!");
712 goto out;
713 }
714
715 rhte_checkin(ctxi, rhte);
716 cxlflash_lun_detach(gli);
717
718 out:
719 if (put_ctx)
720 put_context(ctxi);
721 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
722 return rc;
723 }
724
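/**
 * cxlflash_disk_release() - releases the resource entry for a LUN
 * @sdev: SCSI device associated with LUN.
 * @release: Release ioctl data structure.
 *
 * Wrapper around _cxlflash_disk_release() that looks up the context itself.
 *
 * Return: 0 on success, -errno on failure
 */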
725 int cxlflash_disk_release(struct scsi_device *sdev,
726 struct dk_cxlflash_release *release)
727 {
728 return _cxlflash_disk_release(sdev, NULL, release);
729 }
730
731 /**
732 * destroy_context() - releases a context
733 * @cfg: Internal structure associated with the host.
734 * @ctxi: Context to release.
735 *
736 * This routine is safe to be called with a non-initialized context.
737 * Also note that the routine conditionally checks for the existence
738 * of the context control map before clearing the RHT registers and
739 * context capabilities because it is possible to destroy a context
740 * while the context is in the error state (previous mapping was
741 * removed [so there is no need to worry about clearing] and context
742 * is waiting for a new mapping).
743 */
744 static void destroy_context(struct cxlflash_cfg *cfg,
745 struct ctx_info *ctxi)
746 {
747 struct afu *afu = cfg->afu;
748
749 if (ctxi->initialized) {
750 WARN_ON(!list_empty(&ctxi->luns));
751
752 /* Clear RHT registers and drop all capabilities for context */
753 if (afu->afu_map && ctxi->ctrl_map) {
754 writeq_be(0, &ctxi->ctrl_map->rht_start);
755 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
756 writeq_be(0, &ctxi->ctrl_map->ctx_cap);
757 }
758 }
759
760 /* Free memory associated with context */
761 free_page((ulong)ctxi->rht_start);
762 kfree(ctxi->rht_needs_ws);
763 kfree(ctxi->rht_lun);
764 kfree(ctxi);
765 }
766
767 /**
768 * create_context() - allocates and initializes a context
769 * @cfg: Internal structure associated with the host.
770 *
771 * Return: Allocated context on success, NULL on failure
772 */
773 static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
774 {
775 struct device *dev = &cfg->dev->dev;
776 struct ctx_info *ctxi = NULL;
777 struct llun_info **lli = NULL;
778 u8 *ws = NULL;
779 struct sisl_rht_entry *rhte;
780
781 ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
782 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
783 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
784 if (unlikely(!ctxi || !lli || !ws)) {
785 dev_err(dev, "%s: Unable to allocate context\n", __func__);
786 goto err;
787 }
788
789 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
790 if (unlikely(!rhte)) {
791 dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
792 goto err;
793 }
794
795 ctxi->rht_lun = lli;
796 ctxi->rht_needs_ws = ws;
797 ctxi->rht_start = rhte;
798 out:
799 return ctxi;
800
801 err:
802 kfree(ws);
803 kfree(lli);
804 kfree(ctxi);
805 ctxi = NULL;
806 goto out;
807 }
808
809 /**
810 * init_context() - initializes a previously allocated context
811 * @ctxi: Previously allocated context
812 * @cfg: Internal structure associated with the host.
813 * @ctx: Previously obtained CXL context reference.
814 * @ctxid: Previously obtained process element associated with CXL context.
815 * @file: Previously obtained file associated with CXL context.
816 * @perms: User-specified permissions.
817 */
818 static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
819 struct cxl_context *ctx, int ctxid, struct file *file,
820 u32 perms)
821 {
822 struct afu *afu = cfg->afu;
823
824 ctxi->rht_perms = perms;
825 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
826 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
827 ctxi->pid = task_tgid_nr(current); /* tgid = pid */
828 ctxi->ctx = ctx;
829 ctxi->cfg = cfg;
830 ctxi->file = file;
831 ctxi->initialized = true;
832 mutex_init(&ctxi->mutex);
833 kref_init(&ctxi->kref);
834 INIT_LIST_HEAD(&ctxi->luns);
835 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
836 }
837
838 /**
839 * remove_context() - context kref release handler
840 * @kref: Kernel reference associated with context to be removed.
841 *
842 * When a context no longer has any references it can safely be removed
843 * from global access and destroyed. Note that it is assumed the thread
844 * relinquishing access to the context holds its mutex.
845 */
846 static void remove_context(struct kref *kref)
847 {
848 struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
849 struct cxlflash_cfg *cfg = ctxi->cfg;
850 u64 ctxid = DECODE_CTXID(ctxi->ctxid);
851
852 /* Remove context from table/error list */
853 WARN_ON(!mutex_is_locked(&ctxi->mutex));
854 ctxi->unavail = true;
855 mutex_unlock(&ctxi->mutex);
856 mutex_lock(&cfg->ctx_tbl_list_mutex);
857 mutex_lock(&ctxi->mutex);
858
859 if (!list_empty(&ctxi->list))
860 list_del(&ctxi->list);
861 cfg->ctx_tbl[ctxid] = NULL;
862 mutex_unlock(&cfg->ctx_tbl_list_mutex);
863 mutex_unlock(&ctxi->mutex);
864
865 /* Context now completely uncoupled/unreachable */
866 destroy_context(cfg, ctxi);
867 }
868
869 /**
870 * _cxlflash_disk_detach() - detaches a LUN from a context
871 * @sdev: SCSI device associated with LUN.
872 * @ctxi: Context owning resources.
873 * @detach: Detach ioctl data structure.
874 *
875 * As part of the detach, all per-context resources associated with the LUN
876 * are cleaned up. When detaching the last LUN for a context, the context
877 * itself is cleaned up and released.
878 *
879 * Return: 0 on success, -errno on failure
880 */
881 static int _cxlflash_disk_detach(struct scsi_device *sdev,
882 struct ctx_info *ctxi,
883 struct dk_cxlflash_detach *detach)
884 {
885 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
886 struct device *dev = &cfg->dev->dev;
887 struct llun_info *lli = sdev->hostdata;
888 struct lun_access *lun_access, *t;
889 struct dk_cxlflash_release rel;
890 bool put_ctx = false;
891
892 int i;
893 int rc = 0;
894 u64 ctxid = DECODE_CTXID(detach->context_id),
895 rctxid = detach->context_id;
896
897 dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
898
899 if (!ctxi) {
900 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
901 if (unlikely(!ctxi)) {
902 dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
903 __func__, ctxid);
904 rc = -EINVAL;
905 goto out;
906 }
907
908 put_ctx = true;
909 }
910
911 /* Cleanup outstanding resources tied to this LUN */
912 if (ctxi->rht_out) {
913 marshal_det_to_rele(detach, &rel);
914 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
915 if (ctxi->rht_lun[i] == lli) {
916 rel.rsrc_handle = i;
917 _cxlflash_disk_release(sdev, ctxi, &rel);
918 }
919
920 /* No need to loop further if we're done */
921 if (ctxi->rht_out == 0)
922 break;
923 }
924 }
925
926 /* Take our LUN out of context, free the node */
927 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
928 if (lun_access->lli == lli) {
929 list_del(&lun_access->list);
930 kfree(lun_access);
931 lun_access = NULL;
932 break;
933 }
934
935 /*
936 * Release the context reference and the sdev reference that
937 * bound this LUN to the context.
938 */
939 if (kref_put(&ctxi->kref, remove_context))
940 put_ctx = false;
941 scsi_device_put(sdev);
942 out:
943 if (put_ctx)
944 put_context(ctxi);
945 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
946 return rc;
947 }
948
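/**
 * cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev: SCSI device associated with LUN.
 * @detach: Detach ioctl data structure.
 *
 * Wrapper around _cxlflash_disk_detach() that looks up the context itself.
 *
 * Return: 0 on success, -errno on failure
 */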
949 static int cxlflash_disk_detach(struct scsi_device *sdev,
950 struct dk_cxlflash_detach *detach)
951 {
952 return _cxlflash_disk_detach(sdev, NULL, detach);
953 }
954
955 /**
956 * cxlflash_cxl_release() - release handler for adapter file descriptor
957 * @inode: File-system inode associated with fd.
958 * @file: File installed with adapter file descriptor.
959 *
960 * This routine is the release handler for the fops registered with
961 * the CXL services on an initial attach for a context. It is called
962 * when a close (explicitly by the user or as part of a process tear
963 * down) is performed on the adapter file descriptor returned to the
964 * user. The user should be aware that explicitly performing a close
965 * is considered catastrophic and subsequent usage of the superpipe API
966 * with previously saved off tokens will fail.
967 *
968 * This routine derives the context reference and calls detach for
969 * each LUN associated with the context. The final detach operation
970 * causes the context itself to be freed. With exception to when the
971 * CXL process element (context id) lookup fails (a case that should
972 * theoretically never occur), every call into this routine results
973 * in a complete freeing of a context.
974 *
975 * Return: 0 on success
976 */
977 static int cxlflash_cxl_release(struct inode *inode, struct file *file)
978 {
979 struct cxl_context *ctx = cxl_fops_get_context(file);
980 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
981 cxl_fops);
982 struct device *dev = &cfg->dev->dev;
983 struct ctx_info *ctxi = NULL;
984 struct dk_cxlflash_detach detach = { { 0 }, 0 };
985 struct lun_access *lun_access, *t;
986 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
987 int ctxid;
988
989 ctxid = cxl_process_element(ctx);
990 if (unlikely(ctxid < 0)) {
991 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
992 __func__, ctx, ctxid);
993 goto out;
994 }
995
996 ctxi = get_context(cfg, ctxid, file, ctrl);
997 if (unlikely(!ctxi)) {
998 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
999 if (!ctxi) {
1000 dev_dbg(dev, "%s: ctxid=%d already free\n",
1001 __func__, ctxid);
1002 goto out_release;
1003 }
1004
1005 dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
1006 __func__, ctxid);
1007 put_context(ctxi);
1008 goto out;
1009 }
1010
1011 dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
1012
1013 detach.context_id = ctxi->ctxid;
1014 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
1015 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
1016 out_release:
1017 cxl_fd_release(inode, file);
1018 out:
1019 dev_dbg(dev, "%s: returning\n", __func__);
1020 return 0;
1021 }
1022
1023 /**
1024 * unmap_context() - clears a previously established mapping
1025 * @ctxi: Context owning the mapping.
1026 *
1027 * This routine is used to switch between the error notification page
1028 * (dummy page of all 1's) and the real mapping (established by the CXL
1029 * fault handler).
1030 */
1031 static void unmap_context(struct ctx_info *ctxi)
1032 {
1033 unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
1034 }
1035
1036 /**
1037 * get_err_page() - obtains and allocates the error notification page
1038 * @cfg: Internal structure associated with the host.
1039 *
1040 * Return: error notification page on success, NULL on failure
1041 */
1042 static struct page *get_err_page(struct cxlflash_cfg *cfg)
1043 {
1044 struct page *err_page = global.err_page;
1045 struct device *dev = &cfg->dev->dev;
1046
1047 if (unlikely(!err_page)) {
1048 err_page = alloc_page(GFP_KERNEL);
1049 if (unlikely(!err_page)) {
1050 dev_err(dev, "%s: Unable to allocate err_page\n",
1051 __func__);
1052 goto out;
1053 }
1054
1055 memset(page_address(err_page), -1, PAGE_SIZE);
1056
1057 /* Serialize update w/ other threads to avoid a leak */
1058 mutex_lock(&global.mutex);
1059 if (likely(!global.err_page))
1060 global.err_page = err_page;
1061 else {
1062 __free_page(err_page);
1063 err_page = global.err_page;
1064 }
1065 mutex_unlock(&global.mutex);
1066 }
1067
1068 out:
1069 dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
1070 return err_page;
1071 }
1072
1073 /**
1074 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
1075 * @vmf: VM fault associated with current fault.
1076 *
1077 * To support error notification via MMIO, faults are 'caught' by this routine
1078 * that was inserted before passing back the adapter file descriptor on attach.
1079 * When a fault occurs, this routine evaluates if error recovery is active and
1080 * if so, installs the error page to 'notify' the user about the error state.
1081 * During normal operation, the fault is simply handled by the original fault
1082 * handler that was installed by CXL services as part of initializing the
1083 * adapter file descriptor. The VMA's page protection bits are toggled to
1084 * indicate cached/not-cached depending on the memory backing the fault.
1085 *
1086 * Return: 0 on success, VM_FAULT_SIGBUS on failure
1087 */
1088 static int cxlflash_mmap_fault(struct vm_fault *vmf)
1089 {
1090 struct vm_area_struct *vma = vmf->vma;
1091 struct file *file = vma->vm_file;
1092 struct cxl_context *ctx = cxl_fops_get_context(file);
1093 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1094 cxl_fops);
1095 struct device *dev = &cfg->dev->dev;
1096 struct ctx_info *ctxi = NULL;
1097 struct page *err_page = NULL;
1098 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1099 int rc = 0;
1100 int ctxid;
1101
1102 ctxid = cxl_process_element(ctx);
1103 if (unlikely(ctxid < 0)) {
1104 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
1105 __func__, ctx, ctxid);
1106 goto err;
1107 }
1108
1109 ctxi = get_context(cfg, ctxid, file, ctrl);
1110 if (unlikely(!ctxi)) {
1111 dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
1112 goto err;
1113 }
1114
1115 dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
1116
1117 if (likely(!ctxi->err_recovery_active)) {
1118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1119 rc = ctxi->cxl_mmap_vmops->fault(vmf);
1120 } else {
1121 dev_dbg(dev, "%s: err recovery active, use err_page\n",
1122 __func__);
1123
1124 err_page = get_err_page(cfg);
1125 if (unlikely(!err_page)) {
1126 dev_err(dev, "%s: Could not get err_page\n", __func__);
1127 rc = VM_FAULT_RETRY;
1128 goto out;
1129 }
1130
1131 get_page(err_page);
1132 vmf->page = err_page;
1133 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
1134 }
1135
1136 out:
1137 if (likely(ctxi))
1138 put_context(ctxi);
1139 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1140 return rc;
1141
1142 err:
1143 rc = VM_FAULT_SIGBUS;
1144 goto out;
1145 }
1146
1147 /*
1148 * Local MMAP vmops to 'catch' faults
1149 */
1150 static const struct vm_operations_struct cxlflash_mmap_vmops = {
1151 .fault = cxlflash_mmap_fault,
1152 };
1153
1154 /**
1155 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
1156 * @file: File installed with adapter file descriptor.
1157 * @vma: VM area associated with mapping.
1158 *
1159 * Installs local mmap vmops to 'catch' faults for error notification support.
1160 *
1161 * Return: 0 on success, -errno on failure
1162 */
1163 static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1164 {
1165 struct cxl_context *ctx = cxl_fops_get_context(file);
1166 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1167 cxl_fops);
1168 struct device *dev = &cfg->dev->dev;
1169 struct ctx_info *ctxi = NULL;
1170 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1171 int ctxid;
1172 int rc = 0;
1173
1174 ctxid = cxl_process_element(ctx);
1175 if (unlikely(ctxid < 0)) {
1176 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
1177 __func__, ctx, ctxid);
1178 rc = -EIO;
1179 goto out;
1180 }
1181
1182 ctxi = get_context(cfg, ctxid, file, ctrl);
1183 if (unlikely(!ctxi)) {
1184 dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
1185 rc = -EIO;
1186 goto out;
1187 }
1188
1189 dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
1190
1191 rc = cxl_fd_mmap(file, vma);
1192 if (likely(!rc)) {
1193 /* Insert ourself in the mmap fault handler path */
1194 ctxi->cxl_mmap_vmops = vma->vm_ops;
1195 vma->vm_ops = &cxlflash_mmap_vmops;
1196 }
1197
1198 out:
1199 if (likely(ctxi))
1200 put_context(ctxi);
1201 return rc;
1202 }
1203
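/* File operations for the adapter file descriptor (mmap fault catching and context teardown on release) */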
1204 const struct file_operations cxlflash_cxl_fops = {
1205 .owner = THIS_MODULE,
1206 .mmap = cxlflash_cxl_mmap,
1207 .release = cxlflash_cxl_release,
1208 };
1209
1210 /**
1211 * cxlflash_mark_contexts_error() - move contexts to error state and list
1212 * @cfg: Internal structure associated with the host.
1213 *
1214 * A context is only moved over to the error list when there are no outstanding
1215 * references to it. This ensures that a running operation has completed.
1216 *
1217 * Return: 0 on success, -errno on failure
1218 */
1219 int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
1220 {
1221 int i, rc = 0;
1222 struct ctx_info *ctxi = NULL;
1223
1224 mutex_lock(&cfg->ctx_tbl_list_mutex);
1225
1226 for (i = 0; i < MAX_CONTEXT; i++) {
1227 ctxi = cfg->ctx_tbl[i];
1228 if (ctxi) {
1229 mutex_lock(&ctxi->mutex);
1230 cfg->ctx_tbl[i] = NULL;
1231 list_add(&ctxi->list, &cfg->ctx_err_recovery);
1232 ctxi->err_recovery_active = true;
1233 ctxi->ctrl_map = NULL;
1234 unmap_context(ctxi);
1235 mutex_unlock(&ctxi->mutex);
1236 }
1237 }
1238
1239 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1240 return rc;
1241 }
1242
1243 /*
1244 * Dummy NULL fops
1245 */
1246 static const struct file_operations null_fops = {
1247 .owner = THIS_MODULE,
1248 };
1249
1250 /**
1251 * check_state() - checks and responds to the current adapter state
1252 * @cfg: Internal structure associated with the host.
1253 *
1254 * This routine can block and should only be used on process context.
1255 * It assumes that the caller is an ioctl thread holding the ioctl
1256 * read semaphore. The semaphore is temporarily released across the wait
1257 * to allow actively running ioctls to drain. Also note that when waking up
1258 * from waiting in reset, the state is unknown and must be checked again
1259 * before proceeding.
1260 *
1261 * Return: 0 on success, -errno on failure
1262 */
1263 int check_state(struct cxlflash_cfg *cfg)
1264 {
1265 struct device *dev = &cfg->dev->dev;
1266 int rc = 0;
1267
1268 retry:
1269 switch (cfg->state) {
1270 case STATE_RESET:
1271 dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
1272 up_read(&cfg->ioctl_rwsem);
1273 rc = wait_event_interruptible(cfg->reset_waitq,
1274 cfg->state != STATE_RESET);
1275 down_read(&cfg->ioctl_rwsem);
1276 if (unlikely(rc))
1277 break;
1278 goto retry;
1279 case STATE_FAILTERM:
1280 dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
1281 rc = -ENODEV;
1282 break;
1283 default:
1284 break;
1285 }
1286
1287 return rc;
1288 }
1289
1290 /**
1291 * cxlflash_disk_attach() - attach a LUN to a context
1292 * @sdev: SCSI device associated with LUN.
1293 * @attach: Attach ioctl data structure.
1294 *
1295 * Creates a context and attaches LUN to it. A LUN can only be attached
1296 * one time to a context (subsequent attaches for the same context/LUN pair
1297 * are not supported). Additional LUNs can be attached to a context by
1298 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
1299 *
1300 * Return: 0 on success, -errno on failure
1301 */
1302 static int cxlflash_disk_attach(struct scsi_device *sdev,
1303 struct dk_cxlflash_attach *attach)
1304 {
1305 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1306 struct device *dev = &cfg->dev->dev;
1307 struct afu *afu = cfg->afu;
1308 struct llun_info *lli = sdev->hostdata;
1309 struct glun_info *gli = lli->parent;
1310 struct cxl_ioctl_start_work *work;
1311 struct ctx_info *ctxi = NULL;
1312 struct lun_access *lun_access = NULL;
1313 int rc = 0;
1314 u32 perms;
1315 int ctxid = -1;
1316 u64 flags = 0UL;
1317 u64 rctxid = 0UL;
1318 struct file *file = NULL;
1319
1320 struct cxl_context *ctx = NULL;
1321
1322 int fd = -1;
1323
1324 if (attach->num_interrupts > 4) {
1325 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
1326 __func__, attach->num_interrupts);
1327 rc = -EINVAL;
1328 goto out;
1329 }
1330
1331 if (gli->max_lba == 0) {
1332 dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
1333 __func__, lli->lun_id[sdev->channel]);
1334 rc = read_cap16(sdev, lli);
1335 if (rc) {
1336 dev_err(dev, "%s: Invalid device rc=%d\n",
1337 __func__, rc);
1338 rc = -ENODEV;
1339 goto out;
1340 }
1341 dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
1342 dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
1343 }
1344
1345 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
1346 rctxid = attach->context_id;
1347 ctxi = get_context(cfg, rctxid, NULL, 0);
1348 if (!ctxi) {
1349 dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
1350 __func__, rctxid);
1351 rc = -EINVAL;
1352 goto out;
1353 }
1354
1355 list_for_each_entry(lun_access, &ctxi->luns, list)
1356 if (lun_access->lli == lli) {
1357 dev_dbg(dev, "%s: Already attached\n",
1358 __func__);
1359 rc = -EINVAL;
1360 goto out;
1361 }
1362 }
1363
1364 rc = scsi_device_get(sdev);
1365 if (unlikely(rc)) {
1366 dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
1367 goto out;
1368 }
1369
1370 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
1371 if (unlikely(!lun_access)) {
1372 dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
1373 rc = -ENOMEM;
1374 goto err;
1375 }
1376
1377 lun_access->lli = lli;
1378 lun_access->sdev = sdev;
1379
1380 /* Non-NULL context indicates reuse (another context reference) */
1381 if (ctxi) {
1382 dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
1383 __func__, rctxid);
1384 kref_get(&ctxi->kref);
1385 list_add(&lun_access->list, &ctxi->luns);
1386 goto out_attach;
1387 }
1388
1389 ctxi = create_context(cfg);
1390 if (unlikely(!ctxi)) {
1391 dev_err(dev, "%s: Failed to create context ctxid=%d\n",
1392 __func__, ctxid);
1393 goto err;
1394 }
1395
1396 ctx = cxl_dev_context_init(cfg->dev);
1397 if (IS_ERR_OR_NULL(ctx)) {
1398 dev_err(dev, "%s: Could not initialize context %p\n",
1399 __func__, ctx);
1400 rc = -ENODEV;
1401 goto err;
1402 }
1403
1404 work = &ctxi->work;
1405 work->num_interrupts = attach->num_interrupts;
1406 work->flags = CXL_START_WORK_NUM_IRQS;
1407
1408 rc = cxl_start_work(ctx, work);
1409 if (unlikely(rc)) {
1410 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1411 __func__, rc);
1412 goto err;
1413 }
1414
1415 ctxid = cxl_process_element(ctx);
1416 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1417 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1418 rc = -EPERM;
1419 goto err;
1420 }
1421
1422 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1423 if (unlikely(fd < 0)) {
1424 rc = -ENODEV;
1425 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1426 goto err;
1427 }
1428
1429 /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
1430 perms = SISL_RHT_PERM(attach->hdr.flags + 1);
1431
1432 /* Context mutex is locked upon return */
1433 init_context(ctxi, cfg, ctx, ctxid, file, perms);
1434
1435 rc = afu_attach(cfg, ctxi);
1436 if (unlikely(rc)) {
1437 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1438 goto err;
1439 }
1440
1441 /*
1442 * No error paths after this point. Once the fd is installed it's
1443 * visible to user space and can't be undone safely on this thread.
1444 * There is no need to worry about a deadlock here because no one
1445 * knows about us yet; we can be the only one holding our mutex.
1446 */
1447 list_add(&lun_access->list, &ctxi->luns);
1448 mutex_lock(&cfg->ctx_tbl_list_mutex);
1449 mutex_lock(&ctxi->mutex);
1450 cfg->ctx_tbl[ctxid] = ctxi;
1451 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1452 fd_install(fd, file);
1453
1454 out_attach:
1455 if (fd != -1)
1456 flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
1457 if (afu_is_sq_cmd_mode(afu))
1458 flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1459
1460 attach->hdr.return_flags = flags;
1461 attach->context_id = ctxi->ctxid;
1462 attach->block_size = gli->blk_len;
1463 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1464 attach->last_lba = gli->max_lba;
1465 attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
1466 attach->max_xfer /= gli->blk_len;
1467
1468 out:
1469 attach->adap_fd = fd;
1470
1471 if (ctxi)
1472 put_context(ctxi);
1473
1474 dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
1475 __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
1476 return rc;
1477
1478 err:
1479 /* Cleanup CXL context; okay to 'stop' even if it was not started */
1480 if (!IS_ERR_OR_NULL(ctx)) {
1481 cxl_stop_context(ctx);
1482 cxl_release_context(ctx);
1483 ctx = NULL;
1484 }
1485
1486 /*
1487 * Here, we're overriding the fops with a dummy all-NULL fops because
1488 * fput() calls the release fop, which will cause us to mistakenly
1489 * call into the CXL code. Rather than try to add yet more complexity
1490 * to that routine (cxlflash_cxl_release) we should try to fix the
1491 * issue here.
1492 */
1493 if (fd > 0) {
1494 file->f_op = &null_fops;
1495 fput(file);
1496 put_unused_fd(fd);
1497 fd = -1;
1498 file = NULL;
1499 }
1500
1501 /* Cleanup our context */
1502 if (ctxi) {
1503 destroy_context(cfg, ctxi);
1504 ctxi = NULL;
1505 }
1506
1507 kfree(lun_access);
1508 scsi_device_put(sdev);
1509 goto out;
1510 }
1511
1512 /**
1513 * recover_context() - recovers a context in error
1514 * @cfg: Internal structure associated with the host.
1515 * @ctxi: Context to recover.
1516 * @adap_fd: Adapter file descriptor associated with new/recovered context.
1517 *
1518 * Re-establishes the state for a context-in-error.
1519 *
1520 * Return: 0 on success, -errno on failure
1521 */
1522 static int recover_context(struct cxlflash_cfg *cfg,
1523 struct ctx_info *ctxi,
1524 int *adap_fd)
1525 {
1526 struct device *dev = &cfg->dev->dev;
1527 int rc = 0;
1528 int fd = -1;
1529 int ctxid = -1;
1530 struct file *file;
1531 struct cxl_context *ctx;
1532 struct afu *afu = cfg->afu;
1533
1534 ctx = cxl_dev_context_init(cfg->dev);
1535 if (IS_ERR_OR_NULL(ctx)) {
1536 dev_err(dev, "%s: Could not initialize context %p\n",
1537 __func__, ctx);
1538 rc = -ENODEV;
1539 goto out;
1540 }
1541
1542 rc = cxl_start_work(ctx, &ctxi->work);
1543 if (unlikely(rc)) {
1544 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1545 __func__, rc);
1546 goto err1;
1547 }
1548
1549 ctxid = cxl_process_element(ctx);
1550 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1551 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1552 rc = -EPERM;
1553 goto err2;
1554 }
1555
1556 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1557 if (unlikely(fd < 0)) {
1558 rc = -ENODEV;
1559 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1560 goto err2;
1561 }
1562
1563 /* Update with new MMIO area based on updated context id */
1564 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
1565
1566 rc = afu_attach(cfg, ctxi);
1567 if (rc) {
1568 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1569 goto err3;
1570 }
1571
1572 /*
1573 * No error paths after this point. Once the fd is installed it's
1574 * visible to user space and can't be undone safely on this thread.
1575 */
1576 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
1577 ctxi->ctx = ctx;
1578 ctxi->file = file;
1579
1580 /*
1581 * Put context back in table (note the reinit of the context list);
1582 * we must first drop the context's mutex and then acquire it in
1583 * order with the table/list mutex to avoid a deadlock - safe to do
1584 * here because no one can find us at this moment in time.
1585 */
1586 mutex_unlock(&ctxi->mutex);
1587 mutex_lock(&cfg->ctx_tbl_list_mutex);
1588 mutex_lock(&ctxi->mutex);
1589 list_del_init(&ctxi->list);
1590 cfg->ctx_tbl[ctxid] = ctxi;
1591 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1592 fd_install(fd, file);
1593 *adap_fd = fd;
1594 out:
1595 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
1596 __func__, ctxid, fd, rc);
1597 return rc;
1598
1599 err3:
1600 fput(file);
1601 put_unused_fd(fd);
1602 err2:
1603 cxl_stop_context(ctx);
1604 err1:
1605 cxl_release_context(ctx);
1606 goto out;
1607 }
1608
1609 /**
1610 * cxlflash_afu_recover() - initiates AFU recovery
1611 * @sdev: SCSI device associated with LUN.
1612 * @recover: Recover ioctl data structure.
1613 *
1614 * Only a single recovery is allowed at a time to avoid exhausting CXL
1615 * resources (leading to recovery failure) in the event that we're up
1616 * against the maximum number of contexts limit. For similar reasons,
1617 * a context recovery is retried if there are multiple recoveries taking
1618 * place at the same time and the failure was due to CXL services being
1619 * unable to keep up.
1620 *
1621 * As this routine is called on ioctl context, it holds the ioctl r/w
1622 * semaphore that is used to drain ioctls in recovery scenarios. The
1623 * implementation to achieve the pacing described above (a local mutex)
1624 * requires that the ioctl r/w semaphore be dropped and reacquired to
1625 * avoid a 3-way deadlock when multiple process recoveries operate in
1626 * parallel.
1627 *
1628 * Because a user can detect an error condition before the kernel, it is
1629 * quite possible for this routine to act as the kernel's EEH detection
1630 * source (MMIO read of mbox_r). Because of this, there is a window of
1631 * time where an EEH might have been detected but not yet 'serviced'
1632 * (callback invoked, causing the device to enter reset state). To avoid
1633 * looping in this routine during that window, a 1 second sleep is in place
1634 * between the time the MMIO failure is detected and the time a wait on the
1635 * reset wait queue is attempted via check_state().
1636 *
1637 * Return: 0 on success, -errno on failure
1638 */
1639 static int cxlflash_afu_recover(struct scsi_device *sdev,
1640 struct dk_cxlflash_recover_afu *recover)
1641 {
1642 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1643 struct device *dev = &cfg->dev->dev;
1644 struct llun_info *lli = sdev->hostdata;
1645 struct afu *afu = cfg->afu;
1646 struct ctx_info *ctxi = NULL;
1647 struct mutex *mutex = &cfg->ctx_recovery_mutex;
1648 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1649 u64 flags;
1650 u64 ctxid = DECODE_CTXID(recover->context_id),
1651 rctxid = recover->context_id;
1652 long reg;
1653 int lretry = 20; /* up to 2 seconds */
1654 int new_adap_fd = -1;
1655 int rc = 0;
1656
1657 atomic_inc(&cfg->recovery_threads);
1658 up_read(&cfg->ioctl_rwsem);
1659 rc = mutex_lock_interruptible(mutex);
1660 down_read(&cfg->ioctl_rwsem);
1661 if (rc)
1662 goto out;
1663 rc = check_state(cfg);
1664 if (rc) {
1665 dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
1666 rc = -ENODEV;
1667 goto out;
1668 }
1669
1670 dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
1671 __func__, recover->reason, rctxid);
1672
1673 retry:
1674 /* Ensure that this process is attached to the context */
1675 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
1676 if (unlikely(!ctxi)) {
1677 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1678 rc = -EINVAL;
1679 goto out;
1680 }
1681
1682 if (ctxi->err_recovery_active) {
1683 retry_recover:
1684 rc = recover_context(cfg, ctxi, &new_adap_fd);
1685 if (unlikely(rc)) {
1686 dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
1687 __func__, ctxid, rc);
1688 if ((rc == -ENODEV) &&
1689 ((atomic_read(&cfg->recovery_threads) > 1) ||
1690 (lretry--))) {
1691 dev_dbg(dev, "%s: Going to try again\n",
1692 __func__);
1693 mutex_unlock(mutex);
1694 msleep(100);
1695 rc = mutex_lock_interruptible(mutex);
1696 if (rc)
1697 goto out;
1698 goto retry_recover;
1699 }
1700
1701 goto out;
1702 }
1703
1704 ctxi->err_recovery_active = false;
1705
1706 flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
1707 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1708 if (afu_is_sq_cmd_mode(afu))
1709 flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1710
1711 recover->hdr.return_flags = flags;
1712 recover->context_id = ctxi->ctxid;
1713 recover->adap_fd = new_adap_fd;
1714 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1715 goto out;
1716 }
1717
1718 /* Test if in error state */
1719 reg = readq_be(&hwq->ctrl_map->mbox_r);
1720 if (reg == -1) {
1721 dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
1722
1723 /*
1724 * Before checking the state, put back the context obtained with
1725 * get_context() as it is no longer needed and sleep for a short
1726 * period of time (see prolog notes).
1727 */
1728 put_context(ctxi);
1729 ctxi = NULL;
1730 ssleep(1);
1731 rc = check_state(cfg);
1732 if (unlikely(rc))
1733 goto out;
1734 goto retry;
1735 }
1736
1737 dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
1738 out:
1739 if (likely(ctxi))
1740 put_context(ctxi);
1741 mutex_unlock(mutex);
1742 atomic_dec_if_positive(&cfg->recovery_threads);
1743 return rc;
1744 }
1745
1746 /**
1747 * process_sense() - evaluates and processes sense data
1748 * @sdev: SCSI device associated with LUN.
1749 * @verify: Verify ioctl data structure.
1750 *
1751 * Return: 0 on success, -errno on failure
1752 */
1753 static int process_sense(struct scsi_device *sdev,
1754 struct dk_cxlflash_verify *verify)
1755 {
1756 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1757 struct device *dev = &cfg->dev->dev;
1758 struct llun_info *lli = sdev->hostdata;
1759 struct glun_info *gli = lli->parent;
1760 u64 prev_lba = gli->max_lba;
1761 struct scsi_sense_hdr sshdr = { 0 };
1762 int rc = 0;
1763
1764 rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
1765 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
1766 if (!rc) {
1767 dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
1768 rc = -EINVAL;
1769 goto out;
1770 }
1771
1772 switch (sshdr.sense_key) {
1773 case NO_SENSE:
1774 case RECOVERED_ERROR:
1775 /* fall through */
1776 case NOT_READY:
1777 break;
1778 case UNIT_ATTENTION:
1779 switch (sshdr.asc) {
1780 case 0x29: /* Power on Reset or Device Reset */
1781 /* fall through */
1782 case 0x2A: /* Device settings/capacity changed */
1783 rc = read_cap16(sdev, lli);
1784 if (rc) {
1785 rc = -ENODEV;
1786 break;
1787 }
1788 if (prev_lba != gli->max_lba)
1789 dev_dbg(dev, "%s: Capacity changed old=%lld "
1790 "new=%lld\n", __func__, prev_lba,
1791 gli->max_lba);
1792 break;
1793 case 0x3F: /* Report LUNs changed, Rescan. */
1794 scsi_scan_host(cfg->host);
1795 break;
1796 default:
1797 rc = -EIO;
1798 break;
1799 }
1800 break;
1801 default:
1802 rc = -EIO;
1803 break;
1804 }
1805 out:
1806 dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
1807 sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
1808 return rc;
1809 }
1810
1811 /**
1812 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
1813 * @sdev: SCSI device associated with LUN.
1814 * @verify: Verify ioctl data structure.
1815 *
1816 * Return: 0 on success, -errno on failure
1817 */
1818 static int cxlflash_disk_verify(struct scsi_device *sdev,
1819 struct dk_cxlflash_verify *verify)
1820 {
1821 int rc = 0;
1822 struct ctx_info *ctxi = NULL;
1823 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1824 struct device *dev = &cfg->dev->dev;
1825 struct llun_info *lli = sdev->hostdata;
1826 struct glun_info *gli = lli->parent;
1827 struct sisl_rht_entry *rhte = NULL;
1828 res_hndl_t rhndl = verify->rsrc_handle;
1829 u64 ctxid = DECODE_CTXID(verify->context_id),
1830 rctxid = verify->context_id;
1831 u64 last_lba = 0;
1832
1833 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
1834 "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
1835 verify->hint, verify->hdr.flags);
1836
1837 ctxi = get_context(cfg, rctxid, lli, 0);
1838 if (unlikely(!ctxi)) {
1839 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1840 rc = -EINVAL;
1841 goto out;
1842 }
1843
1844 rhte = get_rhte(ctxi, rhndl, lli);
1845 if (unlikely(!rhte)) {
1846 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
1847 __func__, rhndl);
1848 rc = -EINVAL;
1849 goto out;
1850 }
1851
1852 /*
1853 * Look at the hint/sense to see if it requires us to redrive
1854 * inquiry (i.e. the Unit attention is due to the WWN changing).
1855 */
1856 if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
1857 /* Can't hold mutex across process_sense/read_cap16,
1858 * since we could have an intervening EEH event.
1859 */
1860 ctxi->unavail = true;
1861 mutex_unlock(&ctxi->mutex);
1862 rc = process_sense(sdev, verify);
1863 if (unlikely(rc)) {
1864 dev_err(dev, "%s: Failed to validate sense data (%d)\n",
1865 __func__, rc);
1866 mutex_lock(&ctxi->mutex);
1867 ctxi->unavail = false;
1868 goto out;
1869 }
1870 mutex_lock(&ctxi->mutex);
1871 ctxi->unavail = false;
1872 }
1873
1874 switch (gli->mode) {
1875 case MODE_PHYSICAL:
1876 last_lba = gli->max_lba;
1877 break;
1878 case MODE_VIRTUAL:
1879 /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
1880 last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
1881 last_lba /= CXLFLASH_BLOCK_SIZE;
1882 last_lba--;
1883 break;
1884 default:
1885 WARN(1, "Unsupported LUN mode!");
1886 }
1887
1888 verify->last_lba = last_lba;
1889
1890 out:
1891 if (likely(ctxi))
1892 put_context(ctxi);
1893 dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
1894 __func__, rc, verify->last_lba);
1895 return rc;
1896 }
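/*
 * Editor's note -- illustrative arithmetic, not part of the driver: the
 * MODE_VIRTUAL branch above turns a translation-table entry count into a
 * last-LBA value. Assuming, purely for illustration, lxt_cnt = 4 chunks,
 * MC_CHUNK_SIZE = 256 blocks per chunk and a LUN block length equal to
 * CXLFLASH_BLOCK_SIZE (making the divide a no-op):
 *
 *	last_lba = (4 * 256 * blk_len) / blk_len - 1
 *	         = 1024 - 1
 *	         = 1023
 *
 * i.e. a 1024-block virtual LUN reports last_lba = 1023, matching the
 * last-LBA convention the physical path uses via gli->max_lba.
 */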
1897
1898 /**
1899 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
1900 * @cmd: The ioctl command to decode.
1901 *
1902 * Return: A string identifying the decoded ioctl.
1903 */
1904 static char *decode_ioctl(int cmd)
1905 {
1906 switch (cmd) {
1907 case DK_CXLFLASH_ATTACH:
1908 return __stringify_1(DK_CXLFLASH_ATTACH);
1909 case DK_CXLFLASH_USER_DIRECT:
1910 return __stringify_1(DK_CXLFLASH_USER_DIRECT);
1911 case DK_CXLFLASH_USER_VIRTUAL:
1912 return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
1913 case DK_CXLFLASH_VLUN_RESIZE:
1914 return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
1915 case DK_CXLFLASH_RELEASE:
1916 return __stringify_1(DK_CXLFLASH_RELEASE);
1917 case DK_CXLFLASH_DETACH:
1918 return __stringify_1(DK_CXLFLASH_DETACH);
1919 case DK_CXLFLASH_VERIFY:
1920 return __stringify_1(DK_CXLFLASH_VERIFY);
1921 case DK_CXLFLASH_VLUN_CLONE:
1922 return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
1923 case DK_CXLFLASH_RECOVER_AFU:
1924 return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
1925 case DK_CXLFLASH_MANAGE_LUN:
1926 return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
1927 }
1928
1929 return "UNKNOWN";
1930 }
1931
1932 /**
1933 * cxlflash_disk_direct_open() - opens a direct (physical) disk
1934 * @sdev: SCSI device associated with LUN.
1935 * @arg: UDirect ioctl data structure.
1936 *
1937 * On successful return, the user is informed of the resource handle
1938 * used to identify the direct LUN and of its size (in blocks),
1939 * expressed in last-LBA format.
1940 *
1941 * Return: 0 on success, -errno on failure
1942 */
1943 static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1944 {
1945 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1946 struct device *dev = &cfg->dev->dev;
1947 struct afu *afu = cfg->afu;
1948 struct llun_info *lli = sdev->hostdata;
1949 struct glun_info *gli = lli->parent;
1950 struct dk_cxlflash_release rel = { { 0 }, 0 };
1951
1952 struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
1953
1954 u64 ctxid = DECODE_CTXID(pphys->context_id),
1955 rctxid = pphys->context_id;
1956 u64 lun_size = 0;
1957 u64 last_lba = 0;
1958 u64 rsrc_handle = -1;
1959 u32 port = CHAN2PORTMASK(sdev->channel);
1960
1961 int rc = 0;
1962
1963 struct ctx_info *ctxi = NULL;
1964 struct sisl_rht_entry *rhte = NULL;
1965
1966 dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
1967
1968 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
1969 if (unlikely(rc)) {
1970 dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
1971 goto out;
1972 }
1973
1974 ctxi = get_context(cfg, rctxid, lli, 0);
1975 if (unlikely(!ctxi)) {
1976 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1977 rc = -EINVAL;
1978 goto err1;
1979 }
1980
1981 rhte = rhte_checkout(ctxi, lli);
1982 if (unlikely(!rhte)) {
1983 dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
1984 __func__, ctxid);
1985 rc = -EMFILE; /* too many opens */
1986 goto err1;
1987 }
1988
1989 rsrc_handle = (rhte - ctxi->rht_start);
1990
1991 rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
1992
1993 last_lba = gli->max_lba;
1994 pphys->hdr.return_flags = 0;
1995 pphys->last_lba = last_lba;
1996 pphys->rsrc_handle = rsrc_handle;
1997
1998 rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
1999 if (unlikely(rc)) {
2000 dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
2001 goto err2;
2002 }
2003
2004 out:
2005 if (likely(ctxi))
2006 put_context(ctxi);
2007 dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
2008 __func__, rsrc_handle, rc, last_lba);
2009 return rc;
2010
2011 err2:
2012 marshal_udir_to_rele(pphys, &rel);
2013 _cxlflash_disk_release(sdev, ctxi, &rel);
2014 goto out;
2015 err1:
2016 cxlflash_lun_detach(gli);
2017 goto out;
2018 }
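/*
 * Editor's note -- illustrative sketch, not part of the driver: from user
 * space the direct path above is exercised with the DK_CXLFLASH_USER_DIRECT
 * ioctl after a successful DK_CXLFLASH_ATTACH. The block-device fd, the
 * attach call that produced 'attach.context_id' and the include path are
 * assumptions made purely for illustration.
 *
 *	struct dk_cxlflash_udirect udirect;
 *
 *	memset(&udirect, 0, sizeof(udirect));
 *	udirect.hdr.version = DK_CXLFLASH_VERSION_0;
 *	udirect.context_id = attach.context_id;  // from DK_CXLFLASH_ATTACH
 *
 *	if (ioctl(disk_fd, DK_CXLFLASH_USER_DIRECT, &udirect) == 0) {
 *		// udirect.rsrc_handle names the physical LUN in this context;
 *		// udirect.last_lba is its size expressed as a last LBA.
 *	}
 */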
2019
2020 /**
2021 * ioctl_common() - common IOCTL handler for driver
2022 * @sdev: SCSI device associated with LUN.
2023 * @cmd: IOCTL command.
2024 *
2025 * Handles common fencing operations that apply to multiple ioctls. Cleanup-
2026 * oriented ioctls are always allowed through, even when the adapter is in a
2027 * failed/terminating state.
2028 *
2029 * Return: 0 on success, -errno on failure
2030 */
2031 static int ioctl_common(struct scsi_device *sdev, int cmd)
2032 {
2033 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
2034 struct device *dev = &cfg->dev->dev;
2035 struct llun_info *lli = sdev->hostdata;
2036 int rc = 0;
2037
2038 if (unlikely(!lli)) {
2039 dev_dbg(dev, "%s: Unknown LUN\n", __func__);
2040 rc = -EINVAL;
2041 goto out;
2042 }
2043
2044 rc = check_state(cfg);
2045 if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
2046 switch (cmd) {
2047 case DK_CXLFLASH_VLUN_RESIZE:
2048 case DK_CXLFLASH_RELEASE:
2049 case DK_CXLFLASH_DETACH:
2050 dev_dbg(dev, "%s: Command override rc=%d\n",
2051 __func__, rc);
2052 rc = 0;
2053 break;
2054 }
2055 }
2056 out:
2057 return rc;
2058 }
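/*
 * Editor's note -- illustrative sketch, not part of the driver: the override
 * above exists so that a user-space teardown path can still free its
 * resources once the adapter has failed or is being removed. A hedged
 * fragment, assuming 'rel' and 'detach' were populated from earlier
 * attach/open calls:
 *
 *	// Return codes are intentionally ignored; in STATE_FAILTERM these
 *	// cleanup ioctls are let through precisely so teardown can finish.
 *	ioctl(disk_fd, DK_CXLFLASH_RELEASE, &rel);
 *	ioctl(disk_fd, DK_CXLFLASH_DETACH, &detach);
 */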
2059
2060 /**
2061 * cxlflash_ioctl() - IOCTL handler for driver
2062 * @sdev: SCSI device associated with LUN.
2063 * @cmd: IOCTL command.
2064 * @arg: Userspace ioctl data structure.
2065 *
2066 * A read/write semaphore is used to implement a 'drain' of currently
2067 * running ioctls. The read semaphore is taken at the beginning of each
2068 * ioctl thread and released upon concluding execution. Additionally, the
2069 * semaphore should be released and then reacquired in any ioctl execution
2070 * path which will wait for an event to occur that is outside the scope of
2071 * the ioctl (e.g. an adapter reset). To drain the ioctls currently running,
2072 * a thread simply needs to acquire the write semaphore.
2073 *
2074 * Return: 0 on success, -errno on failure
2075 */
2076 int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
2077 {
2078 typedef int (*sioctl) (struct scsi_device *, void *);
2079
2080 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
2081 struct device *dev = &cfg->dev->dev;
2082 struct afu *afu = cfg->afu;
2083 struct dk_cxlflash_hdr *hdr;
2084 char buf[sizeof(union cxlflash_ioctls)];
2085 size_t size = 0;
2086 bool known_ioctl = false;
2087 int idx;
2088 int rc = 0;
2089 struct Scsi_Host *shost = sdev->host;
2090 sioctl do_ioctl = NULL;
2091
2092 static const struct {
2093 size_t size;
2094 sioctl ioctl;
2095 } ioctl_tbl[] = { /* NOTE: order matters here */
2096 {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
2097 {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
2098 {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
2099 {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
2100 {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
2101 {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
2102 {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
2103 {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
2104 {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
2105 {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
2106 };
2107
2108 /* Hold read semaphore so we can drain if needed */
2109 down_read(&cfg->ioctl_rwsem);
2110
2111 /* Restrict command set to physical support only for internal LUN */
2112 if (afu->internal_lun)
2113 switch (cmd) {
2114 case DK_CXLFLASH_RELEASE:
2115 case DK_CXLFLASH_USER_VIRTUAL:
2116 case DK_CXLFLASH_VLUN_RESIZE:
2117 case DK_CXLFLASH_VLUN_CLONE:
2118 dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
2119 __func__, decode_ioctl(cmd), afu->internal_lun);
2120 rc = -EINVAL;
2121 goto cxlflash_ioctl_exit;
2122 }
2123
2124 switch (cmd) {
2125 case DK_CXLFLASH_ATTACH:
2126 case DK_CXLFLASH_USER_DIRECT:
2127 case DK_CXLFLASH_RELEASE:
2128 case DK_CXLFLASH_DETACH:
2129 case DK_CXLFLASH_VERIFY:
2130 case DK_CXLFLASH_RECOVER_AFU:
2131 case DK_CXLFLASH_USER_VIRTUAL:
2132 case DK_CXLFLASH_VLUN_RESIZE:
2133 case DK_CXLFLASH_VLUN_CLONE:
2134 dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
2135 __func__, decode_ioctl(cmd), cmd, shost->host_no,
2136 sdev->channel, sdev->id, sdev->lun);
2137 rc = ioctl_common(sdev, cmd);
2138 if (unlikely(rc))
2139 goto cxlflash_ioctl_exit;
2140
2141 /* fall through */
2142
2143 case DK_CXLFLASH_MANAGE_LUN:
2144 known_ioctl = true;
2145 idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
2146 size = ioctl_tbl[idx].size;
2147 do_ioctl = ioctl_tbl[idx].ioctl;
2148
2149 if (likely(do_ioctl))
2150 break;
2151
2152 /* fall through */
2153 default:
2154 rc = -EINVAL;
2155 goto cxlflash_ioctl_exit;
2156 }
2157
2158 if (unlikely(copy_from_user(&buf, arg, size))) {
2159 dev_err(dev, "%s: copy_from_user() fail "
2160 "size=%lu cmd=%d (%s) arg=%p\n",
2161 __func__, size, cmd, decode_ioctl(cmd), arg);
2162 rc = -EFAULT;
2163 goto cxlflash_ioctl_exit;
2164 }
2165
2166 hdr = (struct dk_cxlflash_hdr *)&buf;
2167 if (hdr->version != DK_CXLFLASH_VERSION_0) {
2168 dev_dbg(dev, "%s: Version %u not supported for %s\n",
2169 __func__, hdr->version, decode_ioctl(cmd));
2170 rc = -EINVAL;
2171 goto cxlflash_ioctl_exit;
2172 }
2173
2174 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
2175 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
2176 rc = -EINVAL;
2177 goto cxlflash_ioctl_exit;
2178 }
2179
2180 rc = do_ioctl(sdev, (void *)&buf);
2181 if (likely(!rc))
2182 if (unlikely(copy_to_user(arg, &buf, size))) {
2183 dev_err(dev, "%s: copy_to_user() fail "
2184 "size=%lu cmd=%d (%s) arg=%p\n",
2185 __func__, size, cmd, decode_ioctl(cmd), arg);
2186 rc = -EFAULT;
2187 }
2188
2189 /* fall through to exit */
2190
2191 cxlflash_ioctl_exit:
2192 up_read(&cfg->ioctl_rwsem);
2193 if (unlikely(rc && known_ioctl))
2194 dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2195 "returned rc %d\n", __func__,
2196 decode_ioctl(cmd), cmd, shost->host_no,
2197 sdev->channel, sdev->id, sdev->lun, rc);
2198 else
2199 dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2200 "returned rc %d\n", __func__, decode_ioctl(cmd),
2201 cmd, shost->host_no, sdev->channel, sdev->id,
2202 sdev->lun, rc);
2203 return rc;
2204 }
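/*
 * Editor's note -- illustrative sketch, not part of the driver: the drain
 * described in the cxlflash_ioctl() prolog only needs to own the write side
 * of ioctl_rwsem for an instant, since down_write() cannot succeed until
 * every in-flight ioctl has dropped its read hold. A minimal, hypothetical
 * helper showing that pattern:
 */
static void example_drain_ioctls(struct cxlflash_cfg *cfg)
{
	/* Blocks until all in-flight ioctls release their read hold */
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}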