/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least once
 * to flush the reset_waitq.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multithreaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = current->tgid, ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = current->parent->tgid;

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}
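
/*
 * Illustrative usage sketch (mirroring the ioctl handlers below): a caller
 * obtains a validated, locked context, operates on it, then releases it
 * via put_context():
 *
 *	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *	if (unlikely(!ctxi))
 *		return -EINVAL;
 *	...operate on the context...
 *	put_context(ctxi);
 */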

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	u64 val;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	u8 *sense_buf = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;

			scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						kfree(sense_buf);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
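	/*
	 * READ CAPACITY(16) parameter data (per SBC): bytes 0-7 hold the
	 * returned maximum LBA and bytes 8-11 the logical block length in
	 * bytes, both big-endian, matching the conversions below.
	 */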
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	kfree(sense_buf);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
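
/*
 * Note: the inverse teardown sequence (clear the valid bit, then lun_id,
 * then the remaining dword, each step followed by a dma_wmb()) is
 * performed by _cxlflash_disk_release() when releasing a physical LUN.
 */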

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNs by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active)
			cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context.
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained CXL context reference.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 struct cxl_context *ctx, int ctxid, struct file *file,
			 u32 perms)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->pid = current->tgid; /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}

/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
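	/*
	 * Drop and retake the context mutex so the locks can be acquired in
	 * the documented order (cfg->ctx_tbl_list_mutex -> ctxi->mutex);
	 * 'unavail' keeps other threads from using the context while it is
	 * briefly unlocked.
	 */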
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that explicitly performing a close
 * is considered catastrophic and subsequent usage of the superpipe API
 * with previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. With the exception of when
 * the CXL process element (context id) lookup fails (a case that
 * should theoretically never occur), every call into this routine
 * results in a complete freeing of a context.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
	cxl_fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
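	/*
	 * Tearing down the user mappings forces subsequent accesses to
	 * refault through cxlflash_mmap_fault(), which installs the error
	 * page while error recovery is active.
	 */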
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma:	VM area associated with mapping.
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int rc = 0;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cxl_fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}
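
/*
 * Contexts parked on the error list above stay there until their owner
 * drives recovery via the DK_CXLFLASH_RECOVER_AFU ioctl (see
 * cxlflash_afu_recover() below), which reattaches each context and
 * reinstalls it in the context table.
 */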

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
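
/*
 * Illustrative sketch of the calling pattern check_state() assumes (see
 * read_cap16() above): an ioctl thread drops the ioctl read semaphore
 * around a lengthy call and revalidates the adapter state afterwards.
 *
 *	up_read(&cfg->ioctl_rwsem);
 *	result = scsi_execute(...);
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = check_state(cfg);
 */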

/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct cxl_ioctl_start_work *work;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	struct cxl_context *ctx = NULL;

	int fd = -1;

	if (attach->num_interrupts > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, attach->num_interrupts);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		goto err;
	}

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	work = &ctxi->work;
	work->num_interrupts = attach->num_interrupts;
	work->flags = CXL_START_WORK_NUM_IRQS;

	rc = cxl_start_work(ctx, work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
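	/*
	 * O_RDONLY, O_WRONLY and O_RDWR are 0, 1 and 2 respectively, so
	 * adding 1 yields a two-bit read/write mask for SISL_RHT_PERM().
	 */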
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Context mutex is locked upon return */
	init_context(ctxi, cfg, ctx, ctxid, file, perms);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cxl_stop_context(ctx);
		cxl_release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine (cxlflash_cxl_release) we should try to fix the
	 * issue here.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}

/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	struct cxl_context *ctx;
	struct afu *afu = cfg->afu;

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cxl_start_work(ctx, &ctxi->work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cxl_stop_context(ctx);
err1:
	cxl_release_context(ctx);
	goto out;
}
1591
65be2c79
MO
1592/**
1593 * cxlflash_afu_recover() - initiates AFU recovery
1594 * @sdev: SCSI device associated with LUN.
1595 * @recover: Recover ioctl data structure.
1596 *
1597 * Only a single recovery is allowed at a time to avoid exhausting CXL
1598 * resources (leading to recovery failure) in the event that we're up
1599 * against the maximum number of contexts limit. For similar reasons,
1600 * a context recovery is retried if there are multiple recoveries taking
1601 * place at the same time and the failure was due to CXL services being
1602 * unable to keep up.
1603 *
635f6b08
MK
1604 * As this routine is called on ioctl context, it holds the ioctl r/w
1605 * semaphore that is used to drain ioctls in recovery scenarios. The
1606 * implementation to achieve the pacing described above (a local mutex)
1607 * requires that the ioctl r/w semaphore be dropped and reacquired to
1608 * avoid a 3-way deadlock when multiple process recoveries operate in
1609 * parallel.
1610 *
65be2c79
MO
1611 * Because a user can detect an error condition before the kernel, it is
1612 * quite possible for this routine to act as the kernel's EEH detection
1613 * source (MMIO read of mbox_r). Because of this, there is a window of
1614 * time where an EEH might have been detected but not yet 'serviced'
439e85c1 1615 * (callback invoked, causing the device to enter reset state). To avoid
65be2c79
MO
1616 * looping in this routine during that window, a 1 second sleep is in place
1617 * between the time the MMIO failure is detected and the time a wait on the
439e85c1 1618 * reset wait queue is attempted via check_state().
65be2c79
MO
1619 *
1620 * Return: 0 on success, -errno on failure
1621 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc)
		goto out;
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc)
					goto out;
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&afu->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained with
		 * get_context() as it is no longer needed and sleep for a short
		 * period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}
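
/*
 * Illustrative sketch (not part of this driver): how a userspace
 * application might drive DK_CXLFLASH_RECOVER_AFU after detecting a
 * dead context. Structure, command, and flag names are from
 * uapi/scsi/cxlflash_ioctl.h; recover_afu_context(), the header
 * install path, and the remapping step are hypothetical and shown
 * only to trace the return values populated above.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>	/* assumed install path of the uapi header */

static int recover_afu_context(int disk_fd, __u64 ctxid)
{
	struct dk_cxlflash_recover_afu recover;

	memset(&recover, 0, sizeof(recover));
	recover.hdr.version = DK_CXLFLASH_VERSION_0;
	recover.context_id = ctxid;

	if (ioctl(disk_fd, DK_CXLFLASH_RECOVER_AFU, &recover))
		return -1;	/* errno describes the failure */

	if (recover.hdr.return_flags & DK_CXLFLASH_APP_CLOSE_ADAP_FD) {
		/*
		 * The context was reset: close the old adapter fd and
		 * remap recover.mmio_size bytes against recover.adap_fd,
		 * using the (possibly new) recover.context_id.
		 */
	}

	return 0;
}
#endif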

/**
 * process_sense() - evaluates and processes sense data
 * @sdev: SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev: SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
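		/*
		 * Each allocated LXT entry maps MC_CHUNK_SIZE blocks of the
		 * LUN's native block length, so the product below is the
		 * virtual LUN size in bytes; rescaling to CXLFLASH_BLOCK_SIZE
		 * units and subtracting one yields the last addressable LBA.
		 */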
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}
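
/*
 * Illustrative sketch (not part of this driver): a userspace consumer
 * redriving verification after a check condition. Structure, command,
 * and flag names are from uapi/scsi/cxlflash_ioctl.h (headers as in
 * the earlier recovery sketch); reverify_lun() and its arguments are
 * hypothetical.
 */
#if 0
static int reverify_lun(int disk_fd, __u64 ctxid, __u64 rsrc_handle,
			const __u8 *sense, size_t sense_len)
{
	struct dk_cxlflash_verify verify;

	memset(&verify, 0, sizeof(verify));
	verify.hdr.version = DK_CXLFLASH_VERSION_0;
	verify.context_id = ctxid;
	verify.rsrc_handle = rsrc_handle;
	verify.hint = DK_CXLFLASH_VERIFY_HINT_SENSE;
	if (sense_len > DK_CXLFLASH_VERIFY_SENSE_LEN)
		sense_len = DK_CXLFLASH_VERIFY_SENSE_LEN;
	memcpy(verify.sense_data, sense, sense_len);

	if (ioctl(disk_fd, DK_CXLFLASH_VERIFY, &verify))
		return -1;

	/* verify.last_lba now reflects any capacity change */
	return 0;
}
#endif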

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd: The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev: SCSI device associated with LUN.
 * @arg: UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORTMASK(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
	cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err1:
	cxlflash_lun_detach(gli);
	goto out;
}
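
/*
 * Illustrative sketch (not part of this driver): opening a direct
 * (physical) LUN from userspace once a context is attached. Structure
 * and command names are from uapi/scsi/cxlflash_ioctl.h (headers as in
 * the earlier recovery sketch); open_direct_lun() is a hypothetical
 * helper.
 */
#if 0
static int open_direct_lun(int disk_fd, __u64 ctxid,
			   __u64 *rsrc_handle, __u64 *last_lba)
{
	struct dk_cxlflash_udirect udirect;

	memset(&udirect, 0, sizeof(udirect));
	udirect.hdr.version = DK_CXLFLASH_VERSION_0;
	udirect.context_id = ctxid;

	if (ioctl(disk_fd, DK_CXLFLASH_USER_DIRECT, &udirect))
		return -1;

	*rsrc_handle = udirect.rsrc_handle;	/* identifies the LUN */
	*last_lba = udirect.last_lba;		/* size in last LBA format */
	return 0;
}
#endif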

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls.
 * Always allows cleanup-oriented ioctls through, even when operating in a
 * failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 * @arg: Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally, the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore; a sketch of this
 * pattern appears after the function below.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
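		/*
		 * The table index is derived from the ioctl number; this
		 * only works because ioctl_tbl[] is kept in the same order
		 * as the DK_CXLFLASH_* command definitions (the "order
		 * matters" note above).
		 */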
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}
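
/*
 * Illustrative sketch (not part of this driver source): the 'drain'
 * described in the cxlflash_ioctl() prolog. A thread taking the write
 * side of ioctl_rwsem blocks until every in-flight ioctl has dropped
 * its read hold; drain_ioctls() is a hypothetical name for the pattern.
 */
#if 0
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);	/* waits for all readers to drop */
	up_write(&cfg->ioctl_rwsem);	/* new ioctls may now proceed */
}
#endif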