/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"
struct cxlflash_global global;
/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}
/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}
/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}
/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		found = false;

		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
	}
}
/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}
/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi-threaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = current->tgid, ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = current->parent->tgid;

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}
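/*
 * Illustrative only: callers are expected to pair a successful lookup with
 * put_context() once done with the locked context, e.g. (a minimal sketch,
 * not additional driver code):
 *
 *	ctxi = get_context(cfg, rctxid, lli, 0);
 *	if (ctxi) {
 *		... operate on the validated, locked context ...
 *		put_context(ctxi);
 *	}
 */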
/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	u64 val;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llX\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	u8 *sense_buf = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state! result=0x%08X\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;

			scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						kfree(sense_buf);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=0x%x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	kfree(sense_buf);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}
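/*
 * For reference, the buffer parsing above relies on the SBC READ CAPACITY(16)
 * parameter data layout: bytes 0-7 hold the big-endian returned LBA and
 * bytes 8-11 hold the big-endian logical block length.
 */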
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		pr_debug("%s: Context does not have allocated RHT!\n",
			 __func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		pr_debug("%s: Bad resource handle LUN! (%d)\n",
			 __func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		pr_debug("%s: Unopened resource handle! (%d)\n",
			 __func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}
/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
	return rhte;
}
/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}
/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
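/*
 * Illustrative summary of the ordering above (not additional driver code):
 * the format bit is published first, then the LUN id, and only then is the
 * second dword (permissions, port selection and valid bit) committed as a
 * single 64-bit store, with dma_wmb() fencing each step so the AFU never
 * observes a partially populated entry.
 */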
/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}
/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context! (%llu)\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNS by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active)
			cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}
/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}
/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context!\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}
/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained CXL context reference.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 struct cxl_context *ctx, int ctxid, struct file *file,
			 u32 perms)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->pid = current->tgid; /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */

	mutex_lock(&ctxi->mutex);
}
/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}
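/*
 * Illustrative only: the final reference is typically dropped via
 * kref_put(&ctxi->kref, remove_context) with ctxi->mutex held, as done in
 * _cxlflash_disk_detach() below; remove_context() then re-takes the locks
 * in table/list -> context order before tearing the context down.
 */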
/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context! (%llu)\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}
/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that an explicit close is considered
 * catastrophic and subsequent usage of the superpipe API with
 * previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. With exception to when the
 * CXL process element (context id) lookup fails (a case that should
 * theoretically never occur), every call into this routine results
 * in a complete freeing of a context.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed! (%d)\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: Context %d already free!\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns context %d!\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);

	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
	cxl_fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}
/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * services).
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}
/**
 * get_err_page() - obtains and allocates the error notification page
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(void)
{
	struct page *err_page = global.err_page;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			pr_err("%s: Unable to allocate err_page!\n", __func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	pr_debug("%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}
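/*
 * Note the allocate-then-publish pattern above: the page is allocated
 * outside of global.mutex and only committed to global.err_page under the
 * lock, so a thread that lost the race frees its local copy and adopts the
 * winner's page rather than leaking it.
 */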
/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma:	VM area associated with mapping.
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int rc = 0;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed! (%d)\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page!\n",
			__func__);

		err_page = get_err_page();
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not obtain error page!\n",
				__func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}
/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};
/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed! (%d)\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cxl_fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}
const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};
/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);

	return rc;
}
/* Dummy NULL fops */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};
/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
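/*
 * Illustrative only: a typical ioctl-path caller pairs this check with an
 * early exit (a minimal sketch, not additional driver code):
 *
 *	rc = check_state(cfg);
 *	if (unlikely(rc))
 *		goto out;	// adapter failed or reset wait interrupted
 */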
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct cxl_ioctl_start_work *work;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	struct cxl_context *ctx = NULL;

	int fd = -1;

	if (attach->num_interrupts > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, attach->num_interrupts);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device! (%d)\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context! (%016llX)\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached!\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context! (%d)\n",
			__func__, ctxid);
		goto err;
	}

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	work = &ctxi->work;
	work->num_interrupts = attach->num_interrupts;
	work->flags = CXL_START_WORK_NUM_IRQS;

	rc = cxl_start_work(ctx, work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Context mutex is locked upon return */
	init_context(ctxi, cfg, ctx, ctxid, file, perms);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	else
		attach->hdr.return_flags = 0;

	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cxl_stop_context(ctx);
		cxl_release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine (cxlflash_cxl_release) we should try to fix the
	 * issue here.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}
/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	struct cxl_context *ctx;
	struct afu *afu = cfg->afu;

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cxl_start_work(ctx, &ctxi->work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cxl_stop_context(ctx);
err1:
	cxl_release_context(ctx);
	goto out;
}
/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * As this routine is called on ioctl context, it holds the ioctl r/w
 * semaphore that is used to drain ioctls in recovery scenarios. The
 * implementation to achieve the pacing described above (a local mutex)
 * requires that the ioctl r/w semaphore be dropped and reacquired to
 * avoid a 3-way deadlock when multiple process recoveries operate in
 * parallel.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc)
		goto out;
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again!\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc)
					goto out;
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&afu->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained with
		 * get_context() as it is no longer needed and sleep for a short
		 * period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}
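/*
 * Illustrative only: on a successful recovery the user is expected to act on
 * the returned flags - DK_CXLFLASH_APP_CLOSE_ADAP_FD indicates the stale
 * adapter fd should be closed in favor of recover->adap_fd, and
 * DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET indicates the context was reset and
 * any MMIO mappings must be re-established.
 */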
/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}
/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
		"flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
		__func__, rc, verify->last_lba);
	return rc;
}
/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}
/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORT(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
			__func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: too many opens for this context\n", __func__);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
	cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err1:
	cxlflash_lun_detach(gli);
	goto out;
}
/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override! (%d)\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}
/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail! "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail! "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);

	return rc;
}
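/*
 * Illustrative only: a minimal sketch of how userspace might drive this
 * entry point via an open SCSI device fd (the device node path and open
 * flags are assumptions, not defined by this file):
 *
 *	struct dk_cxlflash_attach attach = { 0 };
 *
 *	attach.hdr.version = DK_CXLFLASH_VERSION_0;
 *	attach.num_interrupts = 4;
 *	rc = ioctl(fd, DK_CXLFLASH_ATTACH, &attach);
 *	if (!rc)
 *		... use attach.context_id and attach.adap_fd ...
 */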