/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error else where.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	bool cmd_is_tmf;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;
			complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

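/*
 * Note on the poll loop in context_reset() above: the AFU signals completion
 * by clearing bit 0 of the reset register. The udelay(1 << nretry) doubles on
 * every pass (1us, 2us, 4us, ...), so with MC_ROOM_RETRY_CNT retries the
 * total busy-wait is bounded by roughly 2^(MC_ROOM_RETRY_CNT + 1)
 * microseconds before the routine gives up with -ETIMEDOUT.
 */
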
/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

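/*
 * Design note for send_cmd_ioarrin(): cmd_room is an MMIO read, which is
 * costly on the submission fast path. The cached hwq->room is therefore
 * decremented locally and only refreshed from hardware once it underflows.
 * For example, a refresh that reads room=16 covers the current submission
 * plus 15 more on cached credits before the next MMIO read is needed.
 */
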
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}

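/*
 * Design note for send_cmd_sq(): SQ submissions are paced by hsq_credits
 * rather than an MMIO read. Credits start at NUM_SQ_ENTRY - 1 (one slot is
 * sacrificed so a full ring can be distinguished from an empty one, see
 * start_afu()) and are returned by process_hrrq() as completions are
 * harvested from the RRQ.
 */
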
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

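/*
 * Example: with num_hwqs = 4, HWQ_MODE_RR cycles the selection 0,1,2,3,0,...
 * across submissions, HWQ_MODE_TAG derives the queue from the block layer's
 * unique tag so a command stays correlated with its hardware queue, and
 * HWQ_MODE_CPU keys off the submitting CPU to favor cache locality.
 */
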
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

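/*
 * Note: tmf_active is the single-outstanding-TMF gate. It is set in
 * send_tmf() under tmf_slock, cleared by cmd_complete() when the TMF
 * response arrives (or by the 5 second timeout above), and also checked by
 * cxlflash_queuecommand() so regular I/O is held off while a TMF is in
 * flight.
 */
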
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx));
	hwq->ctx = NULL;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

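/*
 * Note: the wait loop in notify_shutdown() backs off progressively, sleeping
 * 100 * retry_cnt ms between polls (100 ms, 200 ms, ...). Per port that is
 * 100 * (1 + 2 + ... + (MC_RETRY_CNT - 1)) ms in total, which lands in the
 * ballpark of the 1.5 seconds cited in the comment above.
 */
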
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when
	 * there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:	Hardware queue associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};

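/*
 * The seemingly odd 1, 0, 3, 2 port ordering above is intentional: the table
 * is indexed by bit position (see the NOTE above), and that is the order in
 * which the per-port async status bits are laid out within SISL_ASTATUS by
 * the SISLite register definition.
 */
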
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	ulong bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;

	rc = cxl_start_context(hwq->ctx,
			       hwq->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

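/*
 * Note: kstrtoul() in read_vpd() is passed WWPN_LEN as its base argument.
 * This works because a WWPN is a 16-character ASCII hex string, so WWPN_LEN
 * (assumed to be 16 here) doubles as base 16; tmp_buf is zero-initialized,
 * which supplies the NUL termination the conversion requires.
 */
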
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			  &hwq->ctrl_map->ctx_cap);
	}

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
		hwq->toggle = 1;

		/* Initialize spin locks */
		spin_lock_init(&hwq->hrrq_slock);
		spin_lock_init(&hwq->hsq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @hwq:	Hardware queue to initialize.
 *
 * Return: 0 on success, -errno on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	struct cxl_context *ctx = hwq->ctx;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = is_primary_hwq ? 3 : 2;

	rc = cxl_allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}

/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;
	enum undo_level level;

	hwq->afu = cfg->afu;
	hwq->index = index;
	INIT_LIST_HEAD(&hwq->pending_cmds);

	if (index == PRIMARY_HWQ)
		ctx = cxl_get_context(cfg->dev);
	else
		ctx = cxl_dev_context_init(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto err1;
	}

	WARN_ON(hwq->ctx);
	hwq->ctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* Reset AFU when initializing primary context */
	if (index == PRIMARY_HWQ) {
		rc = cxl_afu_reset(ctx);
		if (unlikely(rc)) {
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
				__func__, rc);
			goto err1;
		}
	}

	level = init_intr(cfg, hwq);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto err2;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg, index);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto err2;
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	term_intr(cfg, level, index);
	if (index != PRIMARY_HWQ)
		cxl_release_context(ctx);
err1:
	hwq->ctx = NULL;
	goto out;
}

/**
 * get_num_afu_ports() - determines and configures the number of AFU ports
 * @cfg:	Internal structure associated with the host.
 *
 * This routine determines the number of AFU ports by converting the global
 * port selection mask. The converted value is only valid following an AFU
 * reset (explicit or power-on). This routine must be invoked shortly after
 * mapping as other routines are dependent on the number of ports during the
 * initialization sequence.
 *
 * To support legacy AFUs that might not have reflected an initial global
 * port mask (value read is 0), default to the number of ports originally
 * supported by the cxlflash driver (2) before hardware with other port
 * offerings was introduced.
 */
static void get_num_afu_ports(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 port_mask;
	int num_fc_ports = LEGACY_FC_PORTS;

	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	if (port_mask != 0ULL)
		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);

	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
		__func__, port_mask, num_fc_ports);

	cfg->num_fc_ports = num_fc_ports;
	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}

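/*
 * Example: a port mask of 0x3 (ports 0 and 1) yields ilog2(0x3) + 1 = 2
 * ports and a mask of 0xf yields 4, while a mask of 0 falls back to
 * LEGACY_FC_PORTS for AFUs that predate the port mask convention.
 */
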
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int i;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	afu->num_hwqs = afu->desired_hwqs;
	for (i = 0; i < afu->num_hwqs; i++) {
		rc = init_mc(cfg, i);
		if (rc) {
			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
				__func__, rc, i);
			goto err1;
		}
	}

	/* Map the entire MMIO space of the AFU using the first context */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	afu->afu_map = cxl_psa_map(hwq->ctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	get_num_afu_ports(cfg);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->room = readq_be(&hwq->host_map->cmd_room);
	}

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	for (i = afu->num_hwqs - 1; i >= 0; i--) {
		term_intr(cfg, UNMAP_THREE, i);
		term_mc(cfg, i);
	}
	goto out;
}

/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available restart it after the reset is complete
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}

/**
 * cxlflash_async_reset_host() - asynchronous host reset handler
 * @data:	Private data provided while scheduling reset.
 * @cookie:	Cookie that can be used for checkpointing.
 */
static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
{
	struct cxlflash_cfg *cfg = data;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	if (cfg->state != STATE_RESET) {
		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	drain_ioctls(cfg);
	cxlflash_mark_contexts_error(cfg);
	rc = afu_reset(cfg);
	if (rc)
		cfg->state = STATE_FAILTERM;
	else
		cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);

out:
	scsi_unblock_requests(cfg->host);
}

/**
 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Not performing reset state=%d\n",
			__func__, cfg->state);
		return;
	}

	cfg->state = STATE_RESET;
	scsi_block_requests(cfg->host);
	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
						 cfg);
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires that calling threads not be in
 * interrupt context, due to the possibility of sleeping during concurrent
 * sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success, -errno on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	char *buf = NULL;
	int rc = 0;
	int nretry = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));

retry:
	memset(cmd, 0, sizeof(*cmd));
	INIT_LIST_HEAD(&cmd->queue);
	init_completion(&cmd->cevent);
	cmd->parent = afu;
	cmd->hwq_index = hwq->index;

	dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d nretry=%d\n",
		__func__, afu, cmd, ctx_hndl_u, nretry);

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		rc = -ENOBUFS;
		goto out;
	}

	rc = wait_resp(afu, cmd);
	switch (rc) {
	case -ETIMEDOUT:
		rc = afu->context_reset(hwq);
		if (rc) {
			cxlflash_schedule_async_reset(cfg);
			break;
		}
		/* fall through to retry */
	case -EAGAIN:
		if (++nretry < 2)
			goto retry;
		/* fall through to exit */
	default:
		break;
	}

out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
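
/*
 * Illustrative sketch (not part of the driver): a superpipe-style caller
 * releasing a resource handle would typically follow up with a sync against
 * its context, along the lines of:
 *
 *	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
 *
 * The mode values (lightweight, heavyweight, global) come from the SISLite
 * definitions; the identifiers and call site shown here are assumptions for
 * illustration only.
 */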
/**
 * cxlflash_eh_abort_handler() - abort a SCSI command
 * @scp:	SCSI command to abort.
 *
 * CXL Flash devices do not support a single command abort. Reset the context
 * as per the SISLite specification. Flush any pending commands in the
 * hardware queue before the reset.
 *
 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
	int rc = FAILED;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afuc(scp);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/* When the state is not normal, another reset/reload is in progress.
	 * Return failed and the mid-layer will invoke the host reset handler.
	 */
	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	rc = afu->context_reset(hwq);
	if (unlikely(rc))
		goto out;

	rc = SUCCESS;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
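
/*
 * Illustrative sketch (not part of the driver): the SCSI midlayer calls this
 * hook when userspace writes a device's queue_depth attribute. A request
 * beyond the supported maximum is quietly capped, e.g.:
 *
 *	depth = cxlflash_change_queue_depth(sdev, 4096);
 *	(depth is now min(4096, CXLFLASH_MAX_CMDS_PER_LUN))
 */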
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_status(u32 port,
					 struct cxlflash_cfg *cfg,
					 char *buf)
{
	struct device *dev = &cfg->dev->dev;
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_port_regs;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_regs = get_fc_port_regs(cfg, port);
	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}

/**
 * port2_show() - queries and presents the current status of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(2, cfg, buf);
}

/**
 * port3_show() - queries and presents the current status of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(3, cfg, buf);
}
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data resident in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: @count.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be one less than the number
		 * of fc ports for this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
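
/*
 * Illustrative sketch (not part of the driver): from userspace the LUN mode
 * is selected through sysfs; per the table above, writing "1" exposes a
 * single internal 64K/512B LUN (host number hypothetical):
 *
 *	echo 1 > /sys/class/scsi_host/host0/lun_mode
 */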
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}
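
/*
 * Illustrative sketch (not part of the driver): reading the corresponding
 * sysfs attribute yields one line per virtual LUN slot in the format above,
 * e.g. (values hypothetical):
 *
 *	000: 4000400000000000
 *	001: 0000000000000000
 */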
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}

/**
 * port2_lun_table_show() - presents the current LUN table of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(2, cfg, buf);
}

/**
 * port3_lun_table_show() - presents the current LUN table of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(3, cfg, buf);
}
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}
/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data resident in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: @count on success, -errno on failure.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	u32 weight;
	int rc, i;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Requested IRQ poll weight is already in use.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_disable(&hwq->irqpoll);
		}
	}

	afu->irqpoll_weight = weight;

	if (weight > 0) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
		}
	}

	return count;
}
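
/*
 * Illustrative sketch (not part of the driver): with a nonzero weight, the
 * hardware interrupt handler can hand completion processing to the irq_poll
 * framework instead of draining the RRQ inline:
 *
 *	irq_poll_sched(&hwq->irqpoll);
 *
 * The registered callback (cxlflash_irqpoll) then reaps up to 'weight'
 * completions per invocation in softirq context.
 */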
/**
 * num_hwqs_show() - presents the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
 *		queues in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t num_hwqs_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
}
/**
 * num_hwqs_store() - sets the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
 *		queues in ASCII.
 * @count:	Length of data resident in @buf.
 *
 * n > 0: num_hwqs = n
 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
 *
 * Return: @count on success, -errno on failure.
 */
static ssize_t num_hwqs_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;
	int rc;
	int nhwqs, num_hwqs;

	rc = kstrtoint(buf, 10, &nhwqs);
	if (rc)
		return -EINVAL;

	if (nhwqs >= 1)
		num_hwqs = nhwqs;
	else if (nhwqs == 0)
		num_hwqs = num_online_cpus();
	else
		num_hwqs = num_online_cpus() / abs(nhwqs);

	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
	WARN_ON_ONCE(afu->desired_hwqs == 0);

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rc = afu_reset(cfg);
		if (rc)
			cfg->state = STATE_FAILTERM;
		else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			goto retry;
	default:
		/* Ideally should not happen */
		dev_err(dev, "%s: Device is not ready, state=%d\n",
			__func__, cfg->state);
		break;
	}

	return count;
}
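
/*
 * Worked example (illustrative, not part of the driver): on a 16-CPU system
 * the mapping documented above gives:
 *
 *	n =  4  ->  num_hwqs = 4
 *	n =  0  ->  num_hwqs = num_online_cpus() = 16
 *	n = -2  ->  num_hwqs = 16 / abs(-2) = 8
 *
 * with the final value capped at CXLFLASH_MAX_HWQS.
 */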
static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
/**
 * hwq_mode_show() - presents the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
 *		as a character string.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t hwq_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
}
/**
 * hwq_mode_store() - sets the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
 *		as a character string.
 * @count:	Length of data resident in @buf.
 *
 * rr = Round-Robin
 * tag = Block MQ Tagging
 * cpu = CPU Affinity
 *
 * Return: @count on success, -errno on failure.
 */
static ssize_t hwq_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int i;
	u32 mode = MAX_HWQ_MODE;

	for (i = 0; i < MAX_HWQ_MODE; i++) {
		if (!strncmp(hwq_mode_name[i], buf,
			     strlen(hwq_mode_name[i]))) {
			mode = i;
			break;
		}
	}

	if (mode >= MAX_HWQ_MODE) {
		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
		return -EINVAL;
	}

	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
			 "HWQ steering mode.\n");
		return -EINVAL;
	}

	afu->hwq_mode = mode;

	return count;
}
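
/*
 * Illustrative sketch (not part of the driver): selecting CPU-affinity
 * steering from userspace (host number hypothetical):
 *
 *	echo cpu > /sys/class/scsi_host/host0/hwq_mode
 */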
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RO(port2);
static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RO(port2_lun_table);
static DEVICE_ATTR_RO(port3_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_port2,
	&dev_attr_port3,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_port2_lun_table,
	&dev_attr_port3_lun_table,
	&dev_attr_irqpoll_weight,
	&dev_attr_num_hwqs,
	&dev_attr_hwq_mode,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_abort_handler = cxlflash_eh_abort_handler,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
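
/*
 * Illustrative sketch (not part of the driver): supporting an additional
 * IBM adapter would add one dev_dependent_vals instance plus a table entry;
 * the device ID macro and values below are hypothetical:
 *
 *	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_NEWCARD,
 *	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_newcard_vals},
 *
 * MODULE_DEVICE_TABLE() exports the table so udev/modprobe can autoload the
 * module when a matching device appears.
 */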
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */

	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and
 * transition to the 'normal' state at the end of a successful
 * probe. Should an EEH event occur during probe, the notification
 * thread (error_detected()) will wait until the probe handler
 * is nearly complete. At that time, the device will be moved to
 * a 'probed' state and the EEH thread woken up to drive the slot
 * reset and recovery (device moves to 'normal' state). Meanwhile,
 * the probe will be allowed to exit successfully.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

	if (wq_has_sleeper(&cfg->reset_waitq)) {
		cfg->state = STATE_PROBED;
		wake_up_all(&cfg->reset_waitq);
	} else
		cfg->state = STATE_NORMAL;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
					     cfg->state != STATE_PROBING);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);