/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}
/**
 * context_reset() - reset command owner context via specified register
 * @cmd: AFU command that timed out.
 * @reset_reg: MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, reset_reg);
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}
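/*
 * Editorial annotation (not part of the original source): the poll above
 * doubles its delay on every pass via udelay(1 << nretry), so with a retry
 * limit of N the worst-case busy wait before giving up is
 * sum(2^k, k = 0..N) = (2^(N+1) - 1) microseconds, after which the final
 * RRIN value is simply logged.
 */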
/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd: AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->ioarrin);
}
/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd: AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->sq_ctx_reset);
}
/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
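/*
 * Editorial annotation (not part of the original source), a walk-through of
 * the 'room' optimization above: if the first submission reads cmd_room as 8,
 * afu->room caches 7 and the next seven commands decrement that cached value
 * without touching MMIO. Only when the cache underflows is the cmd_room
 * register read again, amortizing one MMIO read across up to 'room'
 * submissions.
 */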
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&afu->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&afu->hsq_slock, lock_flags);

	*afu->hsq_curr = cmd->rcb;
	if (afu->hsq_curr < afu->hsq_end)
		afu->hsq_curr++;
	else
		afu->hsq_curr = afu->hsq_start;
	writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);

	spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
		readq_be(&afu->host_map->sq_head),
		readq_be(&afu->host_map->sq_tail));
	return rc;
}
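/*
 * Editorial annotation (not part of the original source): hsq_curr walks the
 * [hsq_start, hsq_end] array one IOARCB at a time and wraps back to hsq_start
 * at the end. The hsq_credits counter, seeded in start_afu() and replenished
 * as process_hrrq() harvests completions, bounds the number of in-flight
 * entries so the tail written to sq_tail can never overtake the head.
 */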
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}
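/*
 * Editorial annotation (not part of the original source): rcb.timeout is
 * expressed in seconds, so a command with rcb.timeout = 5 yields
 * msecs_to_jiffies(5 * 2 * 1000). The caller therefore waits up to twice the
 * command timeout (10s in this example) before triggering a context reset
 * through afu->context_reset().
 */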
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	u32 port_sel = scp->device->channel + 1;
	struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	u32 port_sel = scp->device->channel + 1;
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int nseg = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		nseg = scsi_dma_map(scp);
		if (unlikely(nseg < 0)) {
			dev_err(dev, "%s: Fail DMA map\n", __func__);
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		scsi_dma_unmap(scp);
out:
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_disable(&afu->irqpoll);
		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_intr() - disables all AFU interrupts
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
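/*
 * Editorial annotation (not part of the original source): the switch above
 * intentionally falls through, so an undo level of UNMAP_THREE unmaps IRQ 3,
 * then 2, then 1, and finally frees the IRQs, while a shallower level such as
 * FREE_IRQ enters lower in the waterfall and undoes only what init_intr()
 * managed to set up before failing.
 */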
/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}
/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg: Internal structure associated with the host.
 * @wait: Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_global_map __iomem *global;
	struct dev_dependent_vals *ddv;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	global = &afu->afu_map->global;

	/* Notify AFU */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		retry_cnt = 0;

		while (true) {
			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
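/*
 * Editorial annotation (not part of the original source): the back-off above
 * sleeps 100 * retry_cnt milliseconds per poll, so the polls accumulate
 * 100 + 200 + ... ms. Assuming MC_RETRY_CNT is 5, as the "1.5 seconds"
 * comment suggests, the total is 100 + 200 + 300 + 400 = 1000 ms of sleep
 * across the four failed polls plus register read time, bounding the wait
 * at roughly 1.5 seconds per port.
 */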
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};
/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when
	 * there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
/**
 * process_hrrq() - process the read-response queue
 * @afu: AFU associated with the host.
 * @doneq: Queue of commands harvested from the RRQ.
 * @budget: Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct afu *afu, struct list_head *doneq, int budget)
{
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = afu->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&afu->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return num_hrrq;
}
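/*
 * Editorial annotation (not part of the original source), toggle-bit
 * protocol: the AFU writes each RRQ entry with the current generation
 * encoded in SISL_RESP_HANDLE_T_BIT. Example with toggle = 1 (the value
 * start_afu() seeds): fresh first-lap entries carry T = 1 and are consumed,
 * while the zeroed RRQ memory beyond the producer reads T = 0 and stops the
 * loop. After the wrap at hrrq_end the expected bit flips to 0, so stale
 * entries from the previous lap (still carrying T = 1) no longer match and
 * consumption again halts exactly at the producer's position.
 */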
/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq: Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}
/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll: IRQ poll structure associated with queue to poll.
 * @budget: Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct afu *afu = container_of(irqpoll, struct afu, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(afu, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&afu->irqpoll);
		spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(afu, &doneq, -1);
	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
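/*
 * Editorial annotation with an illustrative (made up) VPD layout, not part
 * of the original source: the read-only large-resource section parsed above
 * might contain
 *
 *	[RO LRDT][len][... "V5"<len=16>"5005076B07000001"
 *	               ... "V6"<len=16>"5005076B07000002" ...]
 *
 * pci_vpd_find_info_keyword() locates the "V5"/"V6" keywords per port and
 * each 16-character ASCII payload is converted by kstrtoul() with WWPN_LEN
 * (16) doubling as the numeric base, i.e. a hexadecimal parse.
 */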
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg: Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
}
/**
 * init_global() - initialize AFU global registers
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
		__func__, wwpn[0], wwpn[1]);

	/* Set up RRQ and SQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	if (afu_is_sq_cmd_mode(afu)) {
		writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
		writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i,
				     &afu->afu_map->global.fc_regs[i][0],
				     wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	init_pcr(cfg);

	/* Initialize RRQ */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;
	spin_lock_init(&afu->hrrq_slock);

	/* Initialize SQ */
	if (afu_is_sq_cmd_mode(afu)) {
		memset(&afu->sq, 0, sizeof(afu->sq));
		afu->hsq_start = &afu->sq[0];
		afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
		afu->hsq_curr = afu->hsq_start;

		spin_lock_init(&afu->hsq_slock);
		atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
	}

	/* Initialize IRQ poll */
	if (afu_is_irqpoll_enabled(afu))
		irq_poll_init(&afu->irqpoll, afu->irqpoll_weight,
			      cxlflash_irqpoll);

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg: Internal structure associated with the host.
 * @ctx: Context to attach the interrupts to.
 *
 * Return: The undo level reached, for waterfall tear down on failure
 * (UNDO_NOOP on success).
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
/**
 * init_mc() - create and register as the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto out;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg: Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: init_mc failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	spin_lock_init(&afu->rrin_slock);
	afu->room = readq_be(&afu->host_map->cmd_room);

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu: AFU associated with the host.
 * @ctx_hndl_u: Identifies context requesting sync.
 * @res_hndl_u: Identifies resource requesting sync.
 * @mode: Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	char *buf = NULL;
	int rc = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -1;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	init_completion(&cmd->cevent);
	cmd->parent = afu;

	dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	rc = wait_resp(afu, cmd);
	if (unlikely(rc))
		rc = -1;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
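/*
 * Editorial usage sketch (not part of the original source): afu_link_reset()
 * above issues a global sync with cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC),
 * while a caller invalidating AFU state for one resource handle would use
 * the lightweight mode, e.g. (AFU_LW_SYNC assumed here from sislite.h):
 *
 *	if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
 *		dev_err(dev, "%s: AFU sync failed\n", __func__);
 *
 * Callers must be able to sleep: the routine takes a mutex and waits on a
 * completion, so it cannot be used from interrupt context.
 */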
/**
 * afu_reset() - resets the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg: Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
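/*
 * Editorial annotation (not part of the original source): ioctl handlers
 * hold cfg->ioctl_rwsem for read, so down_write() blocks until every
 * in-flight ioctl has released its read hold, and new ioctls queue behind
 * the pending writer. Acquiring and immediately releasing write access is
 * therefore a barrier that drains the ioctl path without permanently
 * blocking it.
 */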
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{

	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port: Desired port for status reporting.
 * @cfg: Internal structure associated with the host.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port,
					 struct cxlflash_cfg *cfg,
					 char *buf)
{
	struct afu *afu = cfg->afu;
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}
/**
 * port1_show() - queries and presents the current status of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data resident in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be two (default).
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = NUM_FC_PORTS - 1;

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
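
/*
 * As a usage sketch, a mode is selected by writing a single digit to the
 * lun_mode host attribute; writes outside the 0-4 range are silently
 * ignored by the handler above. For example, from user space (the host
 * number is hypothetical and depends on enumeration):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_host/host0/lun_mode", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);	// one internal 64K LUN, 512B blocks
 *		return fclose(f) ? 1 : 0;
 *	}
 */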
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	struct afu *afu = cfg->afu;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n", i, readq_be(&fc_port[i]));
	return bytes;
}
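
/*
 * Each output line pairs a LUN table index with the raw 64-bit entry,
 * for example (values illustrative only):
 *
 *	000: 4000400000000000
 *	001: 0000000000000000
 *
 * Note that output is capped at PAGE_SIZE, so a full CXLFLASH_NUM_VLUNS
 * worth of entries may be truncated.
 */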
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}
/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}
/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data resident in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The number of bytes consumed from @buf, or -EINVAL on invalid
 * input.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	u32 weight;
	int rc;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Requested IRQ poll weight is already in effect.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu))
		irq_poll_disable(&afu->irqpoll);

	afu->irqpoll_weight = weight;

	if (weight > 0)
		irq_poll_init(&afu->irqpoll, weight, cxlflash_irqpoll);

	return count;
}
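
/*
 * The weight doubles as the irq_poll budget: the maximum number of
 * completions the poll callback may reap per invocation before yielding
 * the softirq. A minimal sketch of the pattern (handler names here are
 * illustrative, not the driver's exact ones):
 *
 *	static irqreturn_t hw_irq_handler(int irq, void *data)
 *	{
 *		struct afu *afu = data;
 *
 *		if (afu_is_irqpoll_enabled(afu)) {
 *			irq_poll_sched(&afu->irqpoll);	// defer to softirq
 *			return IRQ_HANDLED;
 *		}
 *
 *		// ...otherwise drain the completion queue right here...
 *		return IRQ_HANDLED;
 *	}
 *
 * The poll callback (cxlflash_irqpoll above) then processes at most
 * 'budget' entries per call and invokes irq_poll_complete() once the
 * queue is drained.
 */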
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_irqpoll_weight,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
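
/*
 * A note on the cmd_size over-allocation above: the SCSI midlayer does
 * not guarantee any particular alignment for the per-command private
 * area, so an extra __alignof__(struct afu_cmd) - 1 bytes are requested
 * and the AFU command is carved out of the first suitably aligned
 * address within that area. A minimal sketch of the lookup, assuming the
 * standard scsi_cmd_priv() accessor (the driver's own helper may be
 * named differently):
 *
 *	static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *scp)
 *	{
 *		return PTR_ALIGN((struct afu_cmd *)scsi_cmd_priv(scp),
 *				 __alignof__(struct afu_cmd));
 *	}
 */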
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
 * - Rescan of the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
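
	/*
	 * Illustration (assuming CXLFLASH_NUM_VLUNS = 512): promoted LUNs
	 * are handed indices 0, 1, 2, ... while regular LUNs are handed
	 * 255, 254, 253, ... per port, so the two regions grow toward
	 * each other.
	 */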
	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		rc = cxlflash_mark_contexts_error(cfg);
		if (rc)
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);