/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"
MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}
	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}
	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error else where.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	bool cmd_is_tmf;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}
/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;
			complete(&cmd->cevent);
		}
	}
}
/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
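	/*
	 * Writing 0x1 requests the reset; the AFU clears the bit once the
	 * reset has completed, so poll (with a doubling delay) for it to drop.
	 */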
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}
/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}
/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
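	/* Writing the IOARCB effective address to IOARRIN issues the command */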
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
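	/*
	 * The send queue credit consumed above is returned by process_hrrq()
	 * once the response for this entry is harvested from the RRQ.
	 */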
	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
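	/* Allow the command twice its stated timeout before giving up here */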
	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}
/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}
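	/*
	 * The IOARCB carries a single effective-address/length pair, so only
	 * one scatter-gather element is expected here.
	 */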
	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx));
	hwq->ctx = NULL;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
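			/* Back off linearly between polls: 100ms, 200ms, ... */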
			msleep(100 * retry_cnt);
		}
	}
}
/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}
/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	put_device(cfg->chardev);
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;
	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
/**
 * process_hrrq() - process the read-response queue
 * @hwq:	Hardware queue associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
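	/*
	 * Each RRQ entry carries a toggle bit that the AFU flips on every
	 * wrap of the ring; an entry whose toggle matches hwq->toggle is
	 * new, any other value means the entry was already consumed.
	 */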
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}
/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}
/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}
/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));
		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;

	rc = cxl_start_context(hwq->ctx,
			       hwq->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}
	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
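		/*
		 * WWPN_LEN (16) conveniently doubles as the numeric base,
		 * i.e. the string is parsed as hexadecimal.
		 */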
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}
	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			  &hwq->ctrl_map->ctx_cap);
	}

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
		hwq->toggle = 1;

		/* Initialize spin locks */
		spin_lock_init(&hwq->hrrq_slock);
		spin_lock_init(&hwq->hsq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @hwq:	Hardware queue to initialize.
 *
 * Return: UNDO_NOOP on success, the level of teardown required on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	struct cxl_context *ctx = hwq->ctx;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = is_primary_hwq ? 3 : 2;
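	/*
	 * Only the primary hardware queue fields the adapter-wide async
	 * error interrupt; secondary queues map just the sync error and
	 * RRQ-updated interrupts, hence 3 IRQs versus 2.
	 */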
	rc = cxl_allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;
	enum undo_level level;

	hwq->afu = cfg->afu;
	hwq->index = index;
	INIT_LIST_HEAD(&hwq->pending_cmds);

	if (index == PRIMARY_HWQ)
		ctx = cxl_get_context(cfg->dev);
	else
		ctx = cxl_dev_context_init(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto err1;
	}

	hwq->ctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* Reset AFU when initializing primary context */
	if (index == PRIMARY_HWQ) {
		rc = cxl_afu_reset(ctx);
		if (unlikely(rc)) {
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
				__func__, rc);
			goto err1;
		}
	}

	level = init_intr(cfg, hwq);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto err2;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg, index);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto err2;
	}
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	term_intr(cfg, level, index);
	if (index != PRIMARY_HWQ)
		cxl_release_context(ctx);
err1:
	hwq->ctx = NULL;
	goto out;
}
/**
 * get_num_afu_ports() - determines and configures the number of AFU ports
 * @cfg:	Internal structure associated with the host.
 *
 * This routine determines the number of AFU ports by converting the global
 * port selection mask. The converted value is only valid following an AFU
 * reset (explicit or power-on). This routine must be invoked shortly after
 * mapping as other routines are dependent on the number of ports during the
 * initialization sequence.
 *
 * To support legacy AFUs that might not have reflected an initial global
 * port mask (value read is 0), default to the number of ports originally
 * supported by the cxlflash driver (2) before hardware with other port
 * offerings was introduced.
 */
static void get_num_afu_ports(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 port_mask;
	int num_fc_ports = LEGACY_FC_PORTS;

	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	if (port_mask != 0ULL)
		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
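	/* Example: a mask of 0x3 (ports 0-1) yields ilog2(3) + 1 = 2 ports */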
	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
		__func__, port_mask, num_fc_ports);

	cfg->num_fc_ports = num_fc_ports;
	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int i;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	afu->num_hwqs = afu->desired_hwqs;
	for (i = 0; i < afu->num_hwqs; i++) {
		rc = init_mc(cfg, i);
		if (rc) {
			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
				__func__, rc, i);
			goto err1;
		}
	}

	/* Map the entire MMIO space of the AFU using the first context */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	afu->afu_map = cxl_psa_map(hwq->ctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);
	get_num_afu_ports(cfg);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->room = readq_be(&hwq->host_map->cmd_room);
	}

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	for (i = afu->num_hwqs - 1; i >= 0; i--) {
		term_intr(cfg, UNMAP_THREE, i);
		term_mc(cfg, i);
	}
	goto out;
}
/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available restart it after the reset is complete
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
/**
 * cxlflash_async_reset_host() - asynchronous host reset handler
 * @data:	Private data provided while scheduling reset.
 * @cookie:	Cookie that can be used for checkpointing.
 */
static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
{
	struct cxlflash_cfg *cfg = data;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	if (cfg->state != STATE_RESET) {
		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	drain_ioctls(cfg);
	cxlflash_mark_contexts_error(cfg);
	rc = afu_reset(cfg);
	if (rc)
		cfg->state = STATE_FAILTERM;
	else
		cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);

out:
	scsi_unblock_requests(cfg->host);
}
/**
 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Not performing reset state=%d\n",
			__func__, cfg->state);
		return;
	}

	cfg->state = STATE_RESET;
	scsi_block_requests(cfg->host);
	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
						 cfg);
}
/**
 * send_afu_cmd() - builds and sends an internal AFU command
 * @afu:	AFU associated with the host.
 * @rcb:	Pre-populated IOARCB describing command to send.
 *
 * The AFU can only take one internal AFU command at a time. This limitation is
 * enforced by using a mutex to provide exclusive access to the AFU during the
 * operation. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent AFU operations.
 *
 * The command status is optionally passed back to the caller when the caller
 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
 *
 * Return:
 *	0 on success, -errno on failure
 */
static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	char *buf = NULL;
	int rc = 0;
	int nretry = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));

retry:
	memset(cmd, 0, sizeof(*cmd));
	memcpy(&cmd->rcb, rcb, sizeof(*rcb));
	INIT_LIST_HEAD(&cmd->queue);
	init_completion(&cmd->cevent);
	cmd->parent = afu;
	cmd->hwq_index = hwq->index;
	cmd->rcb.ctx_id = hwq->ctx_hndl;

	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
		__func__, afu, cmd, cmd->rcb.cdb[0], nretry);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		rc = -ENOBUFS;
		goto out;
	}

	rc = wait_resp(afu, cmd);
	switch (rc) {
	case -ETIMEDOUT:
		rc = afu->context_reset(hwq);
		if (rc) {
			cxlflash_schedule_async_reset(cfg);
			break;
		}
		/* fall through to retry */
	case -EAGAIN:
		if (++nretry < 2)
			goto retry;
		/* fall through to exit */
	default:
		break;
	}

	if (rcb->ioasa)
		*rcb->ioasa = cmd->sa;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
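
/*
 * Illustrative sketch (not part of the driver): a caller that wants the
 * completion status back points rcb.ioasa at a local IOASA before the
 * call; send_afu_cmd() then copies the command's IOASA into it.
 *
 *	struct sisl_ioarcb rcb = { 0 };
 *	struct sisl_ioasa asa = { 0 };
 *	int rc;
 *
 *	// ... populate rcb as in cxlflash_afu_sync() below ...
 *	rcb.ioasa = &asa;		// request status passback
 *	rc = send_afu_cmd(afu, &rcb);
 *	// on success, asa now holds the IOASA written for the command
 */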
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx:	Identifies context requesting sync.
 * @res:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success, -errno on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb = { 0 };

	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
		__func__, afu, ctx, res, mode);

	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
	rcb.cdb[1] = mode;
	put_unaligned_be16(ctx, &rcb.cdb[2]);
	put_unaligned_be32(res, &rcb.cdb[4]);

	return send_afu_cmd(afu, &rcb);
}
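
/*
 * Illustrative sketch (not part of the driver): a lightweight sync of a
 * single resource handle, as issued from the superpipe paths. The
 * AFU_LW_SYNC mode constant is an assumption here.
 *
 *	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
 *	if (unlikely(rc))
 *		dev_err(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
 */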
/**
 * cxlflash_eh_abort_handler() - abort a SCSI command
 * @scp:	SCSI command to abort.
 *
 * CXL Flash devices do not support a single command abort. Reset the context
 * as per SISLite specification. Flush any pending commands in the hardware
 * queue before the reset.
 *
 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
	int rc = FAILED;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afuc(scp);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/* When the state is not normal, another reset/reload is in progress.
	 * Return failed and the mid-layer will invoke host reset handler.
	 */
	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	rc = afu->context_reset(hwq);
	if (unlikely(rc))
		goto out;

	rc = SUCCESS;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_status(u32 port,
					 struct cxlflash_cfg *cfg,
					 char *buf)
{
	struct device *dev = &cfg->dev->dev;
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_port_regs;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_regs = get_fc_port_regs(cfg, port);
	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
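
/*
 * Illustrative usage (not part of the driver): the per-port status is
 * surfaced through the host sysfs attributes defined below, e.g.:
 *
 *	// cat /sys/class/scsi_host/host<N>/port0
 *	// -> "online", "offline" or "unknown"
 */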
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}
/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}
/**
 * port2_show() - queries and presents the current status of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(2, cfg, buf);
}
/**
 * port3_show() - queries and presents the current status of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(3, cfg, buf);
}
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be one less than the
		 * number of fc ports for this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
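
/*
 * Illustrative usage (not part of the driver): the mode is selected by
 * writing one of the values above to the host's lun_mode attribute. A
 * minimal user space sketch (the host number is an assumption):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_host/host0/lun_mode", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "1\n");	// one internal 64K LUN, 512B blocks
 *		return fclose(f) ? 1 : 0;
 *	}
 */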
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}
/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}
/**
 * port2_lun_table_show() - presents the current LUN table of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(2, cfg, buf);
}
/**
 * port3_lun_table_show() - presents the current LUN table of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(3, cfg, buf);
}
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}
/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The number of bytes consumed from @buf.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	u32 weight;
	int rc, i;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Current IRQ poll weight has the same weight.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_disable(&hwq->irqpoll);
		}
	}

	afu->irqpoll_weight = weight;

	if (weight > 0) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
		}
	}

	return count;
}
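
/*
 * Illustrative usage (not part of the driver): polling is enabled with a
 * non-zero weight and disabled again by writing 0, e.g.:
 *
 *	// echo 64 > /sys/class/scsi_host/host<N>/irqpoll_weight
 *	// echo 0  > /sys/class/scsi_host/host<N>/irqpoll_weight
 */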
/**
 * num_hwqs_show() - presents the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
 *		queues in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t num_hwqs_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
}
/**
 * num_hwqs_store() - sets the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
 *		queues in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * n > 0: num_hwqs = n
 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
 *
 * Return: The number of bytes consumed from @buf.
 */
static ssize_t num_hwqs_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;
	int rc;
	int nhwqs, num_hwqs;

	rc = kstrtoint(buf, 10, &nhwqs);
	if (rc)
		return -EINVAL;

	if (nhwqs >= 1)
		num_hwqs = nhwqs;
	else if (nhwqs == 0)
		num_hwqs = num_online_cpus();
	else
		num_hwqs = num_online_cpus() / abs(nhwqs);

	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
	WARN_ON_ONCE(afu->desired_hwqs == 0);

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rc = afu_reset(cfg);
		if (rc)
			cfg->state = STATE_FAILTERM;
		else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			goto retry;
		/* fall through */
	default:
		/* Ideally should not happen */
		dev_err(dev, "%s: Device is not ready, state=%d\n",
			__func__, cfg->state);
		break;
	}

	return count;
}
static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
/**
 * hwq_mode_show() - presents the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
 *		as a character string.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t hwq_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
}
/**
 * hwq_mode_store() - sets the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
 *		as a character string.
 * @count:	Length of data residing in @buf.
 *
 * rr = Round-Robin
 * tag = Block MQ Tagging
 * cpu = CPU Affinity
 *
 * Return: The number of bytes consumed from @buf.
 */
static ssize_t hwq_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int i;
	u32 mode = MAX_HWQ_MODE;

	for (i = 0; i < MAX_HWQ_MODE; i++) {
		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
			mode = i;
			break;
		}
	}

	if (mode >= MAX_HWQ_MODE) {
		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
		return -EINVAL;
	}

	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
			 "HWQ steering mode.\n");
		return -EINVAL;
	}

	afu->hwq_mode = mode;

	return count;
}
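
/*
 * Illustrative usage (not part of the driver): the steering mode is
 * selected by name, e.g.:
 *
 *	// echo cpu > /sys/class/scsi_host/host<N>/hwq_mode
 *
 * "tag" is only accepted when the host is running with SCSI-MQ enabled.
 */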
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RO(port2);
static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RO(port2_lun_table);
static DEVICE_ATTR_RO(port3_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_port2,
	&dev_attr_port3,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_port2_lun_table,
	&dev_attr_port3_lun_table,
	&dev_attr_irqpoll_weight,
	&dev_attr_num_hwqs,
	&dev_attr_hwq_mode,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_abort_handler = cxlflash_eh_abort_handler,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan of the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */

	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
/**
 * cxlflash_chr_open() - character device open handler
 * @inode:	Device inode associated with this character device.
 * @file:	File pointer for this device.
 *
 * Only users with admin privileges are allowed to open the character device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_chr_open(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
	file->private_data = cfg;

	return 0;
}
/*
 * Character device file operations
 */
static const struct file_operations cxlflash_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= cxlflash_chr_open,
};
/**
 * init_chrdev() - initialize the character device for the host
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_chrdev(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct device *char_dev;
	dev_t devno;
	int minor;
	int rc = 0;

	minor = cxlflash_get_minor();
	if (unlikely(minor < 0)) {
		dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
		rc = -ENOSPC;
		goto out;
	}

	devno = MKDEV(cxlflash_major, minor);
	cdev_init(&cfg->cdev, &cxlflash_chr_fops);

	rc = cdev_add(&cfg->cdev, devno, 1);
	if (rc) {
		dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
		goto err1;
	}

	char_dev = device_create(cxlflash_class, NULL, devno,
				 NULL, "cxlflash%d", minor);
	if (IS_ERR(char_dev)) {
		rc = PTR_ERR(char_dev);
		dev_err(dev, "%s: device_create failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	cfg->chardev = char_dev;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	cdev_del(&cfg->cdev);
err1:
	cxlflash_put_minor(minor);
	goto out;
}
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and
 * transition to the 'normal' state at the end of a successful
 * probe. Should an EEH event occur during probe, the notification
 * thread (error_detected()) will wait until the probe handler
 * is nearly complete. At that time, the device will be moved to
 * a 'probed' state and the EEH thread woken up to drive the slot
 * reset and recovery (device moves to 'normal' state). Meanwhile,
 * the probe will be allowed to exit successfully.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

	rc = init_chrdev(cfg);
	if (rc) {
		dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_CDEV;

	if (wq_has_sleeper(&cfg->reset_waitq)) {
		cfg->state = STATE_PROBED;
		wake_up_all(&cfg->reset_waitq);
	} else
		cfg->state = STATE_NORMAL;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
					     cfg->state != STATE_PROBING);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
/**
 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
 * @dev:	Character device.
 * @mode:	Mode that can be used to verify access.
 *
 * Return: Allocated string describing the devtmpfs structure.
 */
static char *cxlflash_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
}
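
/*
 * Example (illustrative): with this devnode callback in place, devtmpfs
 * places each adapter's character device under a subdirectory, e.g.
 * /dev/cxlflash/cxlflash0, rather than directly under /dev.
 */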
/**
 * cxlflash_class_init() - create character device class
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_class_init(void)
{
	dev_t devno;
	int rc = 0;

	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
	if (rc) {
		pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
		goto out;
	}

	cxlflash_major = MAJOR(devno);

	cxlflash_class = class_create(THIS_MODULE, "cxlflash");
	if (IS_ERR(cxlflash_class)) {
		rc = PTR_ERR(cxlflash_class);
		pr_err("%s: class_create failed rc=%d\n", __func__, rc);
		goto err;
	}

	cxlflash_class->devnode = cxlflash_devnode;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
	goto out;
}
/**
 * cxlflash_class_exit() - destroy character device class
 */
static void cxlflash_class_exit(void)
{
	dev_t devno = MKDEV(cxlflash_major, 0);

	class_destroy(cxlflash_class);
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}
static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	int rc;

	cxlflash_list_init();
	rc = cxlflash_class_init();
	if (unlikely(rc))
		goto out;

	rc = pci_register_driver(&cxlflash_driver);
	if (rc)
		goto err;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	cxlflash_class_exit();
	goto out;
}
/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
	cxlflash_class_exit();
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);