drivers/scsi/cxlflash/main.c
1 /*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/delay.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19
20 #include <asm/unaligned.h>
21
22 #include <misc/cxl.h>
23
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_host.h>
26 #include <uapi/scsi/cxlflash_ioctl.h>
27
28 #include "main.h"
29 #include "sislite.h"
30 #include "common.h"
31
32 MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33 MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35 MODULE_LICENSE("GPL");
36
37
38 /**
39 * cxlflash_cmd_checkout() - checks out an AFU command
40 * @afu: AFU to checkout from.
41 *
42 * Commands are checked out in a round-robin fashion. Note that since
43 * the command pool is larger than the hardware queue, the majority of
44 * times we will only loop once or twice before getting a command. The
45 * buffer and CDB within the command are initialized (zeroed) prior to
46 * returning.
47 *
48 * Return: The checked out command or NULL when command pool is empty.
49 */
50 struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
51 {
52 int k, dec = CXLFLASH_NUM_CMDS;
53 struct afu_cmd *cmd;
54
55 while (dec--) {
56 k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
57
58 cmd = &afu->cmd[k];
59
60 if (!atomic_dec_if_positive(&cmd->free)) {
61 pr_debug("%s: returning found index=%d\n",
62 __func__, cmd->slot);
63 memset(cmd->buf, 0, CMD_BUFSIZE);
64 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
65 return cmd;
66 }
67 }
68
69 return NULL;
70 }
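/*
 * Editorial sketch (not part of the driver): the round-robin index above
 * relies on CXLFLASH_NUM_CMDS being a power of two, so masking with
 * (CXLFLASH_NUM_CMDS - 1) is a cheap modulo; e.g. with a pool of 16
 * commands, a running count of 17 maps to slot 17 & 15 == 1. Callers are
 * expected to pair checkout with checkin, roughly:
 *
 *	struct afu_cmd *cmd = cxlflash_cmd_checkout(afu);
 *	if (unlikely(!cmd))
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	// ... populate cmd->rcb and send via cxlflash_send_cmd() ...
 *	cxlflash_cmd_checkin(cmd);	// only on failure to send
 */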
71
72 /**
73 * cxlflash_cmd_checkin() - checks in an AFU command
74 * @cmd: AFU command to checkin.
75 *
76 * Safe to pass commands that have already been checked in. Several
77 * internal tracking fields are reset as part of the checkin. Note
78 * that these are intentionally reset prior to toggling the free bit
79 * to avoid clobbering values in the event that the command is checked
80 * out right away.
81 */
82 void cxlflash_cmd_checkin(struct afu_cmd *cmd)
83 {
84 cmd->rcb.scp = NULL;
85 cmd->rcb.timeout = 0;
86 cmd->sa.ioasc = 0;
87 cmd->cmd_tmf = false;
88 cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
89
90 if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
91 pr_err("%s: Freeing cmd (%d) that is not in use!\n",
92 __func__, cmd->slot);
93 return;
94 }
95
96 pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
97 }
98
99 /**
100 * process_cmd_err() - command error handler
101 * @cmd: AFU command that experienced the error.
102 * @scp: SCSI command associated with the AFU command in error.
103 *
104 * Translates error bits from AFU command to SCSI command results.
105 */
106 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
107 {
108 struct sisl_ioarcb *ioarcb;
109 struct sisl_ioasa *ioasa;
110
111 if (unlikely(!cmd))
112 return;
113
114 ioarcb = &(cmd->rcb);
115 ioasa = &(cmd->sa);
116
117 if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
118 pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
119 __func__, cmd, scp);
120 scp->result = (DID_ERROR << 16);
121 }
122
123 if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
124                 pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
125 __func__, cmd, scp);
126 scp->result = (DID_ERROR << 16);
127 }
128
129 pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
130                  "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
131 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
132 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
133 ioasa->fc_extra);
134
135 if (ioasa->rc.scsi_rc) {
136 /* We have a SCSI status */
137 if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
138 memcpy(scp->sense_buffer, ioasa->sense_data,
139 SISL_SENSE_DATA_LEN);
140 scp->result = ioasa->rc.scsi_rc;
141 } else
142 scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
143 }
144
145 /*
146 * We encountered an error. Set scp->result based on nature
147 * of error.
148 */
149 if (ioasa->rc.fc_rc) {
150 /* We have an FC status */
151 switch (ioasa->rc.fc_rc) {
152 case SISL_FC_RC_LINKDOWN:
153 scp->result = (DID_REQUEUE << 16);
154 break;
155 case SISL_FC_RC_RESID:
156 /* This indicates an FCP resid underrun */
157 if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
158 /* If the SISL_RC_FLAGS_OVERRUN flag was set,
159                          * then we will handle this error elsewhere.
160                          * If not, then we must handle it here.
161 * This is probably an AFU bug. We will
162 * attempt a retry to see if that resolves it.
163 */
164 scp->result = (DID_ERROR << 16);
165 }
166 break;
167 case SISL_FC_RC_RESIDERR:
168 /* Resid mismatch between adapter and device */
169 case SISL_FC_RC_TGTABORT:
170 case SISL_FC_RC_ABORTOK:
171 case SISL_FC_RC_ABORTFAIL:
172 case SISL_FC_RC_NOLOGI:
173 case SISL_FC_RC_ABORTPEND:
174 case SISL_FC_RC_WRABORTPEND:
175 case SISL_FC_RC_NOEXP:
176 case SISL_FC_RC_INUSE:
177 scp->result = (DID_ERROR << 16);
178 break;
179 }
180 }
181
182 if (ioasa->rc.afu_rc) {
183 /* We have an AFU error */
184 switch (ioasa->rc.afu_rc) {
185 case SISL_AFU_RC_NO_CHANNELS:
186 scp->result = (DID_MEDIUM_ERROR << 16);
187 break;
188 case SISL_AFU_RC_DATA_DMA_ERR:
189 switch (ioasa->afu_extra) {
190 case SISL_AFU_DMA_ERR_PAGE_IN:
191 /* Retry */
192 scp->result = (DID_IMM_RETRY << 16);
193 break;
194 case SISL_AFU_DMA_ERR_INVALID_EA:
195 default:
196 scp->result = (DID_ERROR << 16);
197 }
198 break;
199 case SISL_AFU_RC_OUT_OF_DATA_BUFS:
200 /* Retry */
201 scp->result = (DID_ALLOC_FAILURE << 16);
202 break;
203 default:
204 scp->result = (DID_ERROR << 16);
205 }
206 }
207 }
208
209 /**
210 * cmd_complete() - command completion handler
211 * @cmd: AFU command that has completed.
212 *
213  * Prepares and submits a command that has either completed or timed out
214  * to the SCSI stack. Checks the AFU command back into the command pool
215  * for non-internal (rcb.scp populated) commands.
216 */
217 static void cmd_complete(struct afu_cmd *cmd)
218 {
219 struct scsi_cmnd *scp;
220 u32 resid;
221 ulong lock_flags;
222 struct afu *afu = cmd->parent;
223 struct cxlflash_cfg *cfg = afu->parent;
224 bool cmd_is_tmf;
225
226 spin_lock_irqsave(&cmd->slock, lock_flags);
227 cmd->sa.host_use_b[0] |= B_DONE;
228 spin_unlock_irqrestore(&cmd->slock, lock_flags);
229
230 if (cmd->rcb.scp) {
231 scp = cmd->rcb.scp;
232 if (unlikely(cmd->sa.rc.afu_rc ||
233 cmd->sa.rc.scsi_rc ||
234 cmd->sa.rc.fc_rc))
235 process_cmd_err(cmd, scp);
236 else
237 scp->result = (DID_OK << 16);
238
239 resid = cmd->sa.resid;
240 cmd_is_tmf = cmd->cmd_tmf;
241 cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */
242
243 pr_debug("%s: calling scsi_set_resid, scp=%p "
244 "result=%X resid=%d\n", __func__,
245 scp, scp->result, resid);
246
247 scsi_set_resid(scp, resid);
248 scsi_dma_unmap(scp);
249 scp->scsi_done(scp);
250
251 if (cmd_is_tmf) {
252 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
253 cfg->tmf_active = false;
254 wake_up_all_locked(&cfg->tmf_waitq);
255 spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
256 lock_flags);
257 }
258 } else
259 complete(&cmd->cevent);
260 }
261
262 /**
263 * send_tmf() - sends a Task Management Function (TMF)
264  * @afu:        AFU from which to check out a command for the TMF.
265 * @scp: SCSI command from stack.
266 * @tmfcmd: TMF command to send.
267 *
268 * Return:
269 * 0 on success
270 * SCSI_MLQUEUE_HOST_BUSY when host is busy
271 */
272 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
273 {
274 struct afu_cmd *cmd;
275
276 u32 port_sel = scp->device->channel + 1;
277 short lflag = 0;
278 struct Scsi_Host *host = scp->device->host;
279 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
280 ulong lock_flags;
281 int rc = 0;
282
283 cmd = cxlflash_cmd_checkout(afu);
284 if (unlikely(!cmd)) {
285 pr_err("%s: could not get a free command\n", __func__);
286 rc = SCSI_MLQUEUE_HOST_BUSY;
287 goto out;
288 }
289
290 /* If a Task Management Function is active, do not send one more.
291 */
292 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
293 if (cfg->tmf_active)
294 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
295 !cfg->tmf_active);
296 cfg->tmf_active = true;
297 cmd->cmd_tmf = true;
298 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
299
300 cmd->rcb.ctx_id = afu->ctx_hndl;
301 cmd->rcb.port_sel = port_sel;
302 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
303
304 lflag = SISL_REQ_FLAGS_TMF_CMD;
305
306 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
307 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
308
309 /* Stash the scp in the reserved field, for reuse during interrupt */
310 cmd->rcb.scp = scp;
311
312 /* Copy the CDB from the cmd passed in */
313 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
314
315 /* Send the command */
316 rc = cxlflash_send_cmd(afu, cmd);
317 if (unlikely(rc)) {
318 cxlflash_cmd_checkin(cmd);
319 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
320 cfg->tmf_active = false;
321 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
322 goto out;
323 }
324
325 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
326 wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
327 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
328 out:
329 return rc;
330 }
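/*
 * Editorial note: send_tmf() serializes TMFs with a flag/waitqueue pair
 * rather than a mutex because waiters must also be woken from the
 * interrupt path (cmd_complete()). The protocol, roughly:
 *
 *	lock(tmf_waitq.lock);
 *	wait until !tmf_active;		// sleeps with the lock dropped
 *	tmf_active = true;		// claim the single TMF slot
 *	unlock(tmf_waitq.lock);
 *	... issue the TMF ...
 *	// completion path: tmf_active = false; wake_up_all_locked()
 */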
331
332 /**
333 * cxlflash_driver_info() - information handler for this host driver
334 * @host: SCSI host associated with device.
335 *
336 * Return: A string describing the device.
337 */
338 static const char *cxlflash_driver_info(struct Scsi_Host *host)
339 {
340 return CXLFLASH_ADAPTER_NAME;
341 }
342
343 /**
344 * cxlflash_queuecommand() - sends a mid-layer request
345 * @host: SCSI host associated with device.
346 * @scp: SCSI command to send.
347 *
348 * Return:
349 * 0 on success
350 * SCSI_MLQUEUE_HOST_BUSY when host is busy
351 */
352 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
353 {
354 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
355 struct afu *afu = cfg->afu;
356 struct pci_dev *pdev = cfg->dev;
357 struct afu_cmd *cmd;
358 u32 port_sel = scp->device->channel + 1;
359 int nseg, i, ncount;
360 struct scatterlist *sg;
361 ulong lock_flags;
362 short lflag = 0;
363 int rc = 0;
364
365 pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
366 __func__, scp, host->host_no, scp->device->channel,
367 scp->device->id, scp->device->lun,
368 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
369 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
370 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
371 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
372
373 /* If a Task Management Function is active, wait for it to complete
374 * before continuing with regular commands.
375 */
376 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
377 if (cfg->tmf_active) {
378 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
379 rc = SCSI_MLQUEUE_HOST_BUSY;
380 goto out;
381 }
382 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
383
384 switch (cfg->state) {
385 case STATE_LIMBO:
386 dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
387 __func__);
388 rc = SCSI_MLQUEUE_HOST_BUSY;
389 goto out;
390 case STATE_FAILTERM:
391 dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
392 __func__);
393 scp->result = (DID_NO_CONNECT << 16);
394 scp->scsi_done(scp);
395 rc = 0;
396 goto out;
397 default:
398 break;
399 }
400
401 cmd = cxlflash_cmd_checkout(afu);
402 if (unlikely(!cmd)) {
403 pr_err("%s: could not get a free command\n", __func__);
404 rc = SCSI_MLQUEUE_HOST_BUSY;
405 goto out;
406 }
407
408 cmd->rcb.ctx_id = afu->ctx_hndl;
409 cmd->rcb.port_sel = port_sel;
410 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
411
412 if (scp->sc_data_direction == DMA_TO_DEVICE)
413 lflag = SISL_REQ_FLAGS_HOST_WRITE;
414 else
415 lflag = SISL_REQ_FLAGS_HOST_READ;
416
417 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
418 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
419
420 /* Stash the scp in the reserved field, for reuse during interrupt */
421 cmd->rcb.scp = scp;
422
423 nseg = scsi_dma_map(scp);
424 if (unlikely(nseg < 0)) {
425 dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
426 __func__, nseg);
427 rc = SCSI_MLQUEUE_HOST_BUSY;
428 goto out;
429 }
430
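	/*
	 * Editorial note: the host template sets sg_tablesize to SG_NONE,
	 * so the mid-layer hands down at most one scatter-gather element
	 * and the loop below effectively records that single mapping.
	 */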
431 ncount = scsi_sg_count(scp);
432 scsi_for_each_sg(scp, sg, ncount, i) {
433 cmd->rcb.data_len = sg_dma_len(sg);
434 cmd->rcb.data_ea = sg_dma_address(sg);
435 }
436
437 /* Copy the CDB from the scsi_cmnd passed in */
438 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
439
440 /* Send the command */
441 rc = cxlflash_send_cmd(afu, cmd);
442 if (unlikely(rc)) {
443 cxlflash_cmd_checkin(cmd);
444 scsi_dma_unmap(scp);
445 }
446
447 out:
448 return rc;
449 }
450
451 /**
452 * cxlflash_eh_device_reset_handler() - reset a single LUN
453 * @scp: SCSI command to send.
454 *
455 * Return:
456 * SUCCESS as defined in scsi/scsi.h
457 * FAILED as defined in scsi/scsi.h
458 */
459 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
460 {
461 int rc = SUCCESS;
462 struct Scsi_Host *host = scp->device->host;
463 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
464 struct afu *afu = cfg->afu;
465 int rcr = 0;
466
467 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
468 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
469 host->host_no, scp->device->channel,
470 scp->device->id, scp->device->lun,
471 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
472 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
473 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
474 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
475
476 switch (cfg->state) {
477 case STATE_NORMAL:
478 rcr = send_tmf(afu, scp, TMF_LUN_RESET);
479 if (unlikely(rcr))
480 rc = FAILED;
481 break;
482 case STATE_LIMBO:
483 wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
484 if (cfg->state == STATE_NORMAL)
485 break;
486 /* fall through */
487 default:
488 rc = FAILED;
489 break;
490 }
491
492 pr_debug("%s: returning rc=%d\n", __func__, rc);
493 return rc;
494 }
495
496 /**
497 * cxlflash_eh_host_reset_handler() - reset the host adapter
498 * @scp: SCSI command from stack identifying host.
499 *
500 * Return:
501 * SUCCESS as defined in scsi/scsi.h
502 * FAILED as defined in scsi/scsi.h
503 */
504 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
505 {
506 int rc = SUCCESS;
507 int rcr = 0;
508 struct Scsi_Host *host = scp->device->host;
509 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
510
511 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
512 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
513 host->host_no, scp->device->channel,
514 scp->device->id, scp->device->lun,
515 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
516 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
517 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
518 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
519
520 switch (cfg->state) {
521 case STATE_NORMAL:
522 cfg->state = STATE_LIMBO;
523 scsi_block_requests(cfg->host);
524 cxlflash_mark_contexts_error(cfg);
525 rcr = cxlflash_afu_reset(cfg);
526 if (rcr) {
527 rc = FAILED;
528 cfg->state = STATE_FAILTERM;
529 } else
530 cfg->state = STATE_NORMAL;
531 wake_up_all(&cfg->limbo_waitq);
532 scsi_unblock_requests(cfg->host);
533 break;
534 case STATE_LIMBO:
535 wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
536 if (cfg->state == STATE_NORMAL)
537 break;
538 /* fall through */
539 default:
540 rc = FAILED;
541 break;
542 }
543
544 pr_debug("%s: returning rc=%d\n", __func__, rc);
545 return rc;
546 }
547
548 /**
549 * cxlflash_change_queue_depth() - change the queue depth for the device
550 * @sdev: SCSI device destined for queue depth change.
551 * @qdepth: Requested queue depth value to set.
552 *
553 * The requested queue depth is capped to the maximum supported value.
554 *
555 * Return: The actual queue depth set.
556 */
557 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
558 {
559
560 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
561 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
562
563 scsi_change_queue_depth(sdev, qdepth);
564 return sdev->queue_depth;
565 }
566
567 /**
568 * cxlflash_show_port_status() - queries and presents the current port status
569 * @dev: Generic device associated with the host owning the port.
570 * @attr: Device attribute representing the port.
571 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
572 *
573 * Return: The size of the ASCII string returned in @buf.
574 */
575 static ssize_t cxlflash_show_port_status(struct device *dev,
576 struct device_attribute *attr,
577 char *buf)
578 {
579 struct Scsi_Host *shost = class_to_shost(dev);
580 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
581 struct afu *afu = cfg->afu;
582
583 char *disp_status;
584 int rc;
585 u32 port;
586 u64 status;
587 u64 *fc_regs;
588
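	/* The attribute name is "port0"/"port1"; skip the 4-character
	 * "port" prefix and convert the trailing digit to a port number.
	 */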
589 rc = kstrtouint((attr->attr.name + 4), 10, &port);
590 if (rc || (port >= NUM_FC_PORTS))
591 return 0;
592
593 fc_regs = &afu->afu_map->global.fc_regs[port][0];
594 status =
595 (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);
596
597 if (status == FC_MTIP_STATUS_ONLINE)
598 disp_status = "online";
599 else if (status == FC_MTIP_STATUS_OFFLINE)
600 disp_status = "offline";
601 else
602 disp_status = "unknown";
603
604 return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
605 }
606
607 /**
608 * cxlflash_show_lun_mode() - presents the current LUN mode of the host
609 * @dev: Generic device associated with the host.
610 * @attr: Device attribute representing the lun mode.
611 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
612 *
613 * Return: The size of the ASCII string returned in @buf.
614 */
615 static ssize_t cxlflash_show_lun_mode(struct device *dev,
616 struct device_attribute *attr, char *buf)
617 {
618 struct Scsi_Host *shost = class_to_shost(dev);
619 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
620 struct afu *afu = cfg->afu;
621
622 return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
623 }
624
625 /**
626 * cxlflash_store_lun_mode() - sets the LUN mode of the host
627 * @dev: Generic device associated with the host.
628 * @attr: Device attribute representing the lun mode.
629 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
630  * @count:      Length of data residing in @buf.
631 *
632 * The CXL Flash AFU supports a dummy LUN mode where the external
633 * links and storage are not required. Space on the FPGA is used
634 * to create 1 or 2 small LUNs which are presented to the system
635 * as if they were a normal storage device. This feature is useful
636 * during development and also provides manufacturing with a way
637 * to test the AFU without an actual device.
638 *
639 * 0 = external LUN[s] (default)
640 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
641 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
642 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
643 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
644 *
645  * Return: The number of bytes consumed from @buf (always @count).
646 */
647 static ssize_t cxlflash_store_lun_mode(struct device *dev,
648 struct device_attribute *attr,
649 const char *buf, size_t count)
650 {
651 struct Scsi_Host *shost = class_to_shost(dev);
652 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
653 struct afu *afu = cfg->afu;
654 int rc;
655 u32 lun_mode;
656
657 rc = kstrtouint(buf, 10, &lun_mode);
658 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
659 afu->internal_lun = lun_mode;
660 cxlflash_afu_reset(cfg);
661 scsi_scan_host(cfg->host);
662 }
663
664 return count;
665 }
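/*
 * Editorial example: lun_mode is exercised from sysfs; the host number
 * below is hypothetical.
 *
 *	echo 1 > /sys/class/scsi_host/host0/lun_mode	(one 512B-block internal LUN)
 *	echo 0 > /sys/class/scsi_host/host0/lun_mode	(back to external LUNs)
 *
 * Note that the store routine resets the AFU and rescans the host so the
 * new LUN configuration is surfaced immediately.
 */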
666
667 /**
668 * cxlflash_show_ioctl_version() - presents the current ioctl version of the host
669 * @dev: Generic device associated with the host.
670 * @attr: Device attribute representing the ioctl version.
671 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
672 *
673 * Return: The size of the ASCII string returned in @buf.
674 */
675 static ssize_t cxlflash_show_ioctl_version(struct device *dev,
676 struct device_attribute *attr,
677 char *buf)
678 {
679 return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
680 }
681
682 /**
683 * cxlflash_show_dev_mode() - presents the current mode of the device
684 * @dev: Generic device associated with the device.
685 * @attr: Device attribute representing the device mode.
686 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
687 *
688 * Return: The size of the ASCII string returned in @buf.
689 */
690 static ssize_t cxlflash_show_dev_mode(struct device *dev,
691 struct device_attribute *attr, char *buf)
692 {
693 struct scsi_device *sdev = to_scsi_device(dev);
694
695 return snprintf(buf, PAGE_SIZE, "%s\n",
696 sdev->hostdata ? "superpipe" : "legacy");
697 }
698
699 /**
700 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
701  * @cfg:        Internal structure associated with the host.
702 */
703 static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
704 {
705 struct pci_dev *pdev = cfg->dev;
706
707 if (pci_channel_offline(pdev))
708 wait_event_timeout(cfg->limbo_waitq,
709 !pci_channel_offline(pdev),
710 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
711 }
712
713 /*
714 * Host attributes
715 */
716 static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
717 static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
718 static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
719 cxlflash_store_lun_mode);
720 static DEVICE_ATTR(ioctl_version, S_IRUGO, cxlflash_show_ioctl_version, NULL);
721
722 static struct device_attribute *cxlflash_host_attrs[] = {
723 &dev_attr_port0,
724 &dev_attr_port1,
725 &dev_attr_lun_mode,
726 &dev_attr_ioctl_version,
727 NULL
728 };
729
730 /*
731 * Device attributes
732 */
733 static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);
734
735 static struct device_attribute *cxlflash_dev_attrs[] = {
736 &dev_attr_mode,
737 NULL
738 };
739
740 /*
741 * Host template
742 */
743 static struct scsi_host_template driver_template = {
744 .module = THIS_MODULE,
745 .name = CXLFLASH_ADAPTER_NAME,
746 .info = cxlflash_driver_info,
747 .ioctl = cxlflash_ioctl,
748 .proc_name = CXLFLASH_NAME,
749 .queuecommand = cxlflash_queuecommand,
750 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
751 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
752 .change_queue_depth = cxlflash_change_queue_depth,
753 .cmd_per_lun = 16,
754 .can_queue = CXLFLASH_MAX_CMDS,
755 .this_id = -1,
756 .sg_tablesize = SG_NONE, /* No scatter gather support. */
757 .max_sectors = CXLFLASH_MAX_SECTORS,
758 .use_clustering = ENABLE_CLUSTERING,
759 .shost_attrs = cxlflash_host_attrs,
760 .sdev_attrs = cxlflash_dev_attrs,
761 };
762
763 /*
764 * Device dependent values
765 */
766 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
767
768 /*
769 * PCI device binding table
770 */
771 static struct pci_device_id cxlflash_pci_table[] = {
772 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
773 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
774 {}
775 };
776
777 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
778
779 /**
780 * free_mem() - free memory associated with the AFU
781  * @cfg:        Internal structure associated with the host.
782 */
783 static void free_mem(struct cxlflash_cfg *cfg)
784 {
785 int i;
786 char *buf = NULL;
787 struct afu *afu = cfg->afu;
788
789 if (cfg->afu) {
790 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
791 buf = afu->cmd[i].buf;
792 if (!((u64)buf & (PAGE_SIZE - 1)))
793 free_page((ulong)buf);
794 }
795
796 free_pages((ulong)afu, get_order(sizeof(struct afu)));
797 cfg->afu = NULL;
798 }
799 }
800
801 /**
802 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
803  * @cfg:        Internal structure associated with the host.
804 *
805 * Safe to call with AFU in a partially allocated/initialized state.
806 */
807 static void stop_afu(struct cxlflash_cfg *cfg)
808 {
809 int i;
810 struct afu *afu = cfg->afu;
811
812 if (likely(afu)) {
813 for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
814 complete(&afu->cmd[i].cevent);
815
816 if (likely(afu->afu_map)) {
817 cxl_psa_unmap((void *)afu->afu_map);
818 afu->afu_map = NULL;
819 }
820 }
821 }
822
823 /**
824 * term_mc() - terminates the master context
825  * @cfg:        Internal structure associated with the host.
826 * @level: Depth of allocation, where to begin waterfall tear down.
827 *
828 * Safe to call with AFU/MC in partially allocated/initialized state.
829 */
830 static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
831 {
832 int rc = 0;
833 struct afu *afu = cfg->afu;
834
835 if (!afu || !cfg->mcctx) {
836 pr_err("%s: returning from term_mc with NULL afu or MC\n",
837 __func__);
838 return;
839 }
840
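	/* The case fall-through below is intentional: each undo level
	 * performs its own teardown and then falls into the levels
	 * beneath it.
	 */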
841 switch (level) {
842 case UNDO_START:
843 rc = cxl_stop_context(cfg->mcctx);
844 BUG_ON(rc);
845 case UNMAP_THREE:
846 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
847 case UNMAP_TWO:
848 cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
849 case UNMAP_ONE:
850 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
851 case FREE_IRQ:
852 cxl_free_afu_irqs(cfg->mcctx);
853 case RELEASE_CONTEXT:
854 cfg->mcctx = NULL;
855 }
856 }
857
858 /**
859 * term_afu() - terminates the AFU
860  * @cfg:        Internal structure associated with the host.
861 *
862 * Safe to call with AFU/MC in partially allocated/initialized state.
863 */
864 static void term_afu(struct cxlflash_cfg *cfg)
865 {
866 term_mc(cfg, UNDO_START);
867
868 if (cfg->afu)
869 stop_afu(cfg);
870
871 pr_debug("%s: returning\n", __func__);
872 }
873
874 /**
875 * cxlflash_remove() - PCI entry point to tear down host
876 * @pdev: PCI device associated with the host.
877 *
878 * Safe to use as a cleanup in partially allocated/initialized state.
879 */
880 static void cxlflash_remove(struct pci_dev *pdev)
881 {
882 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
883 ulong lock_flags;
884
885 /* If a Task Management Function is active, wait for it to complete
886 * before continuing with remove.
887 */
888 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
889 if (cfg->tmf_active)
890 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
891 !cfg->tmf_active);
892 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
893
894 cfg->state = STATE_FAILTERM;
895 cxlflash_stop_term_user_contexts(cfg);
896
897 switch (cfg->init_state) {
898 case INIT_STATE_SCSI:
899 cxlflash_term_local_luns(cfg);
900 scsi_remove_host(cfg->host);
901 scsi_host_put(cfg->host);
902 /* Fall through */
903 case INIT_STATE_AFU:
904 term_afu(cfg);
905 case INIT_STATE_PCI:
906 pci_release_regions(cfg->dev);
907 pci_disable_device(pdev);
908 case INIT_STATE_NONE:
909 flush_work(&cfg->work_q);
910 free_mem(cfg);
911 break;
912 }
913
914 pr_debug("%s: returning\n", __func__);
915 }
916
917 /**
918 * alloc_mem() - allocates the AFU and its command pool
919  * @cfg:        Internal structure associated with the host.
920 *
921 * A partially allocated state remains on failure.
922 *
923 * Return:
924 * 0 on success
925 * -ENOMEM on failure to allocate memory
926 */
927 static int alloc_mem(struct cxlflash_cfg *cfg)
928 {
929 int rc = 0;
930 int i;
931 char *buf = NULL;
932
933 /* This allocation is about 12K, i.e. only 1 64k page
934          * and up to 4 4k pages
935 */
936 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
937 get_order(sizeof(struct afu)));
938 if (unlikely(!cfg->afu)) {
939 pr_err("%s: cannot get %d free pages\n",
940 __func__, get_order(sizeof(struct afu)));
941 rc = -ENOMEM;
942 goto out;
943 }
944 cfg->afu->parent = cfg;
945 cfg->afu->afu_map = NULL;
946
947 for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
948 if (!((u64)buf & (PAGE_SIZE - 1))) {
949 buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
950 if (unlikely(!buf)) {
951 pr_err("%s: Allocate command buffers fail!\n",
952 __func__);
953 rc = -ENOMEM;
954 free_mem(cfg);
955 goto out;
956 }
957 }
958
959 cfg->afu->cmd[i].buf = buf;
960 atomic_set(&cfg->afu->cmd[i].free, 1);
961 cfg->afu->cmd[i].slot = i;
962 }
963
964 out:
965 return rc;
966 }
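/*
 * Editorial sketch of the buffer carving above, assuming CMD_BUFSIZE
 * divides PAGE_SIZE evenly (which the code depends on): buf advances by
 * CMD_BUFSIZE per command, and a fresh page is allocated only when buf
 * is page aligned, i.e. when the previous page has been fully carved up:
 *
 *	page A: | cmd[0].buf | cmd[1].buf | ... | cmd[k-1].buf |
 *	page B: | cmd[k].buf | ...
 *
 * free_mem() mirrors this by freeing only the page-aligned buffers.
 */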
967
968 /**
969 * init_pci() - initializes the host as a PCI device
970  * @cfg:        Internal structure associated with the host.
971 *
972 * Return:
973 * 0 on success
974 * -EIO on unable to communicate with device
975 * A return code from the PCI sub-routines
976 */
977 static int init_pci(struct cxlflash_cfg *cfg)
978 {
979 struct pci_dev *pdev = cfg->dev;
980 int rc = 0;
981
982 cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
983 rc = pci_request_regions(pdev, CXLFLASH_NAME);
984 if (rc < 0) {
985 dev_err(&pdev->dev,
986 "%s: Couldn't register memory range of registers\n",
987 __func__);
988 goto out;
989 }
990
991 rc = pci_enable_device(pdev);
992 if (rc || pci_channel_offline(pdev)) {
993 if (pci_channel_offline(pdev)) {
994 cxlflash_wait_for_pci_err_recovery(cfg);
995 rc = pci_enable_device(pdev);
996 }
997
998 if (rc) {
999 dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
1000 __func__);
1001 cxlflash_wait_for_pci_err_recovery(cfg);
1002 goto out_release_regions;
1003 }
1004 }
1005
1006 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1007 if (rc < 0) {
1008 dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
1009 __func__);
1010 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1011 }
1012
1013 if (rc < 0) {
1014 dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
1015 __func__);
1016 goto out_disable;
1017 }
1018
1019 pci_set_master(pdev);
1020
1021 if (pci_channel_offline(pdev)) {
1022 cxlflash_wait_for_pci_err_recovery(cfg);
1023 if (pci_channel_offline(pdev)) {
1024 rc = -EIO;
1025 goto out_msi_disable;
1026 }
1027 }
1028
1029 rc = pci_save_state(pdev);
1030
1031 if (rc != PCIBIOS_SUCCESSFUL) {
1032 dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
1033 __func__);
1034 rc = -EIO;
1035 goto cleanup_nolog;
1036 }
1037
1038 out:
1039 pr_debug("%s: returning rc=%d\n", __func__, rc);
1040 return rc;
1041
1042 cleanup_nolog:
1043 out_msi_disable:
1044 cxlflash_wait_for_pci_err_recovery(cfg);
1045 out_disable:
1046 pci_disable_device(pdev);
1047 out_release_regions:
1048 pci_release_regions(pdev);
1049 goto out;
1050
1051 }
1052
1053 /**
1054 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
1055  * @cfg:        Internal structure associated with the host.
1056 *
1057 * Return:
1058 * 0 on success
1059 * A return code from adding the host
1060 */
1061 static int init_scsi(struct cxlflash_cfg *cfg)
1062 {
1063 struct pci_dev *pdev = cfg->dev;
1064 int rc = 0;
1065
1066 rc = scsi_add_host(cfg->host, &pdev->dev);
1067 if (rc) {
1068 dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
1069 __func__, rc);
1070 goto out;
1071 }
1072
1073 scsi_scan_host(cfg->host);
1074
1075 out:
1076 pr_debug("%s: returning rc=%d\n", __func__, rc);
1077 return rc;
1078 }
1079
1080 /**
1081 * set_port_online() - transitions the specified host FC port to online state
1082 * @fc_regs: Top of MMIO region defined for specified port.
1083 *
1084 * The provided MMIO region must be mapped prior to call. Online state means
1085 * that the FC link layer has synced, completed the handshaking process, and
1086 * is ready for login to start.
1087 */
1088 static void set_port_online(u64 *fc_regs)
1089 {
1090 u64 cmdcfg;
1091
1092 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1093 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
1094 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
1095 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1096 }
1097
1098 /**
1099 * set_port_offline() - transitions the specified host FC port to offline state
1100 * @fc_regs: Top of MMIO region defined for specified port.
1101 *
1102 * The provided MMIO region must be mapped prior to call.
1103 */
1104 static void set_port_offline(u64 *fc_regs)
1105 {
1106 u64 cmdcfg;
1107
1108 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1109 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
1110 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
1111 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1112 }
1113
1114 /**
1115  * wait_port_online() - waits for the specified host FC port to come online
1116 * @fc_regs: Top of MMIO region defined for specified port.
1117 * @delay_us: Number of microseconds to delay between reading port status.
1118 * @nretry: Number of cycles to retry reading port status.
1119 *
1120  * The provided MMIO region must be mapped prior to call. This will time out
1121 * when the cable is not plugged in.
1122 *
1123 * Return:
1124 * TRUE (1) when the specified port is online
1125 * FALSE (0) when the specified port fails to come online after timeout
1126 * -EINVAL when @delay_us is less than 1000
1127 */
1128 static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
1129 {
1130 u64 status;
1131
1132 if (delay_us < 1000) {
1133 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1134 return -EINVAL;
1135 }
1136
1137 do {
1138 msleep(delay_us / 1000);
1139 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1140 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1141 nretry--);
1142
1143 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1144 }
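/*
 * Editorial note: the worst-case wait in the poll loops above is roughly
 * nretry * delay_us; for instance (hypothetical values), delay_us = 1000
 * and nretry = 100 bounds the wait at about 100 ms. The 1000us floor on
 * delay_us also keeps msleep(delay_us / 1000) from being called with zero.
 */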
1145
1146 /**
1147  * wait_port_offline() - waits for the specified host FC port to go offline
1148 * @fc_regs: Top of MMIO region defined for specified port.
1149 * @delay_us: Number of microseconds to delay between reading port status.
1150 * @nretry: Number of cycles to retry reading port status.
1151 *
1152 * The provided MMIO region must be mapped prior to call.
1153 *
1154 * Return:
1155 * TRUE (1) when the specified port is offline
1156 * FALSE (0) when the specified port fails to go offline after timeout
1157 * -EINVAL when @delay_us is less than 1000
1158 */
1159 static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
1160 {
1161 u64 status;
1162
1163 if (delay_us < 1000) {
1164 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1165 return -EINVAL;
1166 }
1167
1168 do {
1169 msleep(delay_us / 1000);
1170 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1171 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1172 nretry--);
1173
1174 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1175 }
1176
1177 /**
1178 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1179 * @afu: AFU associated with the host that owns the specified FC port.
1180 * @port: Port number being configured.
1181 * @fc_regs: Top of MMIO region defined for specified port.
1182 * @wwpn: The world-wide-port-number previously discovered for port.
1183 *
1184 * The provided MMIO region must be mapped prior to call. As part of the
1185 * sequence to configure the WWPN, the port is toggled offline and then back
1186 * online. This toggling action can cause this routine to delay up to a few
1187 * seconds. When configured to use the internal LUN feature of the AFU, a
1188 * failure to come online is overridden.
1189 *
1190 * Return:
1191 * 0 when the WWPN is successfully written and the port comes back online
1192 * -1 when the port fails to go offline or come back up online
1193 */
1194 static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
1195 {
1196 int ret = 0;
1197
1198 set_port_offline(fc_regs);
1199
1200 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1201 FC_PORT_STATUS_RETRY_CNT)) {
1202 pr_debug("%s: wait on port %d to go offline timed out\n",
1203 __func__, port);
1204 ret = -1; /* but continue on to leave the port back online */
1205 }
1206
1207 if (ret == 0)
1208 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1209
1210 set_port_online(fc_regs);
1211
1212 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1213 FC_PORT_STATUS_RETRY_CNT)) {
1214 pr_debug("%s: wait on port %d to go online timed out\n",
1215 __func__, port);
1216 ret = -1;
1217
1218 /*
1219 * Override for internal lun!!!
1220 */
1221 if (afu->internal_lun) {
1222 pr_debug("%s: Overriding port %d online timeout!!!\n",
1223 __func__, port);
1224 ret = 0;
1225 }
1226 }
1227
1228 pr_debug("%s: returning rc=%d\n", __func__, ret);
1229
1230 return ret;
1231 }
1232
1233 /**
1234 * afu_link_reset() - resets the specified host FC port
1235 * @afu: AFU associated with the host that owns the specified FC port.
1236 * @port: Port number being configured.
1237 * @fc_regs: Top of MMIO region defined for specified port.
1238 *
1239 * The provided MMIO region must be mapped prior to call. The sequence to
1240 * reset the port involves toggling it offline and then back online. This
1241 * action can cause this routine to delay up to a few seconds. An effort
1242  * is made to maintain the link with the device by switching the host to
1243  * use the alternate port exclusively while the reset takes place.
1245 */
1246 static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
1247 {
1248 u64 port_sel;
1249
1250 /* first switch the AFU to the other links, if any */
1251 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1252 port_sel &= ~(1ULL << port);
1253 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1254 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1255
1256 set_port_offline(fc_regs);
1257 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1258 FC_PORT_STATUS_RETRY_CNT))
1259 pr_err("%s: wait on port %d to go offline timed out\n",
1260 __func__, port);
1261
1262 set_port_online(fc_regs);
1263 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1264 FC_PORT_STATUS_RETRY_CNT))
1265 pr_err("%s: wait on port %d to go online timed out\n",
1266 __func__, port);
1267
1268 /* switch back to include this port */
1269 port_sel |= (1ULL << port);
1270 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1271 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1272
1273 pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1274 }
1275
1276 /*
1277 * Asynchronous interrupt information table
1278 */
1279 static const struct asyc_intr_info ainfo[] = {
1280 {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1281 {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1282 {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
1283 {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
1284 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1285 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
1286 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1287 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1288 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1289 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1290 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
1291 {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
1292 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1293 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
1294 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1295 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1296 {0x0, "", 0, 0} /* terminator */
1297 };
1298
1299 /**
1300 * find_ainfo() - locates and returns asynchronous interrupt information
1301 * @status: Status code set by AFU on error.
1302 *
1303 * Return: The located information or NULL when the status code is invalid.
1304 */
1305 static const struct asyc_intr_info *find_ainfo(u64 status)
1306 {
1307 const struct asyc_intr_info *info;
1308
1309 for (info = &ainfo[0]; info->status; info++)
1310 if (info->status == status)
1311 return info;
1312
1313 return NULL;
1314 }
1315
1316 /**
1317 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1318 * @afu: AFU associated with the host.
1319 */
1320 static void afu_err_intr_init(struct afu *afu)
1321 {
1322 int i;
1323 u64 reg;
1324
1325 /* global async interrupts: AFU clears afu_ctrl on context exit
1326 * if async interrupts were sent to that context. This prevents
1327          * the AFU from sending further async interrupts when there is
1328          * nobody to receive them.
1330 */
1331
1332 /* mask all */
1333 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1334 /* set LISN# to send and point to master context */
1335 reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1336
1337 if (afu->internal_lun)
1338 reg |= 1; /* Bit 63 indicates local lun */
1339 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1340 /* clear all */
1341 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1342 /* unmask bits that are of interest */
1343 /* note: afu can send an interrupt after this step */
1344 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1345 /* clear again in case a bit came on after previous clear but before */
1346 /* unmask */
1347 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1348
1349 /* Clear/Set internal lun bits */
1350 reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1351 reg &= SISL_FC_INTERNAL_MASK;
1352 if (afu->internal_lun)
1353 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1354 writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1355
1356 /* now clear FC errors */
1357 for (i = 0; i < NUM_FC_PORTS; i++) {
1358 writeq_be(0xFFFFFFFFU,
1359 &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1360 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1361 }
1362
1363 /* sync interrupts for master's IOARRIN write */
1364 /* note that unlike asyncs, there can be no pending sync interrupts */
1365 /* at this time (this is a fresh context and master has not written */
1366 /* IOARRIN yet), so there is nothing to clear. */
1367
1368 /* set LISN#, it is always sent to the context that wrote IOARRIN */
1369 writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1370 writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1371 }
1372
1373 /**
1374 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1375 * @irq: Interrupt number.
1376 * @data: Private data provided at interrupt registration, the AFU.
1377 *
1378 * Return: Always return IRQ_HANDLED.
1379 */
1380 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1381 {
1382 struct afu *afu = (struct afu *)data;
1383 u64 reg;
1384 u64 reg_unmasked;
1385
1386 reg = readq_be(&afu->host_map->intr_status);
1387 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1388
1389 if (reg_unmasked == 0UL) {
1390 pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1391 __func__, (u64)afu, reg);
1392 goto cxlflash_sync_err_irq_exit;
1393 }
1394
1395 pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1396 __func__, (u64)afu, reg);
1397
1398 writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1399
1400 cxlflash_sync_err_irq_exit:
1401 pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1402 return IRQ_HANDLED;
1403 }
1404
1405 /**
1406 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1407 * @irq: Interrupt number.
1408 * @data: Private data provided at interrupt registration, the AFU.
1409 *
1410 * Return: Always return IRQ_HANDLED.
1411 */
1412 static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1413 {
1414 struct afu *afu = (struct afu *)data;
1415 struct afu_cmd *cmd;
1416 bool toggle = afu->toggle;
1417 u64 entry,
1418 *hrrq_start = afu->hrrq_start,
1419 *hrrq_end = afu->hrrq_end,
1420 *hrrq_curr = afu->hrrq_curr;
1421
1422 /* Process however many RRQ entries that are ready */
1423 while (true) {
1424 entry = *hrrq_curr;
1425
1426 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1427 break;
1428
1429 cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1430 cmd_complete(cmd);
1431
1432 /* Advance to next entry or wrap and flip the toggle bit */
1433 if (hrrq_curr < hrrq_end)
1434 hrrq_curr++;
1435 else {
1436 hrrq_curr = hrrq_start;
1437 toggle ^= SISL_RESP_HANDLE_T_BIT;
1438 }
1439 }
1440
1441 afu->hrrq_curr = hrrq_curr;
1442 afu->toggle = toggle;
1443
1444 return IRQ_HANDLED;
1445 }
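/*
 * Editorial sketch of the RRQ toggle protocol above: each 64-bit entry
 * carries the command pointer plus a toggle bit (SISL_RESP_HANDLE_T_BIT)
 * written by the AFU. The host tracks the toggle value it expects for the
 * current lap and flips it on wrap, so entries left over from the previous
 * lap never match and are never consumed twice. For a 4-entry queue:
 *
 *	lap 1: AFU writes entries with T=1, host consumes while T == 1
 *	wrap : host flips its expected toggle to 0
 *	lap 2: AFU writes entries with T=0, stale T=1 entries are ignored
 */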
1446
1447 /**
1448 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1449 * @irq: Interrupt number.
1450 * @data: Private data provided at interrupt registration, the AFU.
1451 *
1452 * Return: Always return IRQ_HANDLED.
1453 */
1454 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1455 {
1456 struct afu *afu = (struct afu *)data;
1457 struct cxlflash_cfg *cfg;
1458 u64 reg_unmasked;
1459 const struct asyc_intr_info *info;
1460 struct sisl_global_map *global = &afu->afu_map->global;
1461 u64 reg;
1462 u8 port;
1463 int i;
1464
1465 cfg = afu->parent;
1466
1467 reg = readq_be(&global->regs.aintr_status);
1468 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1469
1470 if (reg_unmasked == 0) {
1471 pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
1472 __func__, reg);
1473 goto out;
1474 }
1475
1476 /* it is OK to clear AFU status before FC_ERROR */
1477 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1478
1479 /* check each bit that is on */
1480 for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1481 info = find_ainfo(1ULL << i);
1482                 if (!(reg_unmasked & 0x1) || !info)
1483 continue;
1484
1485 port = info->port;
1486
1487 pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1488 __func__, port, info->desc,
1489 readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1490
1491 /*
1492 * do link reset first, some OTHER errors will set FC_ERROR
1493 * again if cleared before or w/o a reset
1494 */
1495 if (info->action & LINK_RESET) {
1496 pr_err("%s: FC Port %d: resetting link\n",
1497 __func__, port);
1498 cfg->lr_state = LINK_RESET_REQUIRED;
1499 cfg->lr_port = port;
1500 schedule_work(&cfg->work_q);
1501 }
1502
1503 if (info->action & CLR_FC_ERROR) {
1504 reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1505
1506 /*
1507 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
1508 * should be the same and tracing one is sufficient.
1509 */
1510
1511 pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
1512 __func__, port, reg);
1513
1514 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1515 writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1516 }
1517 }
1518
1519 out:
1520 pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
1521 return IRQ_HANDLED;
1522 }
1523
1524 /**
1525 * start_context() - starts the master context
1526  * @cfg:        Internal structure associated with the host.
1527 *
1528 * Return: A success or failure value from CXL services.
1529 */
1530 static int start_context(struct cxlflash_cfg *cfg)
1531 {
1532 int rc = 0;
1533
1534 rc = cxl_start_context(cfg->mcctx,
1535 cfg->afu->work.work_element_descriptor,
1536 NULL);
1537
1538 pr_debug("%s: returning rc=%d\n", __func__, rc);
1539 return rc;
1540 }
1541
1542 /**
1543 * read_vpd() - obtains the WWPNs from VPD
1544  * @cfg:        Internal structure associated with the host.
1545 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
1546 *
1547 * Return:
1548 * 0 on success
1549 * -ENODEV when VPD or WWPN keywords not found
1550 */
1551 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1552 {
1553 struct pci_dev *dev = cfg->parent_dev;
1554 int rc = 0;
1555 int ro_start, ro_size, i, j, k;
1556 ssize_t vpd_size;
1557 char vpd_data[CXLFLASH_VPD_LEN];
1558 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1559 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1560
1561 /* Get the VPD data from the device */
1562 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
1563 if (unlikely(vpd_size <= 0)) {
1564 pr_err("%s: Unable to read VPD (size = %ld)\n",
1565 __func__, vpd_size);
1566 rc = -ENODEV;
1567 goto out;
1568 }
1569
1570 /* Get the read only section offset */
1571 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1572 PCI_VPD_LRDT_RO_DATA);
1573 if (unlikely(ro_start < 0)) {
1574 pr_err("%s: VPD Read-only data not found\n", __func__);
1575 rc = -ENODEV;
1576 goto out;
1577 }
1578
1579 /* Get the read only section size, cap when extends beyond read VPD */
1580 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1581 j = ro_size;
1582 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1583 if (unlikely((i + j) > vpd_size)) {
1584 pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1585 __func__, (i + j), vpd_size);
1586 ro_size = vpd_size - i;
1587 }
1588
1589 /*
1590 * Find the offset of the WWPN tag within the read only
1591 * VPD data and validate the found field (partials are
1592 * no good to us). Convert the ASCII data to an integer
1593 * value. Note that we must copy to a temporary buffer
1594 * because the conversion service requires that the ASCII
1595 * string be terminated.
1596 */
1597 for (k = 0; k < NUM_FC_PORTS; k++) {
1598 j = ro_size;
1599 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1600
1601 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1602 if (unlikely(i < 0)) {
1603 pr_err("%s: Port %d WWPN not found in VPD\n",
1604 __func__, k);
1605 rc = -ENODEV;
1606 goto out;
1607 }
1608
1609 j = pci_vpd_info_field_size(&vpd_data[i]);
1610 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1611 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1612 pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
1613 __func__, k);
1614 rc = -ENODEV;
1615 goto out;
1616 }
1617
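		/* Editorial note: the conversion below uses kstrtoul(),
		 * whose second argument is the numeric base; WWPN_LEN
		 * appears to double here as base 16 for the hex string.
		 */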
1618 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1619 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1620 if (unlikely(rc)) {
1621 pr_err("%s: Fail to convert port %d WWPN to integer\n",
1622 __func__, k);
1623 rc = -ENODEV;
1624 goto out;
1625 }
1626 }
1627
1628 out:
1629 pr_debug("%s: returning rc=%d\n", __func__, rc);
1630 return rc;
1631 }
1632
1633 /**
1634 * cxlflash_context_reset() - timeout handler for AFU commands
1635 * @cmd: AFU command that timed out.
1636 *
1637 * Sends a reset to the AFU.
1638 */
1639 void cxlflash_context_reset(struct afu_cmd *cmd)
1640 {
1641 int nretry = 0;
1642 u64 rrin = 0x1;
1643 u64 room = 0;
1644 struct afu *afu = cmd->parent;
1645 ulong lock_flags;
1646
1647 pr_debug("%s: cmd=%p\n", __func__, cmd);
1648
1649 spin_lock_irqsave(&cmd->slock, lock_flags);
1650
1651 /* Already completed? */
1652 if (cmd->sa.host_use_b[0] & B_DONE) {
1653 spin_unlock_irqrestore(&cmd->slock, lock_flags);
1654 return;
1655 }
1656
1657 cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
1658 spin_unlock_irqrestore(&cmd->slock, lock_flags);
1659
1660 /*
1661 * We really want to send this reset at all costs, so spread
1662 * out wait time on successive retries for available room.
1663 */
1664 do {
1665 room = readq_be(&afu->host_map->cmd_room);
1666 atomic64_set(&afu->room, room);
1667 if (room)
1668 goto write_rrin;
1669 udelay(nretry);
1670 } while (nretry++ < MC_ROOM_RETRY_CNT);
1671
1672 pr_err("%s: no cmd_room to send reset\n", __func__);
1673 return;
1674
1675 write_rrin:
1676 nretry = 0;
1677 writeq_be(rrin, &afu->host_map->ioarrin);
1678 do {
1679 rrin = readq_be(&afu->host_map->ioarrin);
1680 if (rrin != 0x1)
1681 break;
1682                 /* Double the delay each time: 1, 2, 4, ... microseconds */
1683                 udelay(1 << nretry);
1684 } while (nretry++ < MC_ROOM_RETRY_CNT);
1685 }
1686
1687 /**
1688 * init_pcr() - initialize the provisioning and control registers
1689  * @cfg:        Internal structure associated with the host.
1690 *
1691 * Also sets up fast access to the mapped registers and initializes AFU
1692 * command fields that never change.
1693 */
1694 void init_pcr(struct cxlflash_cfg *cfg)
1695 {
1696 struct afu *afu = cfg->afu;
1697 struct sisl_ctrl_map *ctrl_map;
1698 int i;
1699
1700 for (i = 0; i < MAX_CONTEXT; i++) {
1701 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1702 /* disrupt any clients that could be running */
1703                 /* e.g. clients that survived a master restart */
1704 writeq_be(0, &ctrl_map->rht_start);
1705 writeq_be(0, &ctrl_map->rht_cnt_id);
1706 writeq_be(0, &ctrl_map->ctx_cap);
1707 }
1708
1709 /* copy frequently used fields into afu */
1710 afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1711 /* ctx_hndl is 16 bits in CAIA */
1712 afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1713 afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1714
1715 /* Program the Endian Control for the master context */
1716 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1717
1718 /* initialize cmd fields that never change */
1719 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1720 afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1721 afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1722 afu->cmd[i].rcb.rrq = 0x0;
1723 }
1724 }
1725
1726 /**
1727 * init_global() - initialize AFU global registers
1728  * @cfg:        Internal structure associated with the host.
1729 */
1730 int init_global(struct cxlflash_cfg *cfg)
1731 {
1732 struct afu *afu = cfg->afu;
1733 u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
1734 int i = 0, num_ports = 0;
1735 int rc = 0;
1736 u64 reg;
1737
1738 rc = read_vpd(cfg, &wwpn[0]);
1739 if (rc) {
1740 pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
1741 goto out;
1742 }
1743
1744 pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1745
1746 /* set up RRQ in AFU for master issued cmds */
1747 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1748 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1749
1750 /* AFU configuration */
1751 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1752 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1753 /* enable all auto retry options and control endianness */
1754 /* leave others at default: */
1755 /* CTX_CAP write protected, mbox_r does not clear on read and */
1756 /* checker on if dual afu */
1757 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1758
1759 /* global port select: select either port */
1760 if (afu->internal_lun) {
1761 /* only use port 0 */
1762 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1763 num_ports = NUM_FC_PORTS - 1;
1764 } else {
1765 writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1766 num_ports = NUM_FC_PORTS;
1767 }
1768
1769 for (i = 0; i < num_ports; i++) {
1770 /* unmask all errors (but they are still masked at AFU) */
1771 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1772 /* clear CRC error cnt & set a threshold */
1773 (void)readq_be(&afu->afu_map->global.
1774 fc_regs[i][FC_CNT_CRCERR / 8]);
1775 writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1776 [FC_CRC_THRESH / 8]);
1777
1778 /* set WWPNs. If already programmed, wwpn[i] is 0 */
1779 if (wwpn[i] != 0 &&
1780 afu_set_wwpn(afu, i,
1781 &afu->afu_map->global.fc_regs[i][0],
1782 wwpn[i])) {
1783 pr_err("%s: failed to set WWPN on port %d\n",
1784 __func__, i);
1785 rc = -EIO;
1786 goto out;
1787 }
1788 /* Programming WWPN back to back causes additional
1789 * offline/online transitions and a PLOGI
1790 */
1791 msleep(100);
1792
1793 }
1794
1795 /* set up master's own CTX_CAP to allow real mode, host translation */
1796 /* tbls, afu cmds and read/write GSCSI cmds. */
1797 /* First, unlock ctx_cap write by reading mbox */
1798 (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
1799 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1800 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1801 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1802 &afu->ctrl_map->ctx_cap);
1803 /* init heartbeat */
1804 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1805
1806 out:
1807 return rc;
1808 }
1809
1810 /**
1811 * start_afu() - initializes and starts the AFU
1812  * @cfg:        Internal structure associated with the host.
1813 */
1814 static int start_afu(struct cxlflash_cfg *cfg)
1815 {
1816 struct afu *afu = cfg->afu;
1817 struct afu_cmd *cmd;
1818
1819 int i = 0;
1820 int rc = 0;
1821
1822 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1823 cmd = &afu->cmd[i];
1824
1825 init_completion(&cmd->cevent);
1826 spin_lock_init(&cmd->slock);
1827 cmd->parent = afu;
1828 }
1829
1830 init_pcr(cfg);
1831
1832 /* initialize RRQ pointers */
1833 afu->hrrq_start = &afu->rrq_entry[0];
1834 afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1835 afu->hrrq_curr = afu->hrrq_start;
1836 afu->toggle = 1;
1837
1838 rc = init_global(cfg);
1839
1840 pr_debug("%s: returning rc=%d\n", __func__, rc);
1841 return rc;
1842 }
1843
1844 /**
1845 * init_mc() - create and register as the master context
1846  * @cfg:        Internal structure associated with the host.
1847 *
1848 * Return:
1849 * 0 on success
1850 * -ENOMEM when unable to obtain a context from CXL services
1851 * A failure value from CXL services.
1852 */
1853 static int init_mc(struct cxlflash_cfg *cfg)
1854 {
1855 struct cxl_context *ctx;
1856 struct device *dev = &cfg->dev->dev;
1857 struct afu *afu = cfg->afu;
1858 int rc = 0;
1859 enum undo_level level;
1860
1861 ctx = cxl_get_context(cfg->dev);
1862 if (unlikely(!ctx))
1863 return -ENOMEM;
1864 cfg->mcctx = ctx;
1865
1866 /* Set it up as a master with the CXL */
1867 cxl_set_master(ctx);
1868
1869 /* During initialization reset the AFU to start from a clean slate */
1870 rc = cxl_afu_reset(cfg->mcctx);
1871 if (unlikely(rc)) {
1872 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1873 __func__, rc);
1874 level = RELEASE_CONTEXT;
1875 goto out;
1876 }
1877
1878 rc = cxl_allocate_afu_irqs(ctx, 3);
1879 if (unlikely(rc)) {
1880 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1881 __func__, rc);
1882 level = RELEASE_CONTEXT;
1883 goto out;
1884 }
1885
1886 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1887 "SISL_MSI_SYNC_ERROR");
1888 if (unlikely(rc <= 0)) {
1889 dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1890 __func__);
1891 level = FREE_IRQ;
1892 goto out;
1893 }
1894
1895 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1896 "SISL_MSI_RRQ_UPDATED");
1897 if (unlikely(rc <= 0)) {
1898 dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1899 __func__);
1900 level = UNMAP_ONE;
1901 goto out;
1902 }
1903
1904 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1905 "SISL_MSI_ASYNC_ERROR");
1906 if (unlikely(rc <= 0)) {
1907 dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1908 __func__);
1909 level = UNMAP_TWO;
1910 goto out;
1911 }
1912
1913 rc = 0;
1914
1915 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1916 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1917 * element (pe) that is embedded in the context (ctx)
1918 */
1919 rc = start_context(cfg);
1920 if (unlikely(rc)) {
1921 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1922 level = UNMAP_THREE;
1923 goto out;
1924 }
1925 ret:
1926 pr_debug("%s: returning rc=%d\n", __func__, rc);
1927 return rc;
1928 out:
1929 term_mc(cfg, level);
1930 goto ret;
1931 }
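/*
 * The error paths above depend on term_mc() unwinding exactly what has
 * been set up so far, keyed by undo_level. A sketch of the expected
 * fall-through shape (an assumption about term_mc(), defined earlier
 * in this file; shown for illustration only):
 *
 *	switch (level) {
 *	case UNMAP_THREE:
 *		cxl_unmap_afu_irq(ctx, 3, afu);		// fall through
 *	case UNMAP_TWO:
 *		cxl_unmap_afu_irq(ctx, 2, afu);		// fall through
 *	case UNMAP_ONE:
 *		cxl_unmap_afu_irq(ctx, 1, afu);		// fall through
 *	case FREE_IRQ:
 *		cxl_free_afu_irqs(ctx);			// fall through
 *	case RELEASE_CONTEXT:
 *		cfg->mcctx = NULL;			// drop the context
 *	}
 */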
1932
1933 /**
1934 * init_afu() - setup as master context and start AFU
1935 * @cfg: Internal structure associated with the host.
1936 *
1937 * This routine is a higher level of control for configuring the
1938 * AFU on probe and reset paths.
1939 *
1940 * Return:
1941 * 0 on success
1942 * -ENOMEM when unable to map the AFU MMIO space
1943 * A failure value from internal services.
1944 */
1945 static int init_afu(struct cxlflash_cfg *cfg)
1946 {
1947 u64 reg;
1948 int rc = 0;
1949 struct afu *afu = cfg->afu;
1950 struct device *dev = &cfg->dev->dev;
1951
1952 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1953
1954 rc = init_mc(cfg);
1955 if (rc) {
1956 dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1957 __func__, rc);
1958 goto err1;
1959 }
1960
1961 /* Map the entire MMIO space of the AFU. */
1962
1963 afu->afu_map = cxl_psa_map(cfg->mcctx);
1964 if (!afu->afu_map) {
1965 rc = -ENOMEM;
1966 term_mc(cfg, UNDO_START);
1967 dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1968 goto err1;
1969 }
1970
1971 /* Don't byte-reverse when reading afu_version; otherwise the string */
1972 /* form will be backwards. */
1973 reg = afu->afu_map->global.regs.afu_version;
1974 memcpy(afu->version, &reg, 8);
1975 afu->interface_version =
1976 readq_be(&afu->afu_map->global.regs.interface_version);
1977 pr_debug("%s: afu version %s, interface version 0x%llX\n",
1978 __func__, afu->version, afu->interface_version);
1979
1980 rc = start_afu(cfg);
1981 if (rc) {
1982 dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1983 __func__, rc);
1984 term_mc(cfg, UNDO_START);
1985 cxl_psa_unmap((void *)afu->afu_map);
1986 afu->afu_map = NULL;
1987 goto err1;
1988 }
1989
1990 afu_err_intr_init(cfg->afu);
1991 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
1992
1993 /* Restore the LUN mappings */
1994 cxlflash_restore_luntable(cfg);
1995 err1:
1996 pr_debug("%s: returning rc=%d\n", __func__, rc);
1997 return rc;
1998 }
1999
2000 /**
2001 * cxlflash_send_cmd() - sends an AFU command
2002 * @afu: AFU associated with the host.
2003 * @cmd: AFU command to send.
2004 *
2005 * Return:
2006 * 0 on success
2007 * SCSI_MLQUEUE_HOST_BUSY when out of command room
2008 */
2009 int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd)
2010 {
2011 struct cxlflash_cfg *cfg = afu->parent;
2012 int nretry = 0;
2013 int rc = 0;
2014 u64 room;
2015 long newval;
2016
2017 /*
2018 * This routine is used by critical users such as AFU sync and to
2019 * send a task management function (TMF). Thus we want to retry a
2020 * bit before returning an error. To avoid the performance penalty
2021 * of MMIO, we spread the update of 'room' over multiple commands.
2022 */
2023 retry:
2024 newval = atomic64_dec_if_positive(&afu->room);
2025 if (!newval) {
2026 do {
2027 room = readq_be(&afu->host_map->cmd_room);
2028 atomic64_set(&afu->room, room);
2029 if (room)
2030 goto write_ioarrin;
2031 udelay(nretry);
2032 } while (nretry++ < MC_ROOM_RETRY_CNT);
2033
2034 pr_err("%s: no cmd_room to send 0x%X\n",
2035 __func__, cmd->rcb.cdb[0]);
2036
2037 goto no_room;
2038 } else if (unlikely(newval < 0)) {
2039 /* This should be rare, i.e. only if two threads race and
2040 * both decrement before the MMIO read completes. In this
2041 * case, just benefit from the other thread having updated
2042 * afu->room.
2043 */
2044 if (nretry++ < MC_ROOM_RETRY_CNT) {
2045 udelay(nretry);
2046 goto retry;
2047 }
2048
2049 goto no_room;
2050 }
2051
2052 write_ioarrin:
2053 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
2054 out:
2055 pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
2056 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
2057 return rc;
2058
2059 no_room:
2060 afu->read_room = true;
2061 schedule_work(&cfg->work_q);
2062 rc = SCSI_MLQUEUE_HOST_BUSY;
2063 goto out;
2064 }
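/*
 * The command-room protocol above in miniature (illustrative sketch):
 * afu->room caches the hardware cmd_room count so the common case
 * costs one atomic operation instead of an MMIO read. Only when the
 * cache is exhausted, or when two senders race it negative, is
 * cmd_room re-read and the cache refreshed.
 *
 *	long n = atomic64_dec_if_positive(&afu->room);
 *
 *	if (n > 0) {
 *		// fast path: a slot was claimed, write ioarrin
 *	} else if (n == 0) {
 *		// cache now empty: re-read cmd_room via MMIO and refresh
 *	} else {
 *		// cache was already empty (another sender drained it):
 *		// back off briefly and retry the claim
 *	}
 */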
2065
2066 /**
2067 * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command
2068 * @afu: AFU associated with the host.
2069 * @cmd: AFU command that was sent.
2070 */
2071 void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
2072 {
2073 ulong timeout = cmd->rcb.timeout * 2 * HZ; /* relative jiffies */
2074
2075 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
2076 if (!timeout)
2077 cxlflash_context_reset(cmd);
2078
2079 if (unlikely(cmd->sa.ioasc != 0))
2080 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
2081 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
2082 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
2083 cmd->sa.rc.fc_rc);
2084 }
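/*
 * Typical internal command flow built from the helpers above (sketch
 * of a hypothetical caller; cxlflash_afu_sync() below follows exactly
 * this shape):
 *
 *	struct afu_cmd *cmd = cxlflash_cmd_checkout(afu);
 *
 *	if (cmd) {
 *		// fill in cmd->rcb (flags, timeout, cdb, ...) ...
 *		if (!cxlflash_send_cmd(afu, cmd))
 *			cxlflash_wait_resp(afu, cmd);
 *		cxlflash_cmd_checkin(cmd);
 *	}
 */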
2085
2086 /**
2087 * cxlflash_afu_sync() - builds and sends an AFU sync command
2088 * @afu: AFU associated with the host.
2089 * @ctx_hndl_u: Identifies context requesting sync.
2090 * @res_hndl_u: Identifies resource requesting sync.
2091 * @mode: Type of sync to issue (lightweight, heavyweight, global).
2092 *
2093 * The AFU can only take one sync command at a time. This routine enforces this
2094 * limitation by using a mutex to provide exclusive access to the AFU during
2095 * the sync. This design point requires that calling threads not be in interrupt
2096 * context, as they may sleep while waiting on a concurrent sync operation.
2097 *
2098 * AFU sync operations are only necessary and allowed when the device is
2099 * operating normally. When not operating normally, sync requests can occur as
2100 * part of cleaning up resources associated with an adapter prior to removal.
2101 * In this scenario, these requests are simply ignored (safe due to the AFU
2102 * going away).
2103 *
2104 * Return:
2105 * 0 on success
2106 * -1 on failure
2107 */
2108 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
2109 res_hndl_t res_hndl_u, u8 mode)
2110 {
2111 struct cxlflash_cfg *cfg = afu->parent;
2112 struct afu_cmd *cmd = NULL;
2113 int rc = 0;
2114 int retry_cnt = 0;
2115 static DEFINE_MUTEX(sync_active);
2116
2117 if (cfg->state != STATE_NORMAL) {
2118 pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
2119 return 0;
2120 }
2121
2122 mutex_lock(&sync_active);
2123 retry:
2124 cmd = cxlflash_cmd_checkout(afu);
2125 if (unlikely(!cmd)) {
2126 retry_cnt++;
2127 udelay(1000 * retry_cnt);
2128 if (retry_cnt < MC_RETRY_CNT)
2129 goto retry;
2130 pr_err("%s: could not get a free command\n", __func__);
2131 rc = -1;
2132 goto out;
2133 }
2134
2135 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
2136
2137 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
2138
2139 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2140 cmd->rcb.port_sel = 0x0; /* NA */
2141 cmd->rcb.lun_id = 0x0; /* NA */
2142 cmd->rcb.data_len = 0x0;
2143 cmd->rcb.data_ea = 0x0;
2144 cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2145
2146 cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
2147 cmd->rcb.cdb[1] = mode;
2148
2149 /* The cdb is aligned, no unaligned accessors required */
2150 *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
2151 *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
2152
2153 rc = cxlflash_send_cmd(afu, cmd);
2154 if (unlikely(rc))
2155 goto out;
2156
2157 cxlflash_wait_resp(afu, cmd);
2158
2159 /* B_ERROR is set on timeout by the context reset */
2160 if (unlikely((cmd->sa.ioasc != 0) ||
2161 (cmd->sa.host_use_b[0] & B_ERROR)))
2162 rc = -1;
2163 out:
2164 mutex_unlock(&sync_active);
2165 if (cmd)
2166 cxlflash_cmd_checkin(cmd);
2167 pr_debug("%s: returning rc=%d\n", __func__, rc);
2168 return rc;
2169 }
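/*
 * Usage sketch (hypothetical caller): issue a lightweight sync after a
 * context's resource table changes. Must be called from a context that
 * can sleep; AFU_LW_SYNC is assumed to be the lightweight mode value
 * from sislite.h.
 *
 *	if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
 *		pr_err("%s: AFU sync failed\n", __func__);
 */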
2170
2171 /**
2172 * cxlflash_afu_reset() - resets the AFU
2173 * @cfg: Internal structure associated with the host.
2174 *
2175 * Return:
2176 * 0 on success
2177 * A failure value from internal services.
2178 */
2179 int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
2180 {
2181 int rc = 0;
2182 /* Stop the context before the reset. Since the context is
2183 * no longer available, restart it after the reset completes.
2184 */
2185
2186 term_afu(cfg);
2187
2188 rc = init_afu(cfg);
2189
2190 pr_debug("%s: returning rc=%d\n", __func__, rc);
2191 return rc;
2192 }
2193
2194 /**
2195 * cxlflash_worker_thread() - work thread handler for the AFU
2196 * @work: Work structure contained within the cxlflash_cfg associated with the host.
2197 *
2198 * Handles the following events:
2199 * - Link reset, which cannot be performed in interrupt context because
2200 * it can block for up to a few seconds
2201 * - Read AFU command room
2202 */
2203 static void cxlflash_worker_thread(struct work_struct *work)
2204 {
2205 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2206 work_q);
2207 struct afu *afu = cfg->afu;
2208 int port;
2209 ulong lock_flags;
2210
2211 /* Avoid MMIO if the device has failed */
2212
2213 if (cfg->state != STATE_NORMAL)
2214 return;
2215
2216 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2217
2218 if (cfg->lr_state == LINK_RESET_REQUIRED) {
2219 port = cfg->lr_port;
2220 if (port < 0)
2221 pr_err("%s: invalid port index %d\n", __func__, port);
2222 else {
2223 spin_unlock_irqrestore(cfg->host->host_lock,
2224 lock_flags);
2225
2226 /* The reset can block... */
2227 afu_link_reset(afu, port,
2228 &afu->afu_map->
2229 global.fc_regs[port][0]);
2230 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2231 }
2232
2233 cfg->lr_state = LINK_RESET_COMPLETE;
2234 }
2235
2236 if (afu->read_room) {
2237 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2238 afu->read_room = false;
2239 }
2240
2241 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2242 }
2243
2244 /**
2245 * cxlflash_probe() - PCI entry point to add host
2246 * @pdev: PCI device associated with the host.
2247 * @dev_id: PCI device id associated with device.
2248 *
2249 * Return: 0 on success / non-zero on failure
2250 */
2251 static int cxlflash_probe(struct pci_dev *pdev,
2252 const struct pci_device_id *dev_id)
2253 {
2254 struct Scsi_Host *host;
2255 struct cxlflash_cfg *cfg = NULL;
2256 struct device *phys_dev;
2257 struct dev_dependent_vals *ddv;
2258 int rc = 0;
2259
2260 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2261 __func__, pdev->irq);
2262
2263 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2264 driver_template.max_sectors = ddv->max_sectors;
2265
2266 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2267 if (!host) {
2268 dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2269 __func__);
2270 rc = -ENOMEM;
2271 goto out;
2272 }
2273
2274 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2275 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2276 host->max_channel = NUM_FC_PORTS - 1;
2277 host->unique_id = host->host_no;
2278 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2279
2280 cfg = (struct cxlflash_cfg *)host->hostdata;
2281 cfg->host = host;
2282 rc = alloc_mem(cfg);
2283 if (rc) {
2284 dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2285 __func__);
2286 rc = -ENOMEM;
2287 goto out;
2288 }
2289
2290 cfg->init_state = INIT_STATE_NONE;
2291 cfg->dev = pdev;
2292
2293 /*
2294 * The promoted LUNs move to the top of the LUN table. The rest stay
2295 * on the bottom half. The bottom half grows from the end
2296 * (index = 255), whereas the top half grows from the beginning
2297 * (index = 0).
2298 */
2299 cfg->promote_lun_index = 0;
2300 cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2301 cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2302
2303 cfg->dev_id = (struct pci_device_id *)dev_id;
2304 cfg->mcctx = NULL;
2305
2306 init_waitqueue_head(&cfg->tmf_waitq);
2307 init_waitqueue_head(&cfg->limbo_waitq);
2308
2309 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2310 cfg->lr_state = LINK_RESET_INVALID;
2311 cfg->lr_port = -1;
2312 mutex_init(&cfg->ctx_tbl_list_mutex);
2313 mutex_init(&cfg->ctx_recovery_mutex);
2314 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2315 INIT_LIST_HEAD(&cfg->lluns);
2316
2317 pci_set_drvdata(pdev, cfg);
2318
2319 /* Use the special service provided to look up the physical
2320 * PCI device, since we are called on the probe of the virtual
2321 * PCI host bus (vphb)
2322 */
2323 phys_dev = cxl_get_phys_dev(pdev);
2324 if (!dev_is_pci(phys_dev)) {
2325 pr_err("%s: not a pci dev\n", __func__);
2326 rc = -ENODEV;
2327 goto out_remove;
2328 }
2329 cfg->parent_dev = to_pci_dev(phys_dev);
2330
2331 cfg->cxl_afu = cxl_pci_to_afu(pdev);
2332
2333 rc = init_pci(cfg);
2334 if (rc) {
2335 dev_err(&pdev->dev, "%s: call to init_pci "
2336 "failed rc=%d!\n", __func__, rc);
2337 goto out_remove;
2338 }
2339 cfg->init_state = INIT_STATE_PCI;
2340
2341 rc = init_afu(cfg);
2342 if (rc) {
2343 dev_err(&pdev->dev, "%s: call to init_afu "
2344 "failed rc=%d!\n", __func__, rc);
2345 goto out_remove;
2346 }
2347 cfg->init_state = INIT_STATE_AFU;
2348
2350 rc = init_scsi(cfg);
2351 if (rc) {
2352 dev_err(&pdev->dev, "%s: call to init_scsi "
2353 "failed rc=%d!\n", __func__, rc);
2354 goto out_remove;
2355 }
2356 cfg->init_state = INIT_STATE_SCSI;
2357
2358 out:
2359 pr_debug("%s: returning rc=%d\n", __func__, rc);
2360 return rc;
2361
2362 out_remove:
2363 cxlflash_remove(pdev);
2364 goto out;
2365 }
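/*
 * cfg->init_state records how far probe progressed so that
 * cxlflash_remove() can unwind only what was actually initialized. A
 * sketch of the expected fall-through shape (an assumption about the
 * remove path, defined earlier in this file; illustration only):
 *
 *	switch (cfg->init_state) {
 *	case INIT_STATE_SCSI:
 *		// undo init_scsi()		-- fall through
 *	case INIT_STATE_AFU:
 *		// undo init_afu()		-- fall through
 *	case INIT_STATE_PCI:
 *		// undo init_pci()		-- fall through
 *	case INIT_STATE_NONE:
 *		// free the SCSI host
 *	}
 */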
2366
2367 /**
2368 * cxlflash_pci_error_detected() - called when a PCI error is detected
2369 * @pdev: PCI device struct.
2370 * @state: PCI channel state.
2371 *
2372 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2373 */
2374 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2375 pci_channel_state_t state)
2376 {
2377 int rc = 0;
2378 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2379 struct device *dev = &cfg->dev->dev;
2380
2381 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2382
2383 switch (state) {
2384 case pci_channel_io_frozen:
2385 cfg->state = STATE_LIMBO;
2386
2387 /* Turn off legacy I/O */
2388 scsi_block_requests(cfg->host);
2389 rc = cxlflash_mark_contexts_error(cfg);
2390 if (unlikely(rc))
2391 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2392 __func__, rc);
2393 term_mc(cfg, UNDO_START);
2394 stop_afu(cfg);
2395
2396 return PCI_ERS_RESULT_NEED_RESET;
2397 case pci_channel_io_perm_failure:
2398 cfg->state = STATE_FAILTERM;
2399 wake_up_all(&cfg->limbo_waitq);
2400 scsi_unblock_requests(cfg->host);
2401 return PCI_ERS_RESULT_DISCONNECT;
2402 default:
2403 break;
2404 }
2405 return PCI_ERS_RESULT_NEED_RESET;
2406 }
2407
2408 /**
2409 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2410 * @pdev: PCI device struct.
2411 *
2412 * This routine is called by the pci error recovery code after the PCI
2413 * slot has been reset, just before we should resume normal operations.
2414 *
2415 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2416 */
2417 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2418 {
2419 int rc = 0;
2420 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2421 struct device *dev = &cfg->dev->dev;
2422
2423 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2424
2425 rc = init_afu(cfg);
2426 if (unlikely(rc)) {
2427 dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2428 return PCI_ERS_RESULT_DISCONNECT;
2429 }
2430
2431 return PCI_ERS_RESULT_RECOVERED;
2432 }
2433
2434 /**
2435 * cxlflash_pci_resume() - called when normal operation can resume
2436 * @pdev: PCI device struct
2437 */
2438 static void cxlflash_pci_resume(struct pci_dev *pdev)
2439 {
2440 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2441 struct device *dev = &cfg->dev->dev;
2442
2443 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2444
2445 cfg->state = STATE_NORMAL;
2446 wake_up_all(&cfg->limbo_waitq);
2447 scsi_unblock_requests(cfg->host);
2448 }
2449
2450 static const struct pci_error_handlers cxlflash_err_handler = {
2451 .error_detected = cxlflash_pci_error_detected,
2452 .slot_reset = cxlflash_pci_slot_reset,
2453 .resume = cxlflash_pci_resume,
2454 };
2455
2456 /*
2457 * PCI device structure
2458 */
2459 static struct pci_driver cxlflash_driver = {
2460 .name = CXLFLASH_NAME,
2461 .id_table = cxlflash_pci_table,
2462 .probe = cxlflash_probe,
2463 .remove = cxlflash_remove,
2464 .err_handler = &cxlflash_err_handler,
2465 };
2466
2467 /**
2468 * init_cxlflash() - module entry point
2469 *
2470 * Return: 0 on success / non-zero on failure
2471 */
2472 static int __init init_cxlflash(void)
2473 {
2474 pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
2475 __func__, CXLFLASH_DRIVER_DATE);
2476
2477 cxlflash_list_init();
2478
2479 return pci_register_driver(&cxlflash_driver);
2480 }
2481
2482 /**
2483 * exit_cxlflash() - module exit point
2484 */
2485 static void __exit exit_cxlflash(void)
2486 {
2487 cxlflash_term_global_luns();
2488 cxlflash_free_errpage();
2489
2490 pci_unregister_driver(&cxlflash_driver);
2491 }
2492
2493 module_init(init_cxlflash);
2494 module_exit(exit_cxlflash);