// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then this error is handled elsewhere. If
				 * not, it must be handled here; this is
				 * likely an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
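
/*
 * Note on the translation above: the checks are applied in order (SCSI
 * status, then FC status, then AFU status) and each later check simply
 * overwrites scp->result, so an AFU-level error has the final say. For
 * example, a command returning valid sense data with no FC/AFU error ends
 * up with scp->result == ioasa->rc.scsi_rc and the sense buffer filled in.
 */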

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}
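
/*
 * The polling loop above backs off exponentially: udelay(1 << nretry) for
 * nretry = 0, 1, 2, ... so the total busy-wait after N retries is roughly
 * (2^(N+1) - 1) microseconds. With MC_ROOM_RETRY_CNT being a small constant
 * (defined in main.h), the worst case stays in the low-millisecond range,
 * which is why this can run under the disabled send queue lock.
 */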

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
			    __func__, cmd, cmd->rcb.data_len,
			    cmd->rcb.data_ea, rc);
	return rc;
}
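
/*
 * The 'room' caching above amortizes the expensive cmd_room MMIO read:
 * after one read returns R credits, the next R - 1 submissions decrement
 * the cached hwq->room and skip the read entirely. Only when the cache
 * runs out is the register consulted again, so a burst of R commands
 * costs a single MMIO read rather than R of them.
 */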

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}
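
/*
 * SQ submission sketch: a credit is taken with a lock-free atomic first,
 * so a full ring is rejected without touching the spin lock. Under the
 * lock, the IOARCB is copied into the slot at hsq_curr, the pointer
 * advances (wrapping from hsq_end back to hsq_start), and the MMIO write
 * of the new tail is what publishes the entry to the AFU. Credits are
 * returned by process_hrrq() as completions are harvested.
 */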

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}
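
/*
 * The host-side wait is twice the timeout handed to the AFU in the IOARCB.
 * Assuming second-granularity timeouts (which the '* 2 * 1000' conversion
 * to milliseconds implies), a command sent with rcb.timeout = 5 gives the
 * AFU 5 seconds to respond while the host waits up to 10 seconds before
 * declaring -ETIMEDOUT, leaving room for the AFU's own timeout to fire
 * first.
 */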

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}
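
/*
 * TMF flow in brief: cfg->tmf_active serializes the host to one TMF at a
 * time; cmd_complete() clears it and wakes tmf_waitq when the response
 * arrives. On the 5-second timeout the command is still sitting on the
 * hardware queue's pending list, so it must be unlinked here before the
 * allocation backing it is freed, hence needs_deletion.
 */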

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, do not continue with
	 * regular commands; return host busy so that the mid-layer
	 * retries the command once the TMF has completed.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}
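
/*
 * Note that only the first scatterlist element is programmed into the
 * IOARCB: the RCB carries a single effective address/length pair, and the
 * host template is presumed to restrict commands to one SG element (the
 * AFU expects a single virtually contiguous buffer per command).
 */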

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		fallthrough;
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		fallthrough;
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		fallthrough;
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		fallthrough;
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
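
/*
 * The waterfall works by entering the switch at the deepest level reached
 * during setup and falling through every shallower step. For example,
 * term_intr(cfg, UNMAP_TWO, idx) unmaps IRQs 2 and 1 and then frees the
 * IRQ allocation, while UNDO_NOOP unwinds nothing.
 */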

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
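
/*
 * The 1.5 second figure follows from the back-off in the polling loop:
 * msleep(100 * retry_cnt) for retry_cnt = 1..N sums to 100 * N(N+1)/2 ms,
 * which comes to 1500 ms per port assuming MC_RETRY_CNT is 5 (its value
 * lives in main.h).
 */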

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		fallthrough;
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		fallthrough;
	case INIT_STATE_AFU:
		term_afu(cfg);
		fallthrough;
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		fallthrough;
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
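
/*
 * For both pollers above, the worst-case wait is roughly delay_us * nretry
 * (with delay_us and nretry supplied by the caller, e.g. the
 * FC_PORT_STATUS_RETRY_* constants used below). A status read of all ones
 * (U64_MAX) suggests the MMIO read itself failed, so the remaining retry
 * budget is halved each time to fail faster on a dead link.
 */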

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain the link with the device by switching the host to
 * use the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents the
	 * AFU from sending further async interrupts when there is nobody
	 * to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		reg = readq_be(&hwq->host_map->ctx_ctrl);
		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
		reg |= SISL_MSI_SYNC_ERROR;
		writeq_be(reg, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:	HWQ associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}
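
/*
 * The toggle bit is how producer and consumer stay in step without a
 * shared index: the AFU writes each response handle with the current
 * generation's T bit and flips the sense it writes every time it wraps.
 * The consumer mirrors that by XORing its expected toggle on wrap, so a
 * stale entry from the previous lap fails the comparison and ends the loop.
 */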

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	/* Silently drop spurious interrupts when queue is not online */
	if (!hwq->hrrq_online) {
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};
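
/*
 * The seemingly odd port order (1, 0, 3, 2) mirrors the bit layout of the
 * async status register, since ainfo[] is looked up by bit position. The
 * handler below double-checks this invariant at runtime by verifying that
 * ainfo[bit].status == 1ULL << bit before acting on an entry.
 */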

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
						cfg->dev_id->driver_data;
	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 *
	 * Allow for WWPN not being found for all devices, setting
	 * the returned WWPN to zero when not found. Notify with a
	 * log error for cards that should have had WWPN keywords
	 * in the VPD - cards requiring WWPN will not have their
	 * ports programmed and operate in an undefined state.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (i < 0) {
			if (wwpn_vpd_required)
				dev_err(dev, "%s: Port %d WWPN not found\n",
					__func__, k);
			wwpn[k] = 0ULL;
			continue;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	void *cookie;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		cookie = hwq->ctx_cookie;

		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	int j;
	void *ctx;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
		hwq->hrrq_online = true;

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each master */
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);
			ctx = hwq->ctx_cookie;

			for (j = 0; j < hwq->num_irqs; j++) {
				reg = cfg->ops->get_irq_objhndl(ctx, j);
				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
			}

			reg = hwq->ctx_hndl;
			writeq_be(SISL_LISN_PASID(reg, reg),
				  &hwq->ctrl_map->lisn_pasid[0]);
			writeq_be(SISL_LISN_PASID(0UL, reg),
				  &hwq->ctrl_map->lisn_pasid[1]);
		}
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			  &hwq->ctrl_map->ctx_cap);
	}

	/*
	 * Determine write-same unmap support for host by evaluating the unmap
	 * sector support bit of the context control register associated with
	 * the primary hardware queue. Note that while this status is reflected
	 * in a context register, the outcome can be assumed to be host-wide.
	 */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	reg = readq_be(&hwq->host_map->ctx_ctrl);
	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
		cfg->ws_unmap = true;

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}

1886 /**
1887 * start_afu() - initializes and starts the AFU
1888 * @cfg: Internal structure associated with the host.
1889 */
1890 static int start_afu(struct cxlflash_cfg *cfg)
1891 {
1892 struct afu *afu = cfg->afu;
1893 struct device *dev = &cfg->dev->dev;
1894 struct hwq *hwq;
1895 int rc = 0;
1896 int i;
1897
1898 init_pcr(cfg);
1899
1900 /* Initialize each HWQ */
1901 for (i = 0; i < afu->num_hwqs; i++) {
1902 hwq = get_hwq(afu, i);
1903
1904 /* After an AFU reset, RRQ entries are stale, clear them */
1905 memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1906
1907 /* Initialize RRQ pointers */
1908 hwq->hrrq_start = &hwq->rrq_entry[0];
1909 hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1910 hwq->hrrq_curr = hwq->hrrq_start;
1911 hwq->toggle = 1;
1912
1913 /* Initialize spin locks */
1914 spin_lock_init(&hwq->hrrq_slock);
1915 spin_lock_init(&hwq->hsq_slock);
1916
1917 /* Initialize SQ */
1918 if (afu_is_sq_cmd_mode(afu)) {
1919 memset(&hwq->sq, 0, sizeof(hwq->sq));
1920 hwq->hsq_start = &hwq->sq[0];
1921 hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1922 hwq->hsq_curr = hwq->hsq_start;
1923
1924 atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1925 }
1926
1927 /* Initialize IRQ poll */
1928 if (afu_is_irqpoll_enabled(afu))
1929 irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1930 cxlflash_irqpoll);
1931
1932 }
1933
1934 rc = init_global(cfg);
1935
1936 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1937 return rc;
1938 }
1939
1940 /**
1941 * init_intr() - setup interrupt handlers for the master context
1942 * @cfg: Internal structure associated with the host.
1943 * @hwq: Hardware queue to initialize.
1944 *
1945 * Return: UNDO_NOOP on success, the undo level to unwind on failure
1946 */
1947 static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1948 struct hwq *hwq)
1949 {
1950 struct device *dev = &cfg->dev->dev;
1951 void *ctx = hwq->ctx_cookie;
1952 int rc = 0;
1953 enum undo_level level = UNDO_NOOP;
1954 bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1955 int num_irqs = hwq->num_irqs;
1956
1957 rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1958 if (unlikely(rc)) {
1959 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1960 __func__, rc);
1961 level = UNDO_NOOP;
1962 goto out;
1963 }
1964
1965 rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1966 "SISL_MSI_SYNC_ERROR");
1967 if (unlikely(rc <= 0)) {
1968 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1969 level = FREE_IRQ;
1970 goto out;
1971 }
1972
1973 rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1974 "SISL_MSI_RRQ_UPDATED");
1975 if (unlikely(rc <= 0)) {
1976 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1977 level = UNMAP_ONE;
1978 goto out;
1979 }
1980
1981 /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1982 if (!is_primary_hwq)
1983 goto out;
1984
1985 rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1986 "SISL_MSI_ASYNC_ERROR");
1987 if (unlikely(rc <= 0)) {
1988 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1989 level = UNMAP_TWO;
1990 goto out;
1991 }
1992 out:
1993 return level;
1994 }
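
/*
 * Note on the pattern above: rather than unwinding partial IRQ setup
 * locally, init_intr() reports how far it got through the undo level.
 * init_mc() hands that level to term_intr() on its error path, so the
 * teardown logic lives in exactly one place.
 */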
1995
1996 /**
1997 * init_mc() - create and register as the master context
1998 * @cfg: Internal structure associated with the host.
1999 * @index: HWQ Index of the master context.
2000 *
2001 * Return: 0 on success, -errno on failure
2002 */
2003 static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2004 {
2005 void *ctx;
2006 struct device *dev = &cfg->dev->dev;
2007 struct hwq *hwq = get_hwq(cfg->afu, index);
2008 int rc = 0;
2009 int num_irqs;
2010 enum undo_level level;
2011
2012 hwq->afu = cfg->afu;
2013 hwq->index = index;
2014 INIT_LIST_HEAD(&hwq->pending_cmds);
2015
2016 if (index == PRIMARY_HWQ) {
2017 ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2018 num_irqs = 3;
2019 } else {
2020 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2021 num_irqs = 2;
2022 }
2023 if (IS_ERR_OR_NULL(ctx)) {
2024 rc = -ENOMEM;
2025 goto err1;
2026 }
2027
2028 WARN_ON(hwq->ctx_cookie);
2029 hwq->ctx_cookie = ctx;
2030 hwq->num_irqs = num_irqs;
2031
2032 /* Set it up as a master with the CXL */
2033 cfg->ops->set_master(ctx);
2034
2035 /* Reset AFU when initializing primary context */
2036 if (index == PRIMARY_HWQ) {
2037 rc = cfg->ops->afu_reset(ctx);
2038 if (unlikely(rc)) {
2039 dev_err(dev, "%s: AFU reset failed rc=%d\n",
2040 __func__, rc);
2041 goto err1;
2042 }
2043 }
2044
2045 level = init_intr(cfg, hwq);
2046 if (unlikely(level)) {
2047 dev_err(dev, "%s: interrupt init failed level=%d\n",
2048 __func__, level);
2049 rc = -ENODEV;
2050 goto err2;
2049 }
2050
2051 /* Finally, activate the context by starting it */
2052 rc = cfg->ops->start_context(hwq->ctx_cookie);
2053 if (unlikely(rc)) {
2054 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2055 level = UNMAP_THREE;
2056 goto err2;
2057 }
2058
2059 out:
2060 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2061 return rc;
2062 err2:
2063 term_intr(cfg, level, index);
2064 if (index != PRIMARY_HWQ)
2065 cfg->ops->release_context(ctx);
2066 err1:
2067 hwq->ctx_cookie = NULL;
2068 goto out;
2069 }
2070
2071 /**
2072 * get_num_afu_ports() - determines and configures the number of AFU ports
2073 * @cfg: Internal structure associated with the host.
2074 *
2075 * This routine determines the number of AFU ports by converting the global
2076 * port selection mask. The converted value is only valid following an AFU
2077 * reset (explicit or power-on). This routine must be invoked shortly after
2078 * mapping as other routines are dependent on the number of ports during the
2079 * initialization sequence.
2080 *
2081 * To support legacy AFUs that might not have reflected an initial global
2082 * port mask (value read is 0), default to the number of ports originally
2083 * supported by the cxlflash driver (2) before hardware with other port
2084 * offerings was introduced.
2085 */
2086 static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2087 {
2088 struct afu *afu = cfg->afu;
2089 struct device *dev = &cfg->dev->dev;
2090 u64 port_mask;
2091 int num_fc_ports = LEGACY_FC_PORTS;
2092
2093 port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2094 if (port_mask != 0ULL)
2095 num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2096
2097 dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2098 __func__, port_mask, num_fc_ports);
2099
2100 cfg->num_fc_ports = num_fc_ports;
2101 cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2102 }
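
/*
 * Worked example (illustrative values): a fully reset 4-port card
 * reflects port_mask = 0xf, giving ilog2(0xf) + 1 = 3 + 1 = 4 ports;
 * a legacy AFU that reports a mask of 0 falls back to LEGACY_FC_PORTS.
 */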
2103
2104 /**
2105 * init_afu() - setup as master context and start AFU
2106 * @cfg: Internal structure associated with the host.
2107 *
2108 * This routine is a higher level of control for configuring the
2109 * AFU on probe and reset paths.
2110 *
2111 * Return: 0 on success, -errno on failure
2112 */
2113 static int init_afu(struct cxlflash_cfg *cfg)
2114 {
2115 u64 reg;
2116 int rc = 0;
2117 struct afu *afu = cfg->afu;
2118 struct device *dev = &cfg->dev->dev;
2119 struct hwq *hwq;
2120 int i;
2121
2122 cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2123
2124 mutex_init(&afu->sync_active);
2125 afu->num_hwqs = afu->desired_hwqs;
2126 for (i = 0; i < afu->num_hwqs; i++) {
2127 rc = init_mc(cfg, i);
2128 if (rc) {
2129 dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2130 __func__, rc, i);
2131 goto err1;
2132 }
2133 }
2134
2135 /* Map the entire MMIO space of the AFU using the first context */
2136 hwq = get_hwq(afu, PRIMARY_HWQ);
2137 afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2138 if (!afu->afu_map) {
2139 dev_err(dev, "%s: psa_map failed\n", __func__);
2140 rc = -ENOMEM;
2141 goto err1;
2142 }
2143
2144 /* No byte reverse on reading afu_version, or the string will be backwards */
2145 reg = readq(&afu->afu_map->global.regs.afu_version);
2146 memcpy(afu->version, &reg, sizeof(reg));
2147 afu->interface_version =
2148 readq_be(&afu->afu_map->global.regs.interface_version);
2149 if ((afu->interface_version + 1) == 0) {
2150 dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2151 "interface version %016llx\n", afu->version,
2152 afu->interface_version);
2153 rc = -EINVAL;
2154 goto err1;
2155 }
2156
2157 if (afu_is_sq_cmd_mode(afu)) {
2158 afu->send_cmd = send_cmd_sq;
2159 afu->context_reset = context_reset_sq;
2160 } else {
2161 afu->send_cmd = send_cmd_ioarrin;
2162 afu->context_reset = context_reset_ioarrin;
2163 }
2164
2165 dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2166 afu->version, afu->interface_version);
2167
2168 get_num_afu_ports(cfg);
2169
2170 rc = start_afu(cfg);
2171 if (rc) {
2172 dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2173 goto err1;
2174 }
2175
2176 afu_err_intr_init(cfg->afu);
2177 for (i = 0; i < afu->num_hwqs; i++) {
2178 hwq = get_hwq(afu, i);
2179
2180 hwq->room = readq_be(&hwq->host_map->cmd_room);
2181 }
2182
2183 /* Restore the LUN mappings */
2184 cxlflash_restore_luntable(cfg);
2185 out:
2186 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2187 return rc;
2188
2189 err1:
2190 for (i = afu->num_hwqs - 1; i >= 0; i--) {
2191 term_intr(cfg, UNMAP_THREE, i);
2192 term_mc(cfg, i);
2193 }
2194 goto out;
2195 }
2196
2197 /**
2198 * afu_reset() - resets the AFU
2199 * @cfg: Internal structure associated with the host.
2200 *
2201 * Return: 0 on success, -errno on failure
2202 */
2203 static int afu_reset(struct cxlflash_cfg *cfg)
2204 {
2205 struct device *dev = &cfg->dev->dev;
2206 int rc = 0;
2207
2208 /* Stop the context before the reset. Since the context is
2209 * no longer available, restart it after the reset completes.
2210 */
2211 term_afu(cfg);
2212
2213 rc = init_afu(cfg);
2214
2215 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2216 return rc;
2217 }
2218
2219 /**
2220 * drain_ioctls() - wait until all currently executing ioctls have completed
2221 * @cfg: Internal structure associated with the host.
2222 *
2223 * Obtain write access to read/write semaphore that wraps ioctl
2224 * handling to 'drain' ioctls currently executing.
2225 */
2226 static void drain_ioctls(struct cxlflash_cfg *cfg)
2227 {
2228 down_write(&cfg->ioctl_rwsem);
2229 up_write(&cfg->ioctl_rwsem);
2230 }
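
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): an
 * ioctl service thread participates in the drain above by holding the
 * semaphore for read, as cxlflash_chr_ioctl() does later in this file.
 */
static void __maybe_unused example_ioctl_reader(struct cxlflash_cfg *cfg)
{
	down_read(&cfg->ioctl_rwsem);	/* Blocks while a drain holds write */
	/* ... service the ioctl ... */
	up_read(&cfg->ioctl_rwsem);
}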
2231
2232 /**
2233 * cxlflash_async_reset_host() - asynchronous host reset handler
2234 * @data: Private data provided while scheduling reset.
2235 * @cookie: Cookie that can be used for checkpointing.
2236 */
2237 static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2238 {
2239 struct cxlflash_cfg *cfg = data;
2240 struct device *dev = &cfg->dev->dev;
2241 int rc = 0;
2242
2243 if (cfg->state != STATE_RESET) {
2244 dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2245 __func__, cfg->state);
2246 goto out;
2247 }
2248
2249 drain_ioctls(cfg);
2250 cxlflash_mark_contexts_error(cfg);
2251 rc = afu_reset(cfg);
2252 if (rc)
2253 cfg->state = STATE_FAILTERM;
2254 else
2255 cfg->state = STATE_NORMAL;
2256 wake_up_all(&cfg->reset_waitq);
2257
2258 out:
2259 scsi_unblock_requests(cfg->host);
2260 }
2261
2262 /**
2263 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2264 * @cfg: Internal structure associated with the host.
2265 */
2266 static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2267 {
2268 struct device *dev = &cfg->dev->dev;
2269
2270 if (cfg->state != STATE_NORMAL) {
2271 dev_dbg(dev, "%s: Not performing reset state=%d\n",
2272 __func__, cfg->state);
2273 return;
2274 }
2275
2276 cfg->state = STATE_RESET;
2277 scsi_block_requests(cfg->host);
2278 cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2279 cfg);
2280 }
2281
2282 /**
2283 * send_afu_cmd() - builds and sends an internal AFU command
2284 * @afu: AFU associated with the host.
2285 * @rcb: Pre-populated IOARCB describing command to send.
2286 *
2287 * The AFU can only take one internal AFU command at a time. This limitation is
2288 * enforced by using a mutex to provide exclusive access to the AFU during the
2289 * operation. This design point requires that calling threads not run in
2290 * interrupt context, since they may sleep during concurrent AFU operations.
2291 *
2292 * The command status is optionally passed back to the caller when the caller
2293 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2294 *
2295 * Return:
2296 * 0 on success, -errno on failure
2297 */
2298 static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2299 {
2300 struct cxlflash_cfg *cfg = afu->parent;
2301 struct device *dev = &cfg->dev->dev;
2302 struct afu_cmd *cmd = NULL;
2303 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2304 ulong lock_flags;
2305 char *buf = NULL;
2306 int rc = 0;
2307 int nretry = 0;
2308
2309 if (cfg->state != STATE_NORMAL) {
2310 dev_dbg(dev, "%s: Command not sent, device not ready, state=%u\n",
2311 __func__, cfg->state);
2312 return 0;
2313 }
2314
2315 mutex_lock(&afu->sync_active);
2316 atomic_inc(&afu->cmds_active);
2317 buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2318 if (unlikely(!buf)) {
2319 dev_err(dev, "%s: no memory for command\n", __func__);
2320 rc = -ENOMEM;
2321 goto out;
2322 }
2323
2324 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2325
2326 retry:
2327 memset(cmd, 0, sizeof(*cmd));
2328 memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2329 INIT_LIST_HEAD(&cmd->queue);
2330 init_completion(&cmd->cevent);
2331 cmd->parent = afu;
2332 cmd->hwq_index = hwq->index;
2333 cmd->rcb.ctx_id = hwq->ctx_hndl;
2334
2335 dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2336 __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2337
2338 rc = afu->send_cmd(afu, cmd);
2339 if (unlikely(rc)) {
2340 rc = -ENOBUFS;
2341 goto out;
2342 }
2343
2344 rc = wait_resp(afu, cmd);
2345 switch (rc) {
2346 case -ETIMEDOUT:
2347 rc = afu->context_reset(hwq);
2348 if (rc) {
2349 /* Delete the command from pending_cmds list */
2350 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2351 list_del(&cmd->list);
2352 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2353
2354 cxlflash_schedule_async_reset(cfg);
2355 break;
2356 }
2357 fallthrough; /* to retry */
2358 case -EAGAIN:
2359 if (++nretry < 2)
2360 goto retry;
2361 fallthrough; /* to exit */
2362 default:
2363 break;
2364 }
2365
2366 if (rcb->ioasa)
2367 *rcb->ioasa = cmd->sa;
2368 out:
2369 atomic_dec(&afu->cmds_active);
2370 mutex_unlock(&afu->sync_active);
2371 kfree(buf);
2372 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2373 return rc;
2374 }
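
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how
 * a caller can use the optional IOASA passback described above to inspect
 * command status. The opcode and timeout reuse the existing sync command
 * definitions purely for demonstration.
 */
static int __maybe_unused example_afu_cmd_with_status(struct afu *afu)
{
	struct sisl_ioarcb rcb = { 0 };
	struct sisl_ioasa asa;
	int rc;

	memset(&asa, 0, sizeof(asa));

	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_SYNC_TIMEOUT;
	rcb.ioasa = &asa;	/* Request status passback */
	rcb.cdb[0] = SISL_AFU_CMD_SYNC;

	rc = send_afu_cmd(afu, &rcb);
	if (!rc && asa.ioasc)	/* Command completed with an error status */
		rc = -EIO;
	return rc;
}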
2375
2376 /**
2377 * cxlflash_afu_sync() - builds and sends an AFU sync command
2378 * @afu: AFU associated with the host.
2379 * @ctx: Identifies context requesting sync.
2380 * @res: Identifies resource requesting sync.
2381 * @mode: Type of sync to issue (lightweight, heavyweight, global).
2382 *
2383 * AFU sync operations are only necessary and allowed when the device is
2384 * operating normally. When not operating normally, sync requests can occur as
2385 * part of cleaning up resources associated with an adapter prior to removal.
2386 * In this scenario, these requests are simply ignored (safe due to the AFU
2387 * going away).
2388 *
2389 * Return:
2390 * 0 on success, -errno on failure
2391 */
2392 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2393 {
2394 struct cxlflash_cfg *cfg = afu->parent;
2395 struct device *dev = &cfg->dev->dev;
2396 struct sisl_ioarcb rcb = { 0 };
2397
2398 dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2399 __func__, afu, ctx, res, mode);
2400
2401 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2402 rcb.msi = SISL_MSI_RRQ_UPDATED;
2403 rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2404
2405 rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2406 rcb.cdb[1] = mode;
2407 put_unaligned_be16(ctx, &rcb.cdb[2]);
2408 put_unaligned_be32(res, &rcb.cdb[4]);
2409
2410 return send_afu_cmd(afu, &rcb);
2411 }
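
/*
 * For reference, the sync CDB built above lays out as follows, with
 * multi-byte fields in big-endian order per the put_unaligned_be*() calls:
 *
 * byte 0      SISL_AFU_CMD_SYNC opcode
 * byte 1      sync mode (lightweight, heavyweight, global)
 * bytes 2-3   context handle
 * bytes 4-7   resource handle
 */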
2412
2413 /**
2414 * cxlflash_eh_abort_handler() - abort a SCSI command
2415 * @scp: SCSI command to abort.
2416 *
2417 * CXL Flash devices do not support a single command abort. Reset the context
2418 * as per SISLite specification. Flush any pending commands in the hardware
2419 * queue before the reset.
2420 *
2421 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2422 */
2423 static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2424 {
2425 int rc = FAILED;
2426 struct Scsi_Host *host = scp->device->host;
2427 struct cxlflash_cfg *cfg = shost_priv(host);
2428 struct afu_cmd *cmd = sc_to_afuc(scp);
2429 struct device *dev = &cfg->dev->dev;
2430 struct afu *afu = cfg->afu;
2431 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2432
2433 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2434 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2435 scp->device->channel, scp->device->id, scp->device->lun,
2436 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2437 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2438 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2439 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2440
2441 /* When the state is not normal, another reset/reload is in progress.
2442 * Return FAILED so the mid-layer invokes the host reset handler.
2443 */
2444 if (cfg->state != STATE_NORMAL) {
2445 dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2446 __func__, cfg->state);
2447 goto out;
2448 }
2449
2450 rc = afu->context_reset(hwq);
2451 if (unlikely(rc))
2452 goto out;
2453
2454 rc = SUCCESS;
2455
2456 out:
2457 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2458 return rc;
2459 }
2460
2461 /**
2462 * cxlflash_eh_device_reset_handler() - reset a single LUN
2463 * @scp: SCSI command to send.
2464 *
2465 * Return:
2466 * SUCCESS as defined in scsi/scsi.h
2467 * FAILED as defined in scsi/scsi.h
2468 */
2469 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2470 {
2471 int rc = SUCCESS;
2472 struct scsi_device *sdev = scp->device;
2473 struct Scsi_Host *host = sdev->host;
2474 struct cxlflash_cfg *cfg = shost_priv(host);
2475 struct device *dev = &cfg->dev->dev;
2476 int rcr = 0;
2477
2478 dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2479 host->host_no, sdev->channel, sdev->id, sdev->lun);
2480 retry:
2481 switch (cfg->state) {
2482 case STATE_NORMAL:
2483 rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2484 if (unlikely(rcr))
2485 rc = FAILED;
2486 break;
2487 case STATE_RESET:
2488 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2489 goto retry;
2490 default:
2491 rc = FAILED;
2492 break;
2493 }
2494
2495 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2496 return rc;
2497 }
2498
2499 /**
2500 * cxlflash_eh_host_reset_handler() - reset the host adapter
2501 * @scp: SCSI command from stack identifying host.
2502 *
2503 * Following a reset, the state is evaluated again in case an EEH occurred
2504 * during the reset. In such a scenario, the host reset will either yield
2505 * until the EEH recovery is complete or return success or failure based
2506 * upon the current device state.
2507 *
2508 * Return:
2509 * SUCCESS as defined in scsi/scsi.h
2510 * FAILED as defined in scsi/scsi.h
2511 */
2512 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2513 {
2514 int rc = SUCCESS;
2515 int rcr = 0;
2516 struct Scsi_Host *host = scp->device->host;
2517 struct cxlflash_cfg *cfg = shost_priv(host);
2518 struct device *dev = &cfg->dev->dev;
2519
2520 dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2521
2522 switch (cfg->state) {
2523 case STATE_NORMAL:
2524 cfg->state = STATE_RESET;
2525 drain_ioctls(cfg);
2526 cxlflash_mark_contexts_error(cfg);
2527 rcr = afu_reset(cfg);
2528 if (rcr) {
2529 rc = FAILED;
2530 cfg->state = STATE_FAILTERM;
2531 } else
2532 cfg->state = STATE_NORMAL;
2533 wake_up_all(&cfg->reset_waitq);
2534 ssleep(1);
2535 fallthrough;
2536 case STATE_RESET:
2537 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2538 if (cfg->state == STATE_NORMAL)
2539 break;
2540 fallthrough;
2541 default:
2542 rc = FAILED;
2543 break;
2544 }
2545
2546 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2547 return rc;
2548 }
2549
2550 /**
2551 * cxlflash_change_queue_depth() - change the queue depth for the device
2552 * @sdev: SCSI device destined for queue depth change.
2553 * @qdepth: Requested queue depth value to set.
2554 *
2555 * The requested queue depth is capped to the maximum supported value.
2556 *
2557 * Return: The actual queue depth set.
2558 */
2559 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2560 {
2562 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2563 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2564
2565 scsi_change_queue_depth(sdev, qdepth);
2566 return sdev->queue_depth;
2567 }
2568
2569 /**
2570 * cxlflash_show_port_status() - queries and presents the current port status
2571 * @port: Desired port for status reporting.
2572 * @cfg: Internal structure associated with the host.
2573 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2574 *
2575 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2576 */
2577 static ssize_t cxlflash_show_port_status(u32 port,
2578 struct cxlflash_cfg *cfg,
2579 char *buf)
2580 {
2581 struct device *dev = &cfg->dev->dev;
2582 char *disp_status;
2583 u64 status;
2584 __be64 __iomem *fc_port_regs;
2585
2586 WARN_ON(port >= MAX_FC_PORTS);
2587
2588 if (port >= cfg->num_fc_ports) {
2589 dev_info(dev, "%s: Port %d not supported on this card.\n",
2590 __func__, port);
2591 return -EINVAL;
2592 }
2593
2594 fc_port_regs = get_fc_port_regs(cfg, port);
2595 status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2596 status &= FC_MTIP_STATUS_MASK;
2597
2598 if (status == FC_MTIP_STATUS_ONLINE)
2599 disp_status = "online";
2600 else if (status == FC_MTIP_STATUS_OFFLINE)
2601 disp_status = "offline";
2602 else
2603 disp_status = "unknown";
2604
2605 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2606 }
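
/*
 * Example (illustrative, hypothetical host number): from userspace the
 * decoded status is read through sysfs, e.g.:
 *
 * $ cat /sys/class/scsi_host/host0/port0
 * online
 */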
2607
2608 /**
2609 * port0_show() - queries and presents the current status of port 0
2610 * @dev: Generic device associated with the host owning the port.
2611 * @attr: Device attribute representing the port.
2612 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2613 *
2614 * Return: The size of the ASCII string returned in @buf.
2615 */
2616 static ssize_t port0_show(struct device *dev,
2617 struct device_attribute *attr,
2618 char *buf)
2619 {
2620 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2621
2622 return cxlflash_show_port_status(0, cfg, buf);
2623 }
2624
2625 /**
2626 * port1_show() - queries and presents the current status of port 1
2627 * @dev: Generic device associated with the host owning the port.
2628 * @attr: Device attribute representing the port.
2629 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2630 *
2631 * Return: The size of the ASCII string returned in @buf.
2632 */
2633 static ssize_t port1_show(struct device *dev,
2634 struct device_attribute *attr,
2635 char *buf)
2636 {
2637 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2638
2639 return cxlflash_show_port_status(1, cfg, buf);
2640 }
2641
2642 /**
2643 * port2_show() - queries and presents the current status of port 2
2644 * @dev: Generic device associated with the host owning the port.
2645 * @attr: Device attribute representing the port.
2646 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2647 *
2648 * Return: The size of the ASCII string returned in @buf.
2649 */
2650 static ssize_t port2_show(struct device *dev,
2651 struct device_attribute *attr,
2652 char *buf)
2653 {
2654 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2655
2656 return cxlflash_show_port_status(2, cfg, buf);
2657 }
2658
2659 /**
2660 * port3_show() - queries and presents the current status of port 3
2661 * @dev: Generic device associated with the host owning the port.
2662 * @attr: Device attribute representing the port.
2663 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2664 *
2665 * Return: The size of the ASCII string returned in @buf.
2666 */
2667 static ssize_t port3_show(struct device *dev,
2668 struct device_attribute *attr,
2669 char *buf)
2670 {
2671 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2672
2673 return cxlflash_show_port_status(3, cfg, buf);
2674 }
2675
2676 /**
2677 * lun_mode_show() - presents the current LUN mode of the host
2678 * @dev: Generic device associated with the host.
2679 * @attr: Device attribute representing the LUN mode.
2680 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2681 *
2682 * Return: The size of the ASCII string returned in @buf.
2683 */
2684 static ssize_t lun_mode_show(struct device *dev,
2685 struct device_attribute *attr, char *buf)
2686 {
2687 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2688 struct afu *afu = cfg->afu;
2689
2690 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2691 }
2692
2693 /**
2694 * lun_mode_store() - sets the LUN mode of the host
2695 * @dev: Generic device associated with the host.
2696 * @attr: Device attribute representing the LUN mode.
2697 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2698 * @count: Length of data residing in @buf.
2699 *
2700 * The CXL Flash AFU supports a dummy LUN mode where the external
2701 * links and storage are not required. Space on the FPGA is used
2702 * to create 1 or 2 small LUNs which are presented to the system
2703 * as if they were a normal storage device. This feature is useful
2704 * during development and also provides manufacturing with a way
2705 * to test the AFU without an actual device.
2706 *
2707 * 0 = external LUN[s] (default)
2708 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2709 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2710 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2711 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2712 *
2713 * Return: The number of bytes consumed from @buf (@count).
2714 */
2715 static ssize_t lun_mode_store(struct device *dev,
2716 struct device_attribute *attr,
2717 const char *buf, size_t count)
2718 {
2719 struct Scsi_Host *shost = class_to_shost(dev);
2720 struct cxlflash_cfg *cfg = shost_priv(shost);
2721 struct afu *afu = cfg->afu;
2722 int rc;
2723 u32 lun_mode;
2724
2725 rc = kstrtouint(buf, 10, &lun_mode);
2726 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2727 afu->internal_lun = lun_mode;
2728
2729 /*
2730 * When configured for internal LUN, there is only one channel,
2731 * channel number 0, else there will be one less than the number
2732 * of fc ports for this card.
2733 */
2734 if (afu->internal_lun)
2735 shost->max_channel = 0;
2736 else
2737 shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2738
2739 afu_reset(cfg);
2740 scsi_scan_host(cfg->host);
2741 }
2742
2743 return count;
2744 }
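
/*
 * Example (illustrative, hypothetical host number): selecting a single
 * internal 512B-block LUN triggers the reset and rescan coded above:
 *
 * $ echo 1 > /sys/class/scsi_host/host0/lun_mode
 */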
2745
2746 /**
2747 * ioctl_version_show() - presents the current ioctl version of the host
2748 * @dev: Generic device associated with the host.
2749 * @attr: Device attribute representing the ioctl version.
2750 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2751 *
2752 * Return: The size of the ASCII string returned in @buf.
2753 */
2754 static ssize_t ioctl_version_show(struct device *dev,
2755 struct device_attribute *attr, char *buf)
2756 {
2757 ssize_t bytes = 0;
2758
2759 bytes = scnprintf(buf, PAGE_SIZE,
2760 "disk: %u\n", DK_CXLFLASH_VERSION_0);
2761 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2762 "host: %u\n", HT_CXLFLASH_VERSION_0);
2763
2764 return bytes;
2765 }
2766
2767 /**
2768 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2769 * @port: Desired port for status reporting.
2770 * @cfg: Internal structure associated with the host.
2771 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2772 *
2773 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2774 */
2775 static ssize_t cxlflash_show_port_lun_table(u32 port,
2776 struct cxlflash_cfg *cfg,
2777 char *buf)
2778 {
2779 struct device *dev = &cfg->dev->dev;
2780 __be64 __iomem *fc_port_luns;
2781 int i;
2782 ssize_t bytes = 0;
2783
2784 WARN_ON(port >= MAX_FC_PORTS);
2785
2786 if (port >= cfg->num_fc_ports) {
2787 dev_info(dev, "%s: Port %d not supported on this card.\n",
2788 __func__, port);
2789 return -EINVAL;
2790 }
2791
2792 fc_port_luns = get_fc_port_luns(cfg, port);
2793
2794 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2795 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2796 "%03d: %016llx\n",
2797 i, readq_be(&fc_port_luns[i]));
2798 return bytes;
2799 }
2800
2801 /**
2802 * port0_lun_table_show() - presents the current LUN table of port 0
2803 * @dev: Generic device associated with the host owning the port.
2804 * @attr: Device attribute representing the port.
2805 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2806 *
2807 * Return: The size of the ASCII string returned in @buf.
2808 */
2809 static ssize_t port0_lun_table_show(struct device *dev,
2810 struct device_attribute *attr,
2811 char *buf)
2812 {
2813 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2814
2815 return cxlflash_show_port_lun_table(0, cfg, buf);
2816 }
2817
2818 /**
2819 * port1_lun_table_show() - presents the current LUN table of port 1
2820 * @dev: Generic device associated with the host owning the port.
2821 * @attr: Device attribute representing the port.
2822 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2823 *
2824 * Return: The size of the ASCII string returned in @buf.
2825 */
2826 static ssize_t port1_lun_table_show(struct device *dev,
2827 struct device_attribute *attr,
2828 char *buf)
2829 {
2830 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2831
2832 return cxlflash_show_port_lun_table(1, cfg, buf);
2833 }
2834
2835 /**
2836 * port2_lun_table_show() - presents the current LUN table of port 2
2837 * @dev: Generic device associated with the host owning the port.
2838 * @attr: Device attribute representing the port.
2839 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2840 *
2841 * Return: The size of the ASCII string returned in @buf.
2842 */
2843 static ssize_t port2_lun_table_show(struct device *dev,
2844 struct device_attribute *attr,
2845 char *buf)
2846 {
2847 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2848
2849 return cxlflash_show_port_lun_table(2, cfg, buf);
2850 }
2851
2852 /**
2853 * port3_lun_table_show() - presents the current LUN table of port 3
2854 * @dev: Generic device associated with the host owning the port.
2855 * @attr: Device attribute representing the port.
2856 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2857 *
2858 * Return: The size of the ASCII string returned in @buf.
2859 */
2860 static ssize_t port3_lun_table_show(struct device *dev,
2861 struct device_attribute *attr,
2862 char *buf)
2863 {
2864 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2865
2866 return cxlflash_show_port_lun_table(3, cfg, buf);
2867 }
2868
2869 /**
2870 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2871 * @dev: Generic device associated with the host.
2872 * @attr: Device attribute representing the IRQ poll weight.
2873 * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
2874 * weight in ASCII.
2875 *
2876 * An IRQ poll weight of 0 indicates polling is disabled.
2877 *
2878 * Return: The size of the ASCII string returned in @buf.
2879 */
2880 static ssize_t irqpoll_weight_show(struct device *dev,
2881 struct device_attribute *attr, char *buf)
2882 {
2883 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2884 struct afu *afu = cfg->afu;
2885
2886 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2887 }
2888
2889 /**
2890 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2891 * @dev: Generic device associated with the host.
2892 * @attr: Device attribute representing the IRQ poll weight.
2893 * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
2894 * weight in ASCII.
2895 * @count: Length of data residing in @buf.
2896 *
2897 * An IRQ poll weight of 0 indicates polling is disabled.
2898 *
2899 * Return: @count on success, -errno on failure.
2900 */
2901 static ssize_t irqpoll_weight_store(struct device *dev,
2902 struct device_attribute *attr,
2903 const char *buf, size_t count)
2904 {
2905 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2906 struct device *cfgdev = &cfg->dev->dev;
2907 struct afu *afu = cfg->afu;
2908 struct hwq *hwq;
2909 u32 weight;
2910 int rc, i;
2911
2912 rc = kstrtouint(buf, 10, &weight);
2913 if (rc)
2914 return -EINVAL;
2915
2916 if (weight > 256) {
2917 dev_info(cfgdev,
2918 "Invalid IRQ poll weight. It must be 256 or less.\n");
2919 return -EINVAL;
2920 }
2921
2922 if (weight == afu->irqpoll_weight) {
2923 dev_info(cfgdev,
2924 "Current IRQ poll weight has the same weight.\n");
2925 return -EINVAL;
2926 }
2927
2928 if (afu_is_irqpoll_enabled(afu)) {
2929 for (i = 0; i < afu->num_hwqs; i++) {
2930 hwq = get_hwq(afu, i);
2931
2932 irq_poll_disable(&hwq->irqpoll);
2933 }
2934 }
2935
2936 afu->irqpoll_weight = weight;
2937
2938 if (weight > 0) {
2939 for (i = 0; i < afu->num_hwqs; i++) {
2940 hwq = get_hwq(afu, i);
2941
2942 irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2943 }
2944 }
2945
2946 return count;
2947 }
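
/*
 * Example (illustrative, hypothetical host number): enabling IRQ polling
 * with a budget of 64 completions per poll cycle:
 *
 * $ echo 64 > /sys/class/scsi_host/host0/irqpoll_weight
 */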
2948
2949 /**
2950 * num_hwqs_show() - presents the number of hardware queues for the host
2951 * @dev: Generic device associated with the host.
2952 * @attr: Device attribute representing the number of hardware queues.
2953 * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
2954 * queues in ASCII.
2955 *
2956 * Return: The size of the ASCII string returned in @buf.
2957 */
2958 static ssize_t num_hwqs_show(struct device *dev,
2959 struct device_attribute *attr, char *buf)
2960 {
2961 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2962 struct afu *afu = cfg->afu;
2963
2964 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2965 }
2966
2967 /**
2968 * num_hwqs_store() - sets the number of hardware queues for the host
2969 * @dev: Generic device associated with the host.
2970 * @attr: Device attribute representing the number of hardware queues.
2971 * @buf: Buffer of length PAGE_SIZE containing the number of hardware
2972 * queues in ASCII.
2973 * @count: Length of data residing in @buf.
2974 *
2975 * n > 0: num_hwqs = n
2976 * n = 0: num_hwqs = num_online_cpus()
2977 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2978 *
2979 * Return: @count on success, -errno on failure.
2980 */
2981 static ssize_t num_hwqs_store(struct device *dev,
2982 struct device_attribute *attr,
2983 const char *buf, size_t count)
2984 {
2985 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2986 struct afu *afu = cfg->afu;
2987 int rc;
2988 int nhwqs, num_hwqs;
2989
2990 rc = kstrtoint(buf, 10, &nhwqs);
2991 if (rc)
2992 return -EINVAL;
2993
2994 if (nhwqs >= 1)
2995 num_hwqs = nhwqs;
2996 else if (nhwqs == 0)
2997 num_hwqs = num_online_cpus();
2998 else
2999 num_hwqs = num_online_cpus() / abs(nhwqs);
3000
3001 afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3002 WARN_ON_ONCE(afu->desired_hwqs == 0);
3003
3004 retry:
3005 switch (cfg->state) {
3006 case STATE_NORMAL:
3007 cfg->state = STATE_RESET;
3008 drain_ioctls(cfg);
3009 cxlflash_mark_contexts_error(cfg);
3010 rc = afu_reset(cfg);
3011 if (rc)
3012 cfg->state = STATE_FAILTERM;
3013 else
3014 cfg->state = STATE_NORMAL;
3015 wake_up_all(&cfg->reset_waitq);
3016 break;
3017 case STATE_RESET:
3018 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3019 if (cfg->state == STATE_NORMAL)
3020 goto retry;
3021 fallthrough;
3022 default:
3023 /* Ideally should not happen */
3024 dev_err(dev, "%s: Device is not ready, state=%d\n",
3025 __func__, cfg->state);
3026 break;
3027 }
3028
3029 return count;
3030 }
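
/*
 * Worked example (illustrative values): on a 16-CPU system, writing "-4"
 * yields num_online_cpus() / abs(-4) = 16 / 4 = 4 hardware queues, and
 * writing "0" yields 16, both subject to the CXLFLASH_MAX_HWQS cap
 * applied above.
 */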
3031
3032 static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3033
3034 /**
3035 * hwq_mode_show() - presents the HWQ steering mode for the host
3036 * @dev: Generic device associated with the host.
3037 * @attr: Device attribute representing the HWQ steering mode.
3038 * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
3039 * as a character string.
3040 *
3041 * Return: The size of the ASCII string returned in @buf.
3042 */
3043 static ssize_t hwq_mode_show(struct device *dev,
3044 struct device_attribute *attr, char *buf)
3045 {
3046 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3047 struct afu *afu = cfg->afu;
3048
3049 return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3050 }
3051
3052 /**
3053 * hwq_mode_store() - sets the HWQ steering mode for the host
3054 * @dev: Generic device associated with the host.
3055 * @attr: Device attribute representing the HWQ steering mode.
3056 * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
3057 * as a character string.
3058 * @count: Length of data residing in @buf.
3059 *
3060 * rr = Round-Robin
3061 * tag = Block MQ Tagging
3062 * cpu = CPU Affinity
3063 *
3064 * Return: @count on success, -errno on failure.
3065 */
3066 static ssize_t hwq_mode_store(struct device *dev,
3067 struct device_attribute *attr,
3068 const char *buf, size_t count)
3069 {
3070 struct Scsi_Host *shost = class_to_shost(dev);
3071 struct cxlflash_cfg *cfg = shost_priv(shost);
3072 struct device *cfgdev = &cfg->dev->dev;
3073 struct afu *afu = cfg->afu;
3074 int i;
3075 u32 mode = MAX_HWQ_MODE;
3076
3077 for (i = 0; i < MAX_HWQ_MODE; i++) {
3078 if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3079 mode = i;
3080 break;
3081 }
3082 }
3083
3084 if (mode >= MAX_HWQ_MODE) {
3085 dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3086 return -EINVAL;
3087 }
3088
3089 afu->hwq_mode = mode;
3090
3091 return count;
3092 }
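
/*
 * Example (illustrative, hypothetical host number): steering commands by
 * CPU affinity:
 *
 * $ echo cpu > /sys/class/scsi_host/host0/hwq_mode
 */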
3093
3094 /**
3095 * mode_show() - presents the current mode of the device
3096 * @dev: Generic device associated with the device.
3097 * @attr: Device attribute representing the device mode.
3098 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3099 *
3100 * Return: The size of the ASCII string returned in @buf.
3101 */
3102 static ssize_t mode_show(struct device *dev,
3103 struct device_attribute *attr, char *buf)
3104 {
3105 struct scsi_device *sdev = to_scsi_device(dev);
3106
3107 return scnprintf(buf, PAGE_SIZE, "%s\n",
3108 sdev->hostdata ? "superpipe" : "legacy");
3109 }
3110
3111 /*
3112 * Host attributes
3113 */
3114 static DEVICE_ATTR_RO(port0);
3115 static DEVICE_ATTR_RO(port1);
3116 static DEVICE_ATTR_RO(port2);
3117 static DEVICE_ATTR_RO(port3);
3118 static DEVICE_ATTR_RW(lun_mode);
3119 static DEVICE_ATTR_RO(ioctl_version);
3120 static DEVICE_ATTR_RO(port0_lun_table);
3121 static DEVICE_ATTR_RO(port1_lun_table);
3122 static DEVICE_ATTR_RO(port2_lun_table);
3123 static DEVICE_ATTR_RO(port3_lun_table);
3124 static DEVICE_ATTR_RW(irqpoll_weight);
3125 static DEVICE_ATTR_RW(num_hwqs);
3126 static DEVICE_ATTR_RW(hwq_mode);
3127
3128 static struct device_attribute *cxlflash_host_attrs[] = {
3129 &dev_attr_port0,
3130 &dev_attr_port1,
3131 &dev_attr_port2,
3132 &dev_attr_port3,
3133 &dev_attr_lun_mode,
3134 &dev_attr_ioctl_version,
3135 &dev_attr_port0_lun_table,
3136 &dev_attr_port1_lun_table,
3137 &dev_attr_port2_lun_table,
3138 &dev_attr_port3_lun_table,
3139 &dev_attr_irqpoll_weight,
3140 &dev_attr_num_hwqs,
3141 &dev_attr_hwq_mode,
3142 NULL
3143 };
3144
3145 /*
3146 * Device attributes
3147 */
3148 static DEVICE_ATTR_RO(mode);
3149
3150 static struct device_attribute *cxlflash_dev_attrs[] = {
3151 &dev_attr_mode,
3152 NULL
3153 };
3154
3155 /*
3156 * Host template
3157 */
3158 static struct scsi_host_template driver_template = {
3159 .module = THIS_MODULE,
3160 .name = CXLFLASH_ADAPTER_NAME,
3161 .info = cxlflash_driver_info,
3162 .ioctl = cxlflash_ioctl,
3163 .proc_name = CXLFLASH_NAME,
3164 .queuecommand = cxlflash_queuecommand,
3165 .eh_abort_handler = cxlflash_eh_abort_handler,
3166 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3167 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3168 .change_queue_depth = cxlflash_change_queue_depth,
3169 .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3170 .can_queue = CXLFLASH_MAX_CMDS,
3171 .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3172 .this_id = -1,
3173 .sg_tablesize = 1, /* No scatter gather support */
3174 .max_sectors = CXLFLASH_MAX_SECTORS,
3175 .shost_attrs = cxlflash_host_attrs,
3176 .sdev_attrs = cxlflash_dev_attrs,
3177 };
3178
3179 /*
3180 * Device dependent values
3181 */
3182 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3183 CXLFLASH_WWPN_VPD_REQUIRED };
3184 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3185 CXLFLASH_NOTIFY_SHUTDOWN };
3186 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3187 (CXLFLASH_NOTIFY_SHUTDOWN |
3188 CXLFLASH_OCXL_DEV) };
3189
3190 /*
3191 * PCI device binding table
3192 */
3193 static struct pci_device_id cxlflash_pci_table[] = {
3194 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3196 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3198 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3200 {}
3201 };
3202
3203 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3204
3205 /**
3206 * cxlflash_worker_thread() - work thread handler for the AFU
3207 * @work: Work structure contained within cxlflash associated with host.
3208 *
3209 * Handles the following events:
3210 * - Link reset, which cannot be performed in interrupt context because
3211 * it can block for up to a few seconds
3212 * - Rescan the host
3213 */
3214 static void cxlflash_worker_thread(struct work_struct *work)
3215 {
3216 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3217 work_q);
3218 struct afu *afu = cfg->afu;
3219 struct device *dev = &cfg->dev->dev;
3220 __be64 __iomem *fc_port_regs;
3221 int port;
3222 ulong lock_flags;
3223
3224 /* Avoid MMIO if the device has failed */
3225
3226 if (cfg->state != STATE_NORMAL)
3227 return;
3228
3229 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3230
3231 if (cfg->lr_state == LINK_RESET_REQUIRED) {
3232 port = cfg->lr_port;
3233 if (port < 0)
3234 dev_err(dev, "%s: invalid port index %d\n",
3235 __func__, port);
3236 else {
3237 spin_unlock_irqrestore(cfg->host->host_lock,
3238 lock_flags);
3239
3240 /* The reset can block... */
3241 fc_port_regs = get_fc_port_regs(cfg, port);
3242 afu_link_reset(afu, port, fc_port_regs);
3243 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3244 }
3245
3246 cfg->lr_state = LINK_RESET_COMPLETE;
3247 }
3248
3249 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3250
3251 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3252 scsi_scan_host(cfg->host);
3253 }
3254
3255 /**
3256 * cxlflash_chr_open() - character device open handler
3257 * @inode: Device inode associated with this character device.
3258 * @file: File pointer for this device.
3259 *
3260 * Only users with admin privileges are allowed to open the character device.
3261 *
3262 * Return: 0 on success, -errno on failure
3263 */
3264 static int cxlflash_chr_open(struct inode *inode, struct file *file)
3265 {
3266 struct cxlflash_cfg *cfg;
3267
3268 if (!capable(CAP_SYS_ADMIN))
3269 return -EACCES;
3270
3271 cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3272 file->private_data = cfg;
3273
3274 return 0;
3275 }
3276
3277 /**
3278 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3279 * @cmd: The host ioctl command to decode.
3280 *
3281 * Return: A string identifying the decoded host ioctl.
3282 */
3283 static char *decode_hioctl(unsigned int cmd)
3284 {
3285 switch (cmd) {
3286 case HT_CXLFLASH_LUN_PROVISION:
3287 return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3288 }
3289
3290 return "UNKNOWN";
3291 }
3292
3293 /**
3294 * cxlflash_lun_provision() - host LUN provisioning handler
3295 * @cfg: Internal structure associated with the host.
3296 * @lunprov: Kernel copy of userspace ioctl data structure.
3297 *
3298 * Return: 0 on success, -errno on failure
3299 */
3300 static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3301 struct ht_cxlflash_lun_provision *lunprov)
3302 {
3303 struct afu *afu = cfg->afu;
3304 struct device *dev = &cfg->dev->dev;
3305 struct sisl_ioarcb rcb;
3306 struct sisl_ioasa asa;
3307 __be64 __iomem *fc_port_regs;
3308 u16 port = lunprov->port;
3309 u16 scmd = lunprov->hdr.subcmd;
3310 u16 type;
3311 u64 reg;
3312 u64 size;
3313 u64 lun_id;
3314 int rc = 0;
3315
3316 if (!afu_is_lun_provision(afu)) {
3317 rc = -ENOTSUPP;
3318 goto out;
3319 }
3320
3321 if (port >= cfg->num_fc_ports) {
3322 rc = -EINVAL;
3323 goto out;
3324 }
3325
3326 switch (scmd) {
3327 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3328 type = SISL_AFU_LUN_PROVISION_CREATE;
3329 size = lunprov->size;
3330 lun_id = 0;
3331 break;
3332 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3333 type = SISL_AFU_LUN_PROVISION_DELETE;
3334 size = 0;
3335 lun_id = lunprov->lun_id;
3336 break;
3337 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3338 fc_port_regs = get_fc_port_regs(cfg, port);
3339
3340 reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3341 lunprov->max_num_luns = reg;
3342 reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3343 lunprov->cur_num_luns = reg;
3344 reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3345 lunprov->max_cap_port = reg;
3346 reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3347 lunprov->cur_cap_port = reg;
3348
3349 goto out;
3350 default:
3351 rc = -EINVAL;
3352 goto out;
3353 }
3354
3355 memset(&rcb, 0, sizeof(rcb));
3356 memset(&asa, 0, sizeof(asa));
3357 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3358 rcb.lun_id = lun_id;
3359 rcb.msi = SISL_MSI_RRQ_UPDATED;
3360 rcb.timeout = MC_LUN_PROV_TIMEOUT;
3361 rcb.ioasa = &asa;
3362
3363 rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3364 rcb.cdb[1] = type;
3365 rcb.cdb[2] = port;
3366 put_unaligned_be64(size, &rcb.cdb[8]);
3367
3368 rc = send_afu_cmd(afu, &rcb);
3369 if (rc) {
3370 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3371 __func__, rc, asa.ioasc, asa.afu_extra);
3372 goto out;
3373 }
3374
3375 if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3376 lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3377 memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3378 }
3379 out:
3380 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3381 return rc;
3382 }
3383
3384 /**
3385 * cxlflash_afu_debug() - host AFU debug handler
3386 * @cfg: Internal structure associated with the host.
3387 * @afu_dbg: Kernel copy of userspace ioctl data structure.
3388 *
3389 * For debug requests requiring a data buffer, always provide an aligned
3390 * (cache line) buffer to the AFU to appease any alignment requirements.
3391 *
3392 * Return: 0 on success, -errno on failure
3393 */
3394 static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3395 struct ht_cxlflash_afu_debug *afu_dbg)
3396 {
3397 struct afu *afu = cfg->afu;
3398 struct device *dev = &cfg->dev->dev;
3399 struct sisl_ioarcb rcb;
3400 struct sisl_ioasa asa;
3401 char *buf = NULL;
3402 char *kbuf = NULL;
3403 void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3404 u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3405 u32 ulen = afu_dbg->data_len;
3406 bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3407 int rc = 0;
3408
3409 if (!afu_is_afu_debug(afu)) {
3410 rc = -ENOTSUPP;
3411 goto out;
3412 }
3413
3414 if (ulen) {
3415 req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3416
3417 if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3418 rc = -EINVAL;
3419 goto out;
3420 }
3421
3422 buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3423 if (unlikely(!buf)) {
3424 rc = -ENOMEM;
3425 goto out;
3426 }
3427
3428 kbuf = PTR_ALIGN(buf, cache_line_size());
3429
3430 if (is_write) {
3431 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3432
3433 if (copy_from_user(kbuf, ubuf, ulen)) {
3434 rc = -EFAULT;
3435 goto out;
3436 }
3437 }
3438 }
3439
3440 memset(&rcb, 0, sizeof(rcb));
3441 memset(&asa, 0, sizeof(asa));
3442
3443 rcb.req_flags = req_flags;
3444 rcb.msi = SISL_MSI_RRQ_UPDATED;
3445 rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3446 rcb.ioasa = &asa;
3447
3448 if (ulen) {
3449 rcb.data_len = ulen;
3450 rcb.data_ea = (uintptr_t)kbuf;
3451 }
3452
3453 rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3454 memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3455 HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3456
3457 rc = send_afu_cmd(afu, &rcb);
3458 if (rc) {
3459 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3460 __func__, rc, asa.ioasc, asa.afu_extra);
3461 goto out;
3462 }
3463
3464 if (ulen && !is_write) {
3465 if (copy_to_user(ubuf, kbuf, ulen))
3466 rc = -EFAULT;
3467 }
3468 out:
3469 kfree(buf);
3470 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3471 return rc;
3472 }
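
/*
 * The buffer handling above follows the same over-allocate-and-align
 * idiom used with __alignof__() in send_afu_cmd(): request
 * ulen + cache_line_size() - 1 bytes, round the working pointer up with
 * PTR_ALIGN() so data handed to the AFU starts on a cache line, and pass
 * the original allocation to kfree().
 */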
3473
3474 /**
3475 * cxlflash_chr_ioctl() - character device IOCTL handler
3476 * @file: File pointer for this device.
3477 * @cmd: IOCTL command.
3478 * @arg: Userspace ioctl data structure.
3479 *
3480 * A read/write semaphore is used to implement a 'drain' of currently
3481 * running ioctls. The read semaphore is taken at the beginning of each
3482 * ioctl thread and released upon concluding execution. Additionally the
3483 * semaphore should be released and then reacquired in any ioctl execution
3484 * path which will wait for an event to occur that is outside the scope of
3485 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3486 * a thread simply needs to acquire the write semaphore.
3487 *
3488 * Return: 0 on success, -errno on failure
3489 */
3490 static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3491 unsigned long arg)
3492 {
3493 typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3494
3495 struct cxlflash_cfg *cfg = file->private_data;
3496 struct device *dev = &cfg->dev->dev;
3497 char buf[sizeof(union cxlflash_ht_ioctls)];
3498 void __user *uarg = (void __user *)arg;
3499 struct ht_cxlflash_hdr *hdr;
3500 size_t size = 0;
3501 bool known_ioctl = false;
3502 int idx = 0;
3503 int rc = 0;
3504 hioctl do_ioctl = NULL;
3505
3506 static const struct {
3507 size_t size;
3508 hioctl ioctl;
3509 } ioctl_tbl[] = { /* NOTE: order matters here */
3510 { sizeof(struct ht_cxlflash_lun_provision),
3511 (hioctl)cxlflash_lun_provision },
3512 { sizeof(struct ht_cxlflash_afu_debug),
3513 (hioctl)cxlflash_afu_debug },
3514 };
3515
3516 /* Hold read semaphore so we can drain if needed */
3517 down_read(&cfg->ioctl_rwsem);
3518
3519 dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3520 __func__, cmd, idx, sizeof(ioctl_tbl));
3521
3522 switch (cmd) {
3523 case HT_CXLFLASH_LUN_PROVISION:
3524 case HT_CXLFLASH_AFU_DEBUG:
3525 known_ioctl = true;
3526 idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3527 size = ioctl_tbl[idx].size;
3528 do_ioctl = ioctl_tbl[idx].ioctl;
3529
3530 if (likely(do_ioctl))
3531 break;
3532
3533 fallthrough;
3534 default:
3535 rc = -EINVAL;
3536 goto out;
3537 }
3538
3539 if (unlikely(copy_from_user(&buf, uarg, size))) {
3540 dev_err(dev, "%s: copy_from_user() fail "
3541 "size=%lu cmd=%d (%s) uarg=%p\n",
3542 __func__, size, cmd, decode_hioctl(cmd), uarg);
3543 rc = -EFAULT;
3544 goto out;
3545 }
3546
3547 hdr = (struct ht_cxlflash_hdr *)&buf;
3548 if (hdr->version != HT_CXLFLASH_VERSION_0) {
3549 dev_dbg(dev, "%s: Version %u not supported for %s\n",
3550 __func__, hdr->version, decode_hioctl(cmd));
3551 rc = -EINVAL;
3552 goto out;
3553 }
3554
3555 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3556 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3557 rc = -EINVAL;
3558 goto out;
3559 }
3560
3561 rc = do_ioctl(cfg, (void *)&buf);
3562 if (likely(!rc))
3563 if (unlikely(copy_to_user(uarg, &buf, size))) {
3564 dev_err(dev, "%s: copy_to_user() fail "
3565 "size=%lu cmd=%d (%s) uarg=%p\n",
3566 __func__, size, cmd, decode_hioctl(cmd), uarg);
3567 rc = -EFAULT;
3568 }
3569
3570 /* fall through to exit */
3571
3572 out:
3573 up_read(&cfg->ioctl_rwsem);
3574 if (unlikely(rc && known_ioctl))
3575 dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3576 __func__, decode_hioctl(cmd), cmd, rc);
3577 else
3578 dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3579 __func__, decode_hioctl(cmd), cmd, rc);
3580 return rc;
3581 }
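
/*
 * Illustrative userspace sketch (hypothetical file descriptor; assumes
 * the uapi structures referenced above; error handling elided): querying
 * port 0 through the character device created by init_chrdev() below.
 *
 * struct ht_cxlflash_lun_provision lp = { 0 };
 *
 * lp.hdr.version = HT_CXLFLASH_VERSION_0;
 * lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
 * lp.port = 0;
 * rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp);
 */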
3582
3583 /*
3584 * Character device file operations
3585 */
3586 static const struct file_operations cxlflash_chr_fops = {
3587 .owner = THIS_MODULE,
3588 .open = cxlflash_chr_open,
3589 .unlocked_ioctl = cxlflash_chr_ioctl,
3590 .compat_ioctl = compat_ptr_ioctl,
3591 };
3592
3593 /**
3594 * init_chrdev() - initialize the character device for the host
3595 * @cfg: Internal structure associated with the host.
3596 *
3597 * Return: 0 on success, -errno on failure
3598 */
3599 static int init_chrdev(struct cxlflash_cfg *cfg)
3600 {
3601 struct device *dev = &cfg->dev->dev;
3602 struct device *char_dev;
3603 dev_t devno;
3604 int minor;
3605 int rc = 0;
3606
3607 minor = cxlflash_get_minor();
3608 if (unlikely(minor < 0)) {
3609 dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3610 rc = -ENOSPC;
3611 goto out;
3612 }
3613
3614 devno = MKDEV(cxlflash_major, minor);
3615 cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3616
3617 rc = cdev_add(&cfg->cdev, devno, 1);
3618 if (rc) {
3619 dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3620 goto err1;
3621 }
3622
3623 char_dev = device_create(cxlflash_class, NULL, devno,
3624 NULL, "cxlflash%d", minor);
3625 if (IS_ERR(char_dev)) {
3626 rc = PTR_ERR(char_dev);
3627 dev_err(dev, "%s: device_create failed rc=%d\n",
3628 __func__, rc);
3629 goto err2;
3630 }
3631
3632 cfg->chardev = char_dev;
3633 out:
3634 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3635 return rc;
3636 err2:
3637 cdev_del(&cfg->cdev);
3638 err1:
3639 cxlflash_put_minor(minor);
3640 goto out;
3641 }
3642
3643 /**
3644 * cxlflash_probe() - PCI entry point to add host
3645 * @pdev: PCI device associated with the host.
3646 * @dev_id: PCI device id associated with device.
3647 *
3648 * The device will initially start out in a 'probing' state and
3649 * transition to the 'normal' state at the end of a successful
3650 * probe. Should an EEH event occur during probe, the notification
3651 * thread (error_detected()) will wait until the probe handler
3652 * is nearly complete. At that time, the device will be moved to
3653 * a 'probed' state and the EEH thread woken up to drive the slot
3654 * reset and recovery (device moves to 'normal' state). Meanwhile,
3655 * the probe will be allowed to exit successfully.
3656 *
3657 * Return: 0 on success, -errno on failure
3658 */
3659 static int cxlflash_probe(struct pci_dev *pdev,
3660 const struct pci_device_id *dev_id)
3661 {
3662 struct Scsi_Host *host;
3663 struct cxlflash_cfg *cfg = NULL;
3664 struct device *dev = &pdev->dev;
3665 struct dev_dependent_vals *ddv;
3666 int rc = 0;
3667 int k;
3668
3669 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3670 __func__, pdev->irq);
3671
3672 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3673 driver_template.max_sectors = ddv->max_sectors;
3674
3675 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3676 if (!host) {
3677 dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3678 rc = -ENOMEM;
3679 goto out;
3680 }
3681
3682 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3683 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3684 host->unique_id = host->host_no;
3685 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3686
3687 cfg = shost_priv(host);
3688 cfg->state = STATE_PROBING;
3689 cfg->host = host;
3690 rc = alloc_mem(cfg);
3691 if (rc) {
3692 dev_err(dev, "%s: alloc_mem failed\n", __func__);
3693 rc = -ENOMEM;
3694 scsi_host_put(cfg->host);
3695 goto out;
3696 }
3697
3698 cfg->init_state = INIT_STATE_NONE;
3699 cfg->dev = pdev;
3700 cfg->cxl_fops = cxlflash_cxl_fops;
3701 cfg->ops = cxlflash_assign_ops(ddv);
3702 WARN_ON_ONCE(!cfg->ops);
3703
3704 /*
3705 * Promoted LUNs move to the top of the LUN table. The rest stay on
3706 * the bottom half. The bottom half grows from the end (index = 255),
3707 * whereas the top half grows from the beginning (index = 0).
3708 *
3709 * Initialize the last LUN index for all possible ports.
3710 */
3711 cfg->promote_lun_index = 0;
3712
3713 for (k = 0; k < MAX_FC_PORTS; k++)
3714 cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3715
3716 cfg->dev_id = (struct pci_device_id *)dev_id;
3717
3718 init_waitqueue_head(&cfg->tmf_waitq);
3719 init_waitqueue_head(&cfg->reset_waitq);
3720
3721 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3722 cfg->lr_state = LINK_RESET_INVALID;
3723 cfg->lr_port = -1;
3724 spin_lock_init(&cfg->tmf_slock);
3725 mutex_init(&cfg->ctx_tbl_list_mutex);
3726 mutex_init(&cfg->ctx_recovery_mutex);
3727 init_rwsem(&cfg->ioctl_rwsem);
3728 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3729 INIT_LIST_HEAD(&cfg->lluns);
3730
3731 pci_set_drvdata(pdev, cfg);
3732
3733 rc = init_pci(cfg);
3734 if (rc) {
3735 dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3736 goto out_remove;
3737 }
3738 cfg->init_state = INIT_STATE_PCI;
3739
3740 cfg->afu_cookie = cfg->ops->create_afu(pdev);
3741 if (unlikely(!cfg->afu_cookie)) {
3742 dev_err(dev, "%s: create_afu failed\n", __func__);
3743 rc = -ENOMEM;
3744 goto out_remove;
3745 }
3746
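	/*
	 * An EEH thread parked on reset_waitq means error recovery will
	 * reinitialize the AFU; treat init_afu() failure as fatal only
	 * when no such waiter exists.
	 */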
3747 rc = init_afu(cfg);
3748 if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3749 dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3750 goto out_remove;
3751 }
3752 cfg->init_state = INIT_STATE_AFU;
3753
3754 rc = init_scsi(cfg);
3755 if (rc) {
3756 dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3757 goto out_remove;
3758 }
3759 cfg->init_state = INIT_STATE_SCSI;
3760
3761 rc = init_chrdev(cfg);
3762 if (rc) {
3763 dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3764 goto out_remove;
3765 }
3766 cfg->init_state = INIT_STATE_CDEV;
3767
3768 if (wq_has_sleeper(&cfg->reset_waitq)) {
3769 cfg->state = STATE_PROBED;
3770 wake_up_all(&cfg->reset_waitq);
3771 } else
3772 cfg->state = STATE_NORMAL;
3773 out:
3774 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3775 return rc;
3776
3777 out_remove:
3778 cfg->state = STATE_PROBED;
3779 cxlflash_remove(pdev);
3780 goto out;
3781 }
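/*
 * The init_state ladder recorded during probe exists so that a single
 * remove path can unwind a partially completed probe. A minimal sketch
 * of the idea (see the driver's actual cxlflash_remove() for the real
 * teardown):
 *
 *	switch (cfg->init_state) {
 *	case INIT_STATE_CDEV:
 *		release the character device
 *		fallthrough;
 *	case INIT_STATE_SCSI:
 *		remove the SCSI host
 *		fallthrough;
 *	case INIT_STATE_AFU:
 *		terminate the AFU
 *		fallthrough;
 *	case INIT_STATE_PCI:
 *		release PCI resources
 *		fallthrough;
 *	case INIT_STATE_NONE:
 *		free the SCSI host
 *	}
 */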
3782
3783 /**
3784 * cxlflash_pci_error_detected() - called when a PCI error is detected
3785 * @pdev: PCI device struct.
3786 * @state: PCI channel state.
3787 *
3788 * When an EEH occurs during an active reset, wait until the reset is
3789 * complete and then take action based upon the device state.
3790 *
3791 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3792 */
3793 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3794 pci_channel_state_t state)
3795 {
3796 int rc = 0;
3797 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3798 struct device *dev = &cfg->dev->dev;
3799
3800 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3801
3802 switch (state) {
3803 case pci_channel_io_frozen:
3804 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3805 cfg->state != STATE_PROBING);
3806 if (cfg->state == STATE_FAILTERM)
3807 return PCI_ERS_RESULT_DISCONNECT;
3808
3809 cfg->state = STATE_RESET;
3810 scsi_block_requests(cfg->host);
3811 drain_ioctls(cfg);
3812 rc = cxlflash_mark_contexts_error(cfg);
3813 if (unlikely(rc))
3814 dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3815 __func__, rc);
3816 term_afu(cfg);
3817 return PCI_ERS_RESULT_NEED_RESET;
3818 case pci_channel_io_perm_failure:
3819 cfg->state = STATE_FAILTERM;
3820 wake_up_all(&cfg->reset_waitq);
3821 scsi_unblock_requests(cfg->host);
3822 return PCI_ERS_RESULT_DISCONNECT;
3823 default:
3824 break;
3825 }
3826 return PCI_ERS_RESULT_NEED_RESET;
3827 }
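/*
 * EEH recovery flow across cxlflash_pci_error_detected() above and the
 * slot_reset()/resume() handlers below, in summary:
 *
 *	io_frozen       -> STATE_RESET, quiesce, term_afu(), NEED_RESET
 *	slot_reset()    -> init_afu(); RECOVERED on success, else DISCONNECT
 *	resume()        -> STATE_NORMAL, wake waiters, unblock requests
 *	io_perm_failure -> STATE_FAILTERM, DISCONNECT
 */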
3828
3829 /**
3830 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3831 * @pdev: PCI device struct.
3832 *
3833 * This routine is called by the PCI error recovery code after the PCI
3834 * slot has been reset, just before normal operations should resume.
3835 *
3836 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3837 */
3838 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3839 {
3840 int rc = 0;
3841 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3842 struct device *dev = &cfg->dev->dev;
3843
3844 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3845
3846 rc = init_afu(cfg);
3847 if (unlikely(rc)) {
3848 dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3849 return PCI_ERS_RESULT_DISCONNECT;
3850 }
3851
3852 return PCI_ERS_RESULT_RECOVERED;
3853 }
3854
3855 /**
3856 * cxlflash_pci_resume() - called when normal operation can resume
3857 * @pdev: PCI device struct.
3858 */
3859 static void cxlflash_pci_resume(struct pci_dev *pdev)
3860 {
3861 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3862 struct device *dev = &cfg->dev->dev;
3863
3864 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3865
3866 cfg->state = STATE_NORMAL;
3867 wake_up_all(&cfg->reset_waitq);
3868 scsi_unblock_requests(cfg->host);
3869 }
3870
3871 /**
3872 * cxlflash_devnode() - provides the devtmpfs node name for the cxlflash class
3873 * @dev: Character device.
3874 * @mode: Optional pointer for returning a default mode for the node.
3875 *
3876 * Return: Allocated string giving the devtmpfs node path relative to /dev.
3877 */
3878 static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3879 {
3880 return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3881 }
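/*
 * With device_create(..., "cxlflash%d", minor) in init_chrdev() above,
 * this hook makes devtmpfs place each node in a subdirectory, e.g.
 * /dev/cxlflash/cxlflash0 rather than a flat /dev/cxlflash0.
 */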
3882
3883 /**
3884 * cxlflash_class_init() - create character device class
3885 *
3886 * Return: 0 on success, -errno on failure
3887 */
3888 static int cxlflash_class_init(void)
3889 {
3890 dev_t devno;
3891 int rc = 0;
3892
3893 rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3894 if (unlikely(rc)) {
3895 pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3896 goto out;
3897 }
3898
3899 cxlflash_major = MAJOR(devno);
3900
3901 cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3902 if (IS_ERR(cxlflash_class)) {
3903 rc = PTR_ERR(cxlflash_class);
3904 pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3905 goto err;
3906 }
3907
3908 cxlflash_class->devnode = cxlflash_devnode;
3909 out:
3910 pr_debug("%s: returning rc=%d\n", __func__, rc);
3911 return rc;
3912 err:
3913 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3914 goto out;
3915 }
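/*
 * One dynamic major is reserved with minors 0..CXLFLASH_MAX_ADAPTERS-1;
 * init_chrdev() later claims a free minor and builds the node with
 * MKDEV(cxlflash_major, minor). For example, with cxlflash_major = 240
 * (a hypothetical major purely for illustration), the second adapter
 * registers as device 240:1.
 */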
3916
3917 /**
3918 * cxlflash_class_exit() - destroy character device class
3919 */
3920 static void cxlflash_class_exit(void)
3921 {
3922 dev_t devno = MKDEV(cxlflash_major, 0);
3923
3924 class_destroy(cxlflash_class);
3925 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3926 }
3927
3928 static const struct pci_error_handlers cxlflash_err_handler = {
3929 .error_detected = cxlflash_pci_error_detected,
3930 .slot_reset = cxlflash_pci_slot_reset,
3931 .resume = cxlflash_pci_resume,
3932 };
3933
3934 /*
3935 * PCI device structure
3936 */
3937 static struct pci_driver cxlflash_driver = {
3938 .name = CXLFLASH_NAME,
3939 .id_table = cxlflash_pci_table,
3940 .probe = cxlflash_probe,
3941 .remove = cxlflash_remove,
3942 .shutdown = cxlflash_remove,
3943 .err_handler = &cxlflash_err_handler,
3944 };
3945
3946 /**
3947 * init_cxlflash() - module entry point
3948 *
3949 * Return: 0 on success, -errno on failure
3950 */
3951 static int __init init_cxlflash(void)
3952 {
3953 int rc;
3954
3955 check_sizes();
3956 cxlflash_list_init();
3957 rc = cxlflash_class_init();
3958 if (unlikely(rc))
3959 goto out;
3960
3961 rc = pci_register_driver(&cxlflash_driver);
3962 if (unlikely(rc))
3963 goto err;
3964 out:
3965 pr_debug("%s: returning rc=%d\n", __func__, rc);
3966 return rc;
3967 err:
3968 cxlflash_class_exit();
3969 goto out;
3970 }
3971
3972 /**
3973 * exit_cxlflash() - module exit point
3974 */
3975 static void __exit exit_cxlflash(void)
3976 {
3977 cxlflash_term_global_luns();
3978 cxlflash_free_errpage();
3979
3980 pci_unregister_driver(&cxlflash_driver);
3981 cxlflash_class_exit();
3982 }
3983
3984 module_init(init_cxlflash);
3985 module_exit(exit_cxlflash);