/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

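/*
 * Character device bookkeeping: a dynamically assigned major number is
 * shared by all adapters, and the bitmap hands out one minor per adapter
 * (see cxlflash_get_minor()/cxlflash_put_minor() below).
 */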
static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
        struct afu *afu;
        struct cxlflash_cfg *cfg;
        struct device *dev;
        struct sisl_ioarcb *ioarcb;
        struct sisl_ioasa *ioasa;
        u32 resid;

        if (unlikely(!cmd))
                return;

        /* Dereference the command only after the NULL check above */
        afu = cmd->parent;
        cfg = afu->parent;
        dev = &cfg->dev->dev;
        ioarcb = &(cmd->rcb);
        ioasa = &(cmd->sa);

        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                resid = ioasa->resid;
                scsi_set_resid(scp, resid);
                dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
                        __func__, cmd, scp, resid);
        }

        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
                dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
                        __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
                "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
                ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
                ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

        if (ioasa->rc.scsi_rc) {
                /* We have a SCSI status */
                if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
                        memcpy(scp->sense_buffer, ioasa->sense_data,
                               SISL_SENSE_DATA_LEN);
                        scp->result = ioasa->rc.scsi_rc;
                } else
                        scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
        }

        /*
         * We encountered an error. Set scp->result based on the nature
         * of the error.
         */
        if (ioasa->rc.fc_rc) {
                /* We have an FC status */
                switch (ioasa->rc.fc_rc) {
                case SISL_FC_RC_LINKDOWN:
                        scp->result = (DID_REQUEUE << 16);
                        break;
                case SISL_FC_RC_RESID:
                        /* This indicates an FCP resid underrun */
                        if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
                                /*
                                 * If the SISL_RC_FLAGS_OVERRUN flag was set,
                                 * this error is handled elsewhere. If not,
                                 * it must be handled here; this is probably
                                 * an AFU bug.
                                 */
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_FC_RC_RESIDERR:
                        /* Resid mismatch between adapter and device */
                case SISL_FC_RC_TGTABORT:
                case SISL_FC_RC_ABORTOK:
                case SISL_FC_RC_ABORTFAIL:
                case SISL_FC_RC_NOLOGI:
                case SISL_FC_RC_ABORTPEND:
                case SISL_FC_RC_WRABORTPEND:
                case SISL_FC_RC_NOEXP:
                case SISL_FC_RC_INUSE:
                        scp->result = (DID_ERROR << 16);
                        break;
                }
        }

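        /*
         * This block runs last, so an AFU return code overrides any result
         * assigned above; effective precedence is afu_rc > fc_rc > scsi_rc.
         */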
        if (ioasa->rc.afu_rc) {
                /* We have an AFU error */
                switch (ioasa->rc.afu_rc) {
                case SISL_AFU_RC_NO_CHANNELS:
                        scp->result = (DID_NO_CONNECT << 16);
                        break;
                case SISL_AFU_RC_DATA_DMA_ERR:
                        switch (ioasa->afu_extra) {
                        case SISL_AFU_DMA_ERR_PAGE_IN:
                                /* Retry */
                                scp->result = (DID_IMM_RETRY << 16);
                                break;
                        case SISL_AFU_DMA_ERR_INVALID_EA:
                        default:
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
                        /* Retry */
                        scp->result = (DID_ALLOC_FAILURE << 16);
                        break;
                default:
                        scp->result = (DID_ERROR << 16);
                }
        }
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
        struct scsi_cmnd *scp;
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

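        /*
         * Remove the command from the hardware queue's pending list under
         * the send queue lock so that a concurrent flush_pending_cmds()
         * cannot also complete this command.
         */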
        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        list_del(&cmd->list);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        if (cmd->scp) {
                scp = cmd->scp;
                if (unlikely(cmd->sa.ioasc))
                        process_cmd_err(cmd, scp);
                else
                        scp->result = (DID_OK << 16);

                dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
                                    __func__, scp, scp->result, cmd->sa.ioasc);
                scp->scsi_done(scp);
        } else if (cmd->cmd_tmf) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                wake_up_all_locked(&cfg->tmf_waitq);
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
        } else
                complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq: Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct afu_cmd *cmd, *tmp;
        struct scsi_cmnd *scp;
        ulong lock_flags;

        list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
                /* Bypass command when on a doneq, cmd_complete() will handle */
                if (!list_empty(&cmd->queue))
                        continue;

                list_del(&cmd->list);

                if (cmd->scp) {
                        scp = cmd->scp;
                        scp->result = (DID_IMM_RETRY << 16);
                        scp->scsi_done(scp);
                } else {
                        cmd->cmd_aborted = true;

                        if (cmd->cmd_tmf) {
                                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                                cfg->tmf_active = false;
                                wake_up_all_locked(&cfg->tmf_waitq);
                                spin_unlock_irqrestore(&cfg->tmf_slock,
                                                       lock_flags);
                        } else
                                complete(&cmd->cevent);
                }
        }
}

/**
 * context_reset() - reset context via specified register
 * @hwq: Hardware queue owning the context to be reset.
 * @reset_reg: MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = -ETIMEDOUT;
        int nretry = 0;
        u64 val = 0x1;
        ulong lock_flags;

        dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

        writeq_be(val, reset_reg);
        do {
                val = readq_be(reset_reg);
                if ((val & 0x1) == 0x0) {
                        rc = 0;
                        break;
                }

                /* Double delay each time */
                udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);

        if (!rc)
                flush_pending_cmds(hwq);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
                __func__, rc, val, nretry);
        return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq: Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq: Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        s64 room;
        ulong lock_flags;

        /*
         * To avoid the performance penalty of MMIO, spread the update of
         * 'room' over multiple commands.
         */
        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        if (--hwq->room < 0) {
                room = readq_be(&hwq->host_map->cmd_room);
                if (room <= 0) {
                        dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
                                            "0x%02X, room=0x%016llX\n",
                                            __func__, cmd->rcb.cdb[0], room);
                        hwq->room = 0;
                        rc = SCSI_MLQUEUE_HOST_BUSY;
                        goto out;
                }
                hwq->room = room - 1;
        }

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
                cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
        return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        int newval;
        ulong lock_flags;

        newval = atomic_dec_if_positive(&hwq->hsq_credits);
        if (newval <= 0) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        cmd->rcb.ioasa = &cmd->sa;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

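        /*
         * Copy the IOARCB by value into the hardware send queue and advance
         * the write pointer, wrapping back to the start of the ring at the
         * end. The credit taken above guarantees a free slot; the AFU is
         * notified by the sq_tail MMIO write below. In SQ mode, completion
         * status is delivered into the IOASA pointed to by cmd->rcb.ioasa.
         */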
        *hwq->hsq_curr = cmd->rcb;
        if (hwq->hsq_curr < hwq->hsq_end)
                hwq->hsq_curr++;
        else
                hwq->hsq_curr = hwq->hsq_start;

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
                "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
                cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
                readq_be(&hwq->host_map->sq_head),
                readq_be(&hwq->host_map->sq_tail));
        return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

        timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
        if (!timeout)
                rc = -ETIMEDOUT;

        if (cmd->cmd_aborted)
                rc = -EAGAIN;

        if (unlikely(cmd->sa.ioasc != 0)) {
                dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
                        __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
                rc = -EIO;
        }

        return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 * @afu: AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
                             struct afu *afu)
{
        u32 tag;
        u32 hwq = 0;

        if (afu->num_hwqs == 1)
                return 0;

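        /*
         * RR mode spreads commands with a simple round-robin counter (the
         * unlocked increment can race, but strict fairness is not needed),
         * TAG mode recovers the hardware queue index encoded in the block
         * layer's unique tag, and CPU mode hashes on the submitting CPU.
         */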
        switch (afu->hwq_mode) {
        case HWQ_MODE_RR:
                hwq = afu->hwq_rr_count++ % afu->num_hwqs;
                break;
        case HWQ_MODE_TAG:
                tag = blk_mq_unique_tag(scp->request);
                hwq = blk_mq_unique_tag_to_hwq(tag);
                break;
        case HWQ_MODE_CPU:
                hwq = smp_processor_id() % afu->num_hwqs;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg: Internal structure associated with the host.
 * @sdev: SCSI device destined for TMF.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
                    u64 tmfcmd)
{
        struct afu *afu = cfg->afu;
        struct afu_cmd *cmd = NULL;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        char *buf = NULL;
        ulong lock_flags;
        int rc = 0;
        ulong to;

        buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
        if (unlikely(!buf)) {
                dev_err(dev, "%s: no memory for command\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

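        /*
         * kzalloc() only guarantees the kmalloc minimum alignment, so the
         * allocation above is padded by __alignof__(*cmd) - 1 bytes and the
         * command is carved out at the first suitably aligned address.
         */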
        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
        INIT_LIST_HEAD(&cmd->queue);

        /* When a Task Management Function is active, do not send another */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        cfg->tmf_active = true;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        cmd->parent = afu;
        cmd->cmd_tmf = true;
        cmd->hwq_index = hwq->index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
        cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN |
                              SISL_REQ_FLAGS_TMF_CMD);
        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

        rc = afu->send_cmd(afu, cmd);
        if (unlikely(rc)) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                goto out;
        }

        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        to = msecs_to_jiffies(5000);
        to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
                                                       !cfg->tmf_active,
                                                       cfg->tmf_slock,
                                                       to);
        if (!to) {
                dev_err(dev, "%s: TMF timed out\n", __func__);
                rc = -ETIMEDOUT;
        } else if (cmd->cmd_aborted) {
                dev_err(dev, "%s: TMF aborted\n", __func__);
                rc = -EAGAIN;
        } else if (cmd->sa.ioasc) {
                dev_err(dev, "%s: TMF failed ioasc=%08x\n",
                        __func__, cmd->sa.ioasc);
                rc = -EIO;
        }
        cfg->tmf_active = false;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
        kfree(buf);
        return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
        return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct cxlflash_cfg *cfg = shost_priv(host);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = sc_to_afuci(scp);
        struct scatterlist *sg = scsi_sglist(scp);
        int hwq_index = cmd_to_target_hwq(host, scp, afu);
        struct hwq *hwq = get_hwq(afu, hwq_index);
        u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
        ulong lock_flags;
        int rc = 0;

        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
                            "cdb=(%08x-%08x-%08x-%08x)\n",
                            __func__, scp, host->host_no, scp->device->channel,
                            scp->device->id, scp->device->lun,
                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with regular commands.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active) {
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        switch (cfg->state) {
        case STATE_PROBING:
        case STATE_PROBED:
        case STATE_RESET:
                dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        case STATE_FAILTERM:
                dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
                scp->result = (DID_NO_CONNECT << 16);
                scp->scsi_done(scp);
                rc = 0;
                goto out;
        default:
                break;
        }

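        /*
         * Only the first scatter-gather element is mapped into the request
         * block; the host template advertises a single scatter-gather entry
         * (no scatter-gather support), so the list never has more than one.
         */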
        if (likely(sg)) {
                cmd->rcb.data_len = sg->length;
                cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
        }

        cmd->scp = scp;
        cmd->parent = afu;
        cmd->hwq_index = hwq_index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

        cmd->rcb.req_flags = req_flags;
        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

        rc = afu->send_cmd(afu, cmd);
out:
        return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;

        if (pci_channel_offline(pdev))
                wait_event_timeout(cfg->reset_waitq,
                                   !pci_channel_offline(pdev),
                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;

        if (cfg->afu) {
                free_pages((ulong)afu, get_order(sizeof(struct afu)));
                cfg->afu = NULL;
        }
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
        if (cfg->async_reset_cookie == 0)
                return;

        /* Wait until all async calls prior to this cookie have completed */
        async_synchronize_cookie(cfg->async_reset_cookie + 1);
        cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;
        struct hwq *hwq;
        int i;

        cancel_work_sync(&cfg->work_q);
        if (!current_is_async())
                cxlflash_reset_sync(cfg);

        if (likely(afu)) {
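                /*
                 * Poll until in-flight internal AFU commands drain; each
                 * such command is bounded by its own timeout (see
                 * wait_resp()), so this loop terminates.
                 */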
                while (atomic_read(&afu->cmds_active))
                        ssleep(1);

                if (afu_is_irqpoll_enabled(afu)) {
                        for (i = 0; i < afu->num_hwqs; i++) {
                                hwq = get_hwq(afu, i);

                                irq_poll_disable(&hwq->irqpoll);
                        }
                }

                if (likely(afu->afu_map)) {
                        cxl_psa_unmap((void __iomem *)afu->afu_map);
                        afu->afu_map = NULL;
                }
        }
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
                      u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        /* Each case falls through to undo the shallower levels below it */
        switch (level) {
        case UNMAP_THREE:
                /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
                if (index == PRIMARY_HWQ)
                        cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
                /* fall through */
        case UNMAP_TWO:
                cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
                /* fall through */
        case UNMAP_ONE:
                cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
                /* fall through */
        case FREE_IRQ:
                cxl_free_afu_irqs(hwq->ctx);
                /* fall through */
        case UNDO_NOOP:
                /* No action required */
                break;
        }
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;
        ulong lock_flags;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        WARN_ON(cxl_stop_context(hwq->ctx));
        if (index != PRIMARY_HWQ)
                WARN_ON(cxl_release_context(hwq->ctx));
        hwq->ctx = NULL;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        flush_pending_cmds(hwq);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int k;

        /*
         * Tear down is carefully orchestrated to ensure
         * no interrupts can come in when the problem state
         * area is unmapped.
         *
         * 1) Disable all AFU interrupts for each master
         * 2) Unmap the problem state area
         * 3) Stop each master context
         */
        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_intr(cfg, UNMAP_THREE, k);

        if (cfg->afu)
                stop_afu(cfg);

        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_mc(cfg, k);

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg: Internal structure associated with the host.
 * @wait: Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct dev_dependent_vals *ddv;
        __be64 __iomem *fc_port_regs;
        u64 reg, status;
        int i, retry_cnt = 0;

        ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
        if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
                return;

        if (!afu || !afu->afu_map) {
                dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
                return;
        }

        /* Notify AFU */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
                reg |= SISL_FC_SHUTDOWN_NORMAL;
                writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
        }

        if (!wait)
                return;

        /* Wait up to 1.5 seconds for shutdown processing to complete */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);
                retry_cnt = 0;

                while (true) {
                        status = readq_be(&fc_port_regs[FC_STATUS / 8]);
                        if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
                                break;
                        if (++retry_cnt >= MC_RETRY_CNT) {
                                dev_dbg(dev, "%s: port %d shutdown processing "
                                        "not yet completed\n", __func__, i);
                                break;
                        }
                        msleep(100 * retry_cnt);
                }
        }
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
        int minor;
        long bit;

        bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
        if (bit >= CXLFLASH_MAX_ADAPTERS)
                return -1;

        minor = bit & MINORMASK;
        set_bit(minor, cxlflash_minor);
        return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor: Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
        clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
        device_unregister(cfg->chardev);
        cfg->chardev = NULL;
        cdev_del(&cfg->cdev);
        cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        ulong lock_flags;

        if (!pci_is_enabled(pdev)) {
                dev_dbg(dev, "%s: Device is disabled\n", __func__);
                return;
        }

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with remove.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        /* Notify AFU and wait for shutdown processing to complete */
        notify_shutdown(cfg, true);

        cfg->state = STATE_FAILTERM;
        cxlflash_stop_term_user_contexts(cfg);

        /* Each case falls through to also undo the stages below it */
        switch (cfg->init_state) {
        case INIT_STATE_CDEV:
                cxlflash_release_chrdev(cfg);
                /* fall through */
        case INIT_STATE_SCSI:
                cxlflash_term_local_luns(cfg);
                scsi_remove_host(cfg->host);
                /* fall through */
        case INIT_STATE_AFU:
                term_afu(cfg);
                /* fall through */
        case INIT_STATE_PCI:
                pci_disable_device(pdev);
                /* fall through */
        case INIT_STATE_NONE:
                free_mem(cfg);
                scsi_host_put(cfg->host);
                break;
        }

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 * 0 on success
 * -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
        int rc = 0;
        struct device *dev = &cfg->dev->dev;

        /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
                dev_err(dev, "%s: cannot get %d free pages\n",
                        __func__, get_order(sizeof(struct afu)));
                rc = -ENOMEM;
                goto out;
        }
        cfg->afu->parent = cfg;
        cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
        cfg->afu->afu_map = NULL;
out:
        return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = pci_enable_device(pdev);
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(dev, "%s: Cannot enable adapter\n", __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out;
                }
        }

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
                dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
                goto out;
        }

        scsi_scan_host(cfg->host);

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);   /* set ON_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);  /* clear ON_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);  /* set OFF_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 * TRUE (1) when the specified port is online
 * FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
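                /*
                 * An all-ones value most likely means the MMIO read itself
                 * failed (e.g. the link or PCI channel is down), so halve
                 * the remaining retry budget rather than waiting out the
                 * full timeout.
                 */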
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 * TRUE (1) when the specified port is offline
 * FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
                         u64 wwpn)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);
        }

        writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);
        }
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain the link with the device by switching the host to
 * use the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 port_sel;

        /* First switch the AFU to the other links, if any */
        port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
        port_sel &= ~(1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);

        /* Switch back to include this port */
        port_sel |= (1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
        struct cxlflash_cfg *cfg = afu->parent;
        __be64 __iomem *fc_port_regs;
        int i;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 reg;

        /*
         * Global async interrupts: the AFU clears afu_ctrl on context exit
         * if async interrupts were sent to that context. This prevents the
         * AFU from sending further async interrupts when there is nobody
         * to receive them.
         */

        /* mask all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
        /* set LISN# to send and point to primary master context */
        reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

        if (afu->internal_lun)
                reg |= 1;       /* Bit 63 indicates local lun */
        writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
        /* clear all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
        /* unmask bits that are of interest */
        /* note: afu can send an interrupt after this step */
        writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
        /* clear again in case a bit came on after previous clear but before */
        /* unmask */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

        /* Clear/Set internal lun bits */
        fc_port_regs = get_fc_port_regs(cfg, 0);
        reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
        reg &= SISL_FC_INTERNAL_MASK;
        if (afu->internal_lun)
                reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
        writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

        /* now clear FC errors */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
                writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
        }

        /* sync interrupts for master's IOARRIN write */
        /* note that unlike asyncs, there can be no pending sync interrupts */
        /* at this time (this is a fresh context and master has not written */
        /* IOARRIN yet), so there is nothing to clear. */

        /* set LISN#, it is always sent to the context that wrote IOARRIN */
        for (i = 0; i < afu->num_hwqs; i++) {
                hwq = get_hwq(afu, i);

                writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
                writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
        }
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the hardware queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 reg;
        u64 reg_unmasked;

        reg = readq_be(&hwq->host_map->intr_status);
        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

        if (reg_unmasked == 0UL) {
                dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
                        __func__, reg);
                goto cxlflash_sync_err_irq_exit;
        }

        dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
                __func__, reg);

        writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
        return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq: Hardware queue associated with the host.
 * @doneq: Queue of commands harvested from the RRQ.
 * @budget: Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
        struct afu *afu = hwq->afu;
        struct afu_cmd *cmd;
        struct sisl_ioasa *ioasa;
        struct sisl_ioarcb *ioarcb;
        bool toggle = hwq->toggle;
        int num_hrrq = 0;
        u64 entry,
            *hrrq_start = hwq->hrrq_start,
            *hrrq_end = hwq->hrrq_end,
            *hrrq_curr = hwq->hrrq_curr;

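        /*
         * Each RRQ entry carries a toggle bit (SISL_RESP_HANDLE_T_BIT) whose
         * expected polarity flips every time the queue wraps. An entry is
         * only considered new when its toggle bit matches hwq->toggle, which
         * lets software detect fresh completions without a separate producer
         * index register.
         */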
        /* Process ready RRQ entries up to the specified budget (if any) */
        while (true) {
                entry = *hrrq_curr;

                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
                        break;

                entry &= ~SISL_RESP_HANDLE_T_BIT;

                if (afu_is_sq_cmd_mode(afu)) {
                        ioasa = (struct sisl_ioasa *)entry;
                        cmd = container_of(ioasa, struct afu_cmd, sa);
                } else {
                        ioarcb = (struct sisl_ioarcb *)entry;
                        cmd = container_of(ioarcb, struct afu_cmd, rcb);
                }

                list_add_tail(&cmd->queue, doneq);

                /* Advance to next entry or wrap and flip the toggle bit */
                if (hrrq_curr < hrrq_end)
                        hrrq_curr++;
                else {
                        hrrq_curr = hrrq_start;
                        toggle ^= SISL_RESP_HANDLE_T_BIT;
                }

                atomic_inc(&hwq->hsq_credits);
                num_hrrq++;

                if (budget > 0 && num_hrrq >= budget)
                        break;
        }

        hwq->hrrq_curr = hrrq_curr;
        hwq->toggle = toggle;

        return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq: Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
        struct afu_cmd *cmd, *tmp;

        WARN_ON(list_empty(doneq));

        list_for_each_entry_safe(cmd, tmp, doneq, queue)
                cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll: IRQ poll structure associated with queue to poll.
 * @budget: Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
        struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        num_entries = process_hrrq(hwq, &doneq, budget);
        if (num_entries < budget)
                irq_poll_complete(irqpoll);

        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        process_cmd_doneq(&doneq);
        return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the hardware queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct afu *afu = hwq->afu;
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        if (afu_is_irqpoll_enabled(afu)) {
                irq_poll_sched(&hwq->irqpoll);
                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
                return IRQ_HANDLED;
        }

        num_entries = process_hrrq(hwq, &doneq, -1);
        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        if (num_entries == 0)
                return IRQ_NONE;

        process_cmd_doneq(&doneq);
        return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 * - Order matters here as this array is indexed by bit position.
 *
 * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *   as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d) \
        { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

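/*
 * Each entry expands to { status bit, description, port number, action
 * flags }; for example, ASTATUS_FC(1, LINK_UP, "link up", 0) becomes
 * { SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }.
 */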
#define BUILD_SISL_ASTATUS_FC_PORT(_a)                                   \
        ASTATUS_FC(_a, LINK_UP, "link up", 0),                           \
        ASTATUS_FC(_a, LINK_DN, "link down", 0),                         \
        ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),            \
        ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),            \
        ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
        ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),     \
        ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),                \
        ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
        BUILD_SISL_ASTATUS_FC_PORT(1),
        BUILD_SISL_ASTATUS_FC_PORT(0),
        BUILD_SISL_ASTATUS_FC_PORT(3),
        BUILD_SISL_ASTATUS_FC_PORT(2)
};

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the hardware queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct afu *afu = hwq->afu;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        const struct asyc_intr_info *info;
        struct sisl_global_map __iomem *global = &afu->afu_map->global;
        __be64 __iomem *fc_port_regs;
        u64 reg_unmasked;
        u64 reg;
        u64 bit;
        u8 port;

        reg = readq_be(&global->regs.aintr_status);
        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

        if (unlikely(reg_unmasked == 0)) {
                dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
                        __func__, reg);
                goto out;
        }

        /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
        writeq_be(reg_unmasked, &global->regs.aintr_clear);

        /* Check each bit that is on */
        for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
                if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                info = &ainfo[bit];
                if (unlikely(info->status != 1ULL << bit)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                port = info->port;
                fc_port_regs = get_fc_port_regs(cfg, port);

                dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
                        __func__, port, info->desc,
                        readq_be(&fc_port_regs[FC_STATUS / 8]));

                /*
                 * Do link reset first, some OTHER errors will set FC_ERROR
                 * again if cleared before or w/o a reset
                 */
                if (info->action & LINK_RESET) {
                        dev_err(dev, "%s: FC Port %d: resetting link\n",
                                __func__, port);
                        cfg->lr_state = LINK_RESET_REQUIRED;
                        cfg->lr_port = port;
                        schedule_work(&cfg->work_q);
                }

                if (info->action & CLR_FC_ERROR) {
                        reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

                        /*
                         * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
                         * should be the same and tracing one is sufficient.
                         */

                        dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
                                __func__, port, reg);

                        writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
                        writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
                }

                if (info->action & SCAN_HOST) {
                        atomic_inc(&cfg->scan_host_needed);
                        schedule_work(&cfg->work_q);
                }
        }

out:
        return IRQ_HANDLED;
}


/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 * @index: Index of the hardware queue.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(cfg->afu, index);
        int rc = 0;

        rc = cxl_start_context(hwq->ctx,
                               hwq->work.work_element_descriptor,
                               NULL);

        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
        struct device *dev = &cfg->dev->dev;
        struct pci_dev *pdev = cfg->dev;
        int rc = 0;
        int ro_start, ro_size, i, j, k;
        ssize_t vpd_size;
        char vpd_data[CXLFLASH_VPD_LEN];
        char tmp_buf[WWPN_BUF_LEN] = { 0 };
        char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

        /* Get the VPD data from the device */
        vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
        if (unlikely(vpd_size <= 0)) {
                dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
                        __func__, vpd_size);
                rc = -ENODEV;
                goto out;
        }

        /* Get the read only section offset */
        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
                                    PCI_VPD_LRDT_RO_DATA);
        if (unlikely(ro_start < 0)) {
                dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
                rc = -ENODEV;
                goto out;
        }

        /* Get the read only section size, cap when extends beyond read VPD */
        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
        j = ro_size;
        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
        if (unlikely((i + j) > vpd_size)) {
                dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
                        __func__, (i + j), vpd_size);
                ro_size = vpd_size - i;
        }

        /*
         * Find the offset of the WWPN tag within the read only
         * VPD data and validate the found field (partials are
         * no good to us). Convert the ASCII data to an integer
         * value. Note that we must copy to a temporary buffer
         * because the conversion service requires that the ASCII
         * string be terminated.
         */
        for (k = 0; k < cfg->num_fc_ports; k++) {
                j = ro_size;
                i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
                if (unlikely(i < 0)) {
                        dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
                                __func__, k);
                        rc = -ENODEV;
                        goto out;
                }

                j = pci_vpd_info_field_size(&vpd_data[i]);
                i += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
                        dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
                                __func__, k);
                        rc = -ENODEV;
                        goto out;
                }

                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
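                /*
                 * tmp_buf (WWPN_BUF_LEN bytes) is zero-initialized, leaving
                 * room for a NUL terminator after the WWPN_LEN-byte copy.
                 * Note that WWPN_LEN (16) also serves as the numeric base
                 * below: the WWPN is 16 hexadecimal characters.
                 */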
1698 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1699 if (unlikely(rc)) {
1700 dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1701 __func__, k);
1702 rc = -ENODEV;
1703 goto out;
1704 }
1705
1706 dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1707 }
1708
1709 out:
1710 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1711 return rc;
1712 }
1713
1714 /**
1715 * init_pcr() - initialize the provisioning and control registers
1716 * @cfg: Internal structure associated with the host.
1717 *
1718 * Also sets up fast access to the mapped registers and initializes AFU
1719 * command fields that never change.
1720 */
1721 static void init_pcr(struct cxlflash_cfg *cfg)
1722 {
1723 struct afu *afu = cfg->afu;
1724 struct sisl_ctrl_map __iomem *ctrl_map;
1725 struct hwq *hwq;
1726 int i;
1727
1728 for (i = 0; i < MAX_CONTEXT; i++) {
1729 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1730 /* Disrupt any clients that could be running */
1731 /* e.g. clients that survived a master restart */
1732 writeq_be(0, &ctrl_map->rht_start);
1733 writeq_be(0, &ctrl_map->rht_cnt_id);
1734 writeq_be(0, &ctrl_map->ctx_cap);
1735 }
1736
1737 /* Copy frequently used fields into hwq */
1738 for (i = 0; i < afu->num_hwqs; i++) {
1739 hwq = get_hwq(afu, i);
1740
1741 hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
1742 hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1743 hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1744
1745 /* Program the Endian Control for the master context */
1746 writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1747 }
1748 }
1749
1750 /**
1751 * init_global() - initialize AFU global registers
1752 * @cfg: Internal structure associated with the host.
1753 */
1754 static int init_global(struct cxlflash_cfg *cfg)
1755 {
1756 struct afu *afu = cfg->afu;
1757 struct device *dev = &cfg->dev->dev;
1758 struct hwq *hwq;
1759 struct sisl_host_map __iomem *hmap;
1760 __be64 __iomem *fc_port_regs;
1761 u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1762 int i = 0, num_ports = 0;
1763 int rc = 0;
1764 u64 reg;
1765
1766 rc = read_vpd(cfg, &wwpn[0]);
1767 if (rc) {
1768 dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1769 goto out;
1770 }
1771
1772 /* Set up RRQ and SQ in HWQ for master issued cmds */
1773 for (i = 0; i < afu->num_hwqs; i++) {
1774 hwq = get_hwq(afu, i);
1775 hmap = hwq->host_map;
1776
1777 writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1778 writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1779
1780 if (afu_is_sq_cmd_mode(afu)) {
1781 writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1782 writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1783 }
1784 }
1785
1786 /* AFU configuration */
1787 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1788 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1789 /* enable all auto retry options and control endianness */
1790 /* leave others at default: */
1791 /* CTX_CAP write protected, mbox_r does not clear on read and */
1792 /* checker on if dual afu */
1793 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1794
1795 /* Global port select: select either port */
1796 if (afu->internal_lun) {
1797 /* Only use port 0 */
1798 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1799 num_ports = 0;
1800 } else {
1801 writeq_be(PORT_MASK(cfg->num_fc_ports),
1802 &afu->afu_map->global.regs.afu_port_sel);
1803 num_ports = cfg->num_fc_ports;
1804 }
1805
1806 for (i = 0; i < num_ports; i++) {
1807 fc_port_regs = get_fc_port_regs(cfg, i);
1808
1809 /* Unmask all errors (but they are still masked at AFU) */
1810 writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1811 /* Clear CRC error cnt & set a threshold */
1812 (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1813 writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1814
1815 /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1816 if (wwpn[i] != 0)
1817 afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1818 /* Programming WWPN back to back causes additional
1819 * offline/online transitions and a PLOGI
1820 */
1821 msleep(100);
1822 }
1823
1824 /* Set up master's own CTX_CAP to allow real mode, host translation */
1825 /* tables, afu cmds and read/write GSCSI cmds. */
1826 /* First, unlock ctx_cap write by reading mbox */
1827 for (i = 0; i < afu->num_hwqs; i++) {
1828 hwq = get_hwq(afu, i);
1829
1830 (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1831 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1832 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1833 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1834 &hwq->ctrl_map->ctx_cap);
1835 }
1836
1837 /*
1838 * Determine write-same unmap support for host by evaluating the unmap
1839 * sector support bit of the context control register associated with
1840 * the primary hardware queue. Note that while this status is reflected
1841 * in a context register, the outcome can be assumed to be host-wide.
1842 */
1843 hwq = get_hwq(afu, PRIMARY_HWQ);
1844 reg = readq_be(&hwq->host_map->ctx_ctrl);
1845 if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1846 cfg->ws_unmap = true;
1847
1848 /* Initialize heartbeat */
1849 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1850 out:
1851 return rc;
1852 }
1853
1854 /**
1855 * start_afu() - initializes and starts the AFU
1856 * @cfg: Internal structure associated with the host.
1857 */
1858 static int start_afu(struct cxlflash_cfg *cfg)
1859 {
1860 struct afu *afu = cfg->afu;
1861 struct device *dev = &cfg->dev->dev;
1862 struct hwq *hwq;
1863 int rc = 0;
1864 int i;
1865
1866 init_pcr(cfg);
1867
1868 /* Initialize each HWQ */
1869 for (i = 0; i < afu->num_hwqs; i++) {
1870 hwq = get_hwq(afu, i);
1871
1872 /* After an AFU reset, RRQ entries are stale, clear them */
1873 memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1874
1875 /* Initialize RRQ pointers */
1876 hwq->hrrq_start = &hwq->rrq_entry[0];
1877 hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1878 hwq->hrrq_curr = hwq->hrrq_start;
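/* Per SISLite, each RRQ entry carries a toggle bit that flips every
 * time the queue wraps; a freshly initialized queue expects entries
 * posted with toggle = 1.
 */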
1879 hwq->toggle = 1;
1880
1881 /* Initialize spin locks */
1882 spin_lock_init(&hwq->hrrq_slock);
1883 spin_lock_init(&hwq->hsq_slock);
1884
1885 /* Initialize SQ */
1886 if (afu_is_sq_cmd_mode(afu)) {
1887 memset(&hwq->sq, 0, sizeof(hwq->sq));
1888 hwq->hsq_start = &hwq->sq[0];
1889 hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1890 hwq->hsq_curr = hwq->hsq_start;
1891
1892 atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1893 }
1894
1895 /* Initialize IRQ poll */
1896 if (afu_is_irqpoll_enabled(afu))
1897 irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1898 cxlflash_irqpoll);
1899
1900 }
1901
1902 rc = init_global(cfg);
1903
1904 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1905 return rc;
1906 }
1907
1908 /**
1909 * init_intr() - setup interrupt handlers for the master context
1910 * @cfg: Internal structure associated with the host.
1911 * @hwq: Hardware queue to initialize.
1912 *
1913 * Return: the undo level needed to unwind the interrupt setup performed so far
1914 */
1915 static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1916 struct hwq *hwq)
1917 {
1918 struct device *dev = &cfg->dev->dev;
1919 struct cxl_context *ctx = hwq->ctx;
1920 int rc = 0;
1921 enum undo_level level = UNDO_NOOP;
1922 bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1923 int num_irqs = is_primary_hwq ? 3 : 2;
1924
1925 rc = cxl_allocate_afu_irqs(ctx, num_irqs);
1926 if (unlikely(rc)) {
1927 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1928 __func__, rc);
1929 level = UNDO_NOOP;
1930 goto out;
1931 }
1932
1933 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1934 "SISL_MSI_SYNC_ERROR");
1935 if (unlikely(rc <= 0)) {
1936 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1937 level = FREE_IRQ;
1938 goto out;
1939 }
1940
1941 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1942 "SISL_MSI_RRQ_UPDATED");
1943 if (unlikely(rc <= 0)) {
1944 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1945 level = UNMAP_ONE;
1946 goto out;
1947 }
1948
1949 /* SISL_MSI_ASYNC_ERROR is set up only for the primary HWQ */
1950 if (!is_primary_hwq)
1951 goto out;
1952
1953 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1954 "SISL_MSI_ASYNC_ERROR");
1955 if (unlikely(rc <= 0)) {
1956 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1957 level = UNMAP_TWO;
1958 goto out;
1959 }
1960 out:
1961 return level;
1962 }
1963
1964 /**
1965 * init_mc() - create and register as the master context
1966 * @cfg: Internal structure associated with the host.
1967 * @index: HWQ Index of the master context.
1968 *
1969 * Return: 0 on success, -errno on failure
1970 */
1971 static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1972 {
1973 struct cxl_context *ctx;
1974 struct device *dev = &cfg->dev->dev;
1975 struct hwq *hwq = get_hwq(cfg->afu, index);
1976 int rc = 0;
1977 enum undo_level level;
1978
1979 hwq->afu = cfg->afu;
1980 hwq->index = index;
1981 INIT_LIST_HEAD(&hwq->pending_cmds);
1982
1983 if (index == PRIMARY_HWQ)
1984 ctx = cxl_get_context(cfg->dev);
1985 else
1986 ctx = cxl_dev_context_init(cfg->dev);
1987 if (IS_ERR_OR_NULL(ctx)) {	/* cxl_dev_context_init() returns ERR_PTR on failure */
1988 rc = -ENOMEM;
1989 goto err1;
1990 }
1991
1992 WARN_ON(hwq->ctx);
1993 hwq->ctx = ctx;
1994
1995 /* Set it up as a master with the CXL */
1996 cxl_set_master(ctx);
1997
1998 /* Reset AFU when initializing primary context */
1999 if (index == PRIMARY_HWQ) {
2000 rc = cxl_afu_reset(ctx);
2001 if (unlikely(rc)) {
2002 dev_err(dev, "%s: AFU reset failed rc=%d\n",
2003 __func__, rc);
2004 goto err1;
2005 }
2006 }
2007
2008 level = init_intr(cfg, hwq);
2009 if (unlikely(level)) {
2010 dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level);
rc = -ENODEV;	/* propagate the failure; rc would otherwise still be 0 */
2011 goto err2;
2012 }
2013
2014 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
2015 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
2016 * element (pe) that is embedded in the context (ctx)
2017 */
2018 rc = start_context(cfg, index);
2019 if (unlikely(rc)) {
2020 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2021 level = UNMAP_THREE;
2022 goto err2;
2023 }
2024
2025 out:
2026 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2027 return rc;
2028 err2:
2029 term_intr(cfg, level, index);
2030 if (index != PRIMARY_HWQ)
2031 cxl_release_context(ctx);
2032 err1:
2033 hwq->ctx = NULL;
2034 goto out;
2035 }
2036
2037 /**
2038 * get_num_afu_ports() - determines and configures the number of AFU ports
2039 * @cfg: Internal structure associated with the host.
2040 *
2041 * This routine determines the number of AFU ports by converting the global
2042 * port selection mask. The converted value is only valid following an AFU
2043 * reset (explicit or power-on). This routine must be invoked shortly after
2044 * mapping as other routines are dependent on the number of ports during the
2045 * initialization sequence.
2046 *
2047 * To support legacy AFUs that might not have reflected an initial global
2048 * port mask (value read is 0), default to the number of ports originally
2049 * supported by the cxlflash driver (2) before hardware with other port
2050 * offerings was introduced.
2051 */
2052 static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2053 {
2054 struct afu *afu = cfg->afu;
2055 struct device *dev = &cfg->dev->dev;
2056 u64 port_mask;
2057 int num_fc_ports = LEGACY_FC_PORTS;
2058
2059 port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2060 if (port_mask != 0ULL)
2061 num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
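/* e.g. a dual-port card reports a mask of 0x3, so ilog2(0x3) + 1 = 2
 * ports; a four-port card reports 0xf, giving ilog2(0xf) + 1 = 4.
 */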
2062
2063 dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2064 __func__, port_mask, num_fc_ports);
2065
2066 cfg->num_fc_ports = num_fc_ports;
2067 cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2068 }
2069
2070 /**
2071 * init_afu() - setup as master context and start AFU
2072 * @cfg: Internal structure associated with the host.
2073 *
2074 * This routine is a higher level of control for configuring the
2075 * AFU on probe and reset paths.
2076 *
2077 * Return: 0 on success, -errno on failure
2078 */
2079 static int init_afu(struct cxlflash_cfg *cfg)
2080 {
2081 u64 reg;
2082 int rc = 0;
2083 struct afu *afu = cfg->afu;
2084 struct device *dev = &cfg->dev->dev;
2085 struct hwq *hwq;
2086 int i;
2087
2088 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
2089
2090 afu->num_hwqs = afu->desired_hwqs;
2091 for (i = 0; i < afu->num_hwqs; i++) {
2092 rc = init_mc(cfg, i);
2093 if (rc) {
2094 dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2095 __func__, rc, i);
2096 goto err1;
2097 }
2098 }
2099
2100 /* Map the entire MMIO space of the AFU using the first context */
2101 hwq = get_hwq(afu, PRIMARY_HWQ);
2102 afu->afu_map = cxl_psa_map(hwq->ctx);
2103 if (!afu->afu_map) {
2104 dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
2105 rc = -ENOMEM;
2106 goto err1;
2107 }
2108
2109 /* No byte reverse on reading afu_version or string will be backwards */
2110 reg = readq(&afu->afu_map->global.regs.afu_version);
2111 memcpy(afu->version, &reg, sizeof(reg));
2112 afu->interface_version =
2113 readq_be(&afu->afu_map->global.regs.interface_version);
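/* A register value of all 1's (so version + 1 == 0) indicates a back
 * level AFU that does not implement the interface version register.
 */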
2114 if ((afu->interface_version + 1) == 0) {
2115 dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2116 "interface version %016llx\n", afu->version,
2117 afu->interface_version);
2118 rc = -EINVAL;
2119 goto err1;
2120 }
2121
2122 if (afu_is_sq_cmd_mode(afu)) {
2123 afu->send_cmd = send_cmd_sq;
2124 afu->context_reset = context_reset_sq;
2125 } else {
2126 afu->send_cmd = send_cmd_ioarrin;
2127 afu->context_reset = context_reset_ioarrin;
2128 }
2129
2130 dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2131 afu->version, afu->interface_version);
2132
2133 get_num_afu_ports(cfg);
2134
2135 rc = start_afu(cfg);
2136 if (rc) {
2137 dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2138 goto err1;
2139 }
2140
2141 afu_err_intr_init(cfg->afu);
2142 for (i = 0; i < afu->num_hwqs; i++) {
2143 hwq = get_hwq(afu, i);
2144
2145 hwq->room = readq_be(&hwq->host_map->cmd_room);
2146 }
2147
2148 /* Restore the LUN mappings */
2149 cxlflash_restore_luntable(cfg);
2150 out:
2151 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2152 return rc;
2153
2154 err1:
2155 for (i = afu->num_hwqs - 1; i >= 0; i--) {
2156 term_intr(cfg, UNMAP_THREE, i);
2157 term_mc(cfg, i);
2158 }
2159 goto out;
2160 }
2161
2162 /**
2163 * afu_reset() - resets the AFU
2164 * @cfg: Internal structure associated with the host.
2165 *
2166 * Return: 0 on success, -errno on failure
2167 */
2168 static int afu_reset(struct cxlflash_cfg *cfg)
2169 {
2170 struct device *dev = &cfg->dev->dev;
2171 int rc = 0;
2172
2173 /* Stop the context before the reset. Since the context is
2174 * no longer available, restart it after the reset completes.
2175 */
2176 term_afu(cfg);
2177
2178 rc = init_afu(cfg);
2179
2180 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2181 return rc;
2182 }
2183
2184 /**
2185 * drain_ioctls() - wait until all currently executing ioctls have completed
2186 * @cfg: Internal structure associated with the host.
2187 *
2188 * Obtain write access to read/write semaphore that wraps ioctl
2189 * handling to 'drain' ioctls currently executing.
2190 */
2191 static void drain_ioctls(struct cxlflash_cfg *cfg)
2192 {
2193 down_write(&cfg->ioctl_rwsem);
2194 up_write(&cfg->ioctl_rwsem);
2195 }
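/*
 * Every ioctl path holds ioctl_rwsem for read while it runs, so
 * down_write() cannot return until all in-flight ioctls have finished;
 * the immediate up_write() then reopens the gate for new ioctls.
 */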
2196
2197 /**
2198 * cxlflash_async_reset_host() - asynchronous host reset handler
2199 * @data: Private data provided while scheduling reset.
2200 * @cookie: Cookie that can be used for checkpointing.
2201 */
2202 static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2203 {
2204 struct cxlflash_cfg *cfg = data;
2205 struct device *dev = &cfg->dev->dev;
2206 int rc = 0;
2207
2208 if (cfg->state != STATE_RESET) {
2209 dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2210 __func__, cfg->state);
2211 goto out;
2212 }
2213
2214 drain_ioctls(cfg);
2215 cxlflash_mark_contexts_error(cfg);
2216 rc = afu_reset(cfg);
2217 if (rc)
2218 cfg->state = STATE_FAILTERM;
2219 else
2220 cfg->state = STATE_NORMAL;
2221 wake_up_all(&cfg->reset_waitq);
2222
2223 out:
2224 scsi_unblock_requests(cfg->host);
2225 }
2226
2227 /**
2228 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2229 * @cfg: Internal structure associated with the host.
2230 */
2231 static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2232 {
2233 struct device *dev = &cfg->dev->dev;
2234
2235 if (cfg->state != STATE_NORMAL) {
2236 dev_dbg(dev, "%s: Not performing reset state=%d\n",
2237 __func__, cfg->state);
2238 return;
2239 }
2240
2241 cfg->state = STATE_RESET;
2242 scsi_block_requests(cfg->host);
2243 cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2244 cfg);
2245 }
2246
2247 /**
2248 * send_afu_cmd() - builds and sends an internal AFU command
2249 * @afu: AFU associated with the host.
2250 * @rcb: Pre-populated IOARCB describing command to send.
2251 *
2252 * The AFU can only take one internal AFU command at a time. This limitation is
2253 * enforced by using a mutex to provide exclusive access to the AFU during the
2254 * operation. This design point requires calling threads to not be on interrupt
2255 * context due to the possibility of sleeping during concurrent AFU operations.
2256 *
2257 * The command status is optionally passed back to the caller when the caller
2258 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2259 *
2260 * Return:
2261 * 0 on success, -errno on failure
2262 */
2263 static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2264 {
2265 struct cxlflash_cfg *cfg = afu->parent;
2266 struct device *dev = &cfg->dev->dev;
2267 struct afu_cmd *cmd = NULL;
2268 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2269 char *buf = NULL;
2270 int rc = 0;
2271 int nretry = 0;
2272 static DEFINE_MUTEX(sync_active);
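/* Note: sync_active is a function-local static, so it serializes
 * internal AFU commands across every adapter in the system, not just
 * this AFU.
 */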
2273
2274 if (cfg->state != STATE_NORMAL) {
2275 dev_dbg(dev, "%s: Sync not required state=%u\n",
2276 __func__, cfg->state);
2277 return 0;
2278 }
2279
2280 mutex_lock(&sync_active);
2281 atomic_inc(&afu->cmds_active);
2282 buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2283 if (unlikely(!buf)) {
2284 dev_err(dev, "%s: no memory for command\n", __func__);
2285 rc = -ENOMEM;
2286 goto out;
2287 }
2288
2289 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2290
2291 retry:
2292 memset(cmd, 0, sizeof(*cmd));
2293 memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2294 INIT_LIST_HEAD(&cmd->queue);
2295 init_completion(&cmd->cevent);
2296 cmd->parent = afu;
2297 cmd->hwq_index = hwq->index;
2298 cmd->rcb.ctx_id = hwq->ctx_hndl;
2299
2300 dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2301 __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2302
2303 rc = afu->send_cmd(afu, cmd);
2304 if (unlikely(rc)) {
2305 rc = -ENOBUFS;
2306 goto out;
2307 }
2308
2309 rc = wait_resp(afu, cmd);
2310 switch (rc) {
2311 case -ETIMEDOUT:
2312 rc = afu->context_reset(hwq);
2313 if (rc) {
2314 cxlflash_schedule_async_reset(cfg);
2315 break;
2316 }
2317 /* fall through to retry */
2318 case -EAGAIN:
2319 if (++nretry < 2)
2320 goto retry;
2321 /* fall through to exit */
2322 default:
2323 break;
2324 }
2325
2326 if (rcb->ioasa)
2327 *rcb->ioasa = cmd->sa;
2328 out:
2329 atomic_dec(&afu->cmds_active);
2330 mutex_unlock(&sync_active);
2331 kfree(buf);
2332 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2333 return rc;
2334 }
2335
2336 /**
2337 * cxlflash_afu_sync() - builds and sends an AFU sync command
2338 * @afu: AFU associated with the host.
2339 * @ctx: Identifies context requesting sync.
2340 * @res: Identifies resource requesting sync.
2341 * @mode: Type of sync to issue (lightweight, heavyweight, global).
2342 *
2343 * AFU sync operations are only necessary and allowed when the device is
2344 * operating normally. When not operating normally, sync requests can occur as
2345 * part of cleaning up resources associated with an adapter prior to removal.
2346 * In this scenario, these requests are simply ignored (safe due to the AFU
2347 * going away).
2348 *
2349 * Return:
2350 * 0 on success, -errno on failure
2351 */
2352 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2353 {
2354 struct cxlflash_cfg *cfg = afu->parent;
2355 struct device *dev = &cfg->dev->dev;
2356 struct sisl_ioarcb rcb = { 0 };
2357
2358 dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2359 __func__, afu, ctx, res, mode);
2360
2361 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2362 rcb.msi = SISL_MSI_RRQ_UPDATED;
2363 rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2364
2365 rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2366 rcb.cdb[1] = mode;
2367 put_unaligned_be16(ctx, &rcb.cdb[2]);
2368 put_unaligned_be32(res, &rcb.cdb[4]);
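/* Resulting sync CDB layout: cdb[0] = opcode, cdb[1] = mode,
 * cdb[2-3] = ctx (big endian), cdb[4-7] = res (big endian).
 */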
2369
2370 return send_afu_cmd(afu, &rcb);
2371 }
2372
2373 /**
2374 * cxlflash_eh_abort_handler() - abort a SCSI command
2375 * @scp: SCSI command to abort.
2376 *
2377 * CXL Flash devices do not support a single command abort. Reset the context
2378 * as per SISLite specification. Flush any pending commands in the hardware
2379 * queue before the reset.
2380 *
2381 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2382 */
2383 static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2384 {
2385 int rc = FAILED;
2386 struct Scsi_Host *host = scp->device->host;
2387 struct cxlflash_cfg *cfg = shost_priv(host);
2388 struct afu_cmd *cmd = sc_to_afuc(scp);
2389 struct device *dev = &cfg->dev->dev;
2390 struct afu *afu = cfg->afu;
2391 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2392
2393 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2394 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2395 scp->device->channel, scp->device->id, scp->device->lun,
2396 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2397 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2398 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2399 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2400
2401 /* When the state is not normal, another reset/reload is in progress.
2402 * Return failed and the mid-layer will invoke host reset handler.
2403 */
2404 if (cfg->state != STATE_NORMAL) {
2405 dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2406 __func__, cfg->state);
2407 goto out;
2408 }
2409
2410 rc = afu->context_reset(hwq);
2411 if (unlikely(rc))
2412 goto out;
2413
2414 rc = SUCCESS;
2415
2416 out:
2417 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2418 return rc;
2419 }
2420
2421 /**
2422 * cxlflash_eh_device_reset_handler() - reset a single LUN
2423 * @scp: SCSI command from stack identifying the LUN to reset.
2424 *
2425 * Return:
2426 * SUCCESS as defined in scsi/scsi.h
2427 * FAILED as defined in scsi/scsi.h
2428 */
2429 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2430 {
2431 int rc = SUCCESS;
2432 struct scsi_device *sdev = scp->device;
2433 struct Scsi_Host *host = sdev->host;
2434 struct cxlflash_cfg *cfg = shost_priv(host);
2435 struct device *dev = &cfg->dev->dev;
2436 int rcr = 0;
2437
2438 dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2439 host->host_no, sdev->channel, sdev->id, sdev->lun);
2440 retry:
2441 switch (cfg->state) {
2442 case STATE_NORMAL:
2443 rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2444 if (unlikely(rcr))
2445 rc = FAILED;
2446 break;
2447 case STATE_RESET:
2448 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2449 goto retry;
2450 default:
2451 rc = FAILED;
2452 break;
2453 }
2454
2455 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2456 return rc;
2457 }
2458
2459 /**
2460 * cxlflash_eh_host_reset_handler() - reset the host adapter
2461 * @scp: SCSI command from stack identifying host.
2462 *
2463 * Following a reset, the state is evaluated again in case an EEH occurred
2464 * during the reset. In such a scenario, the host reset will either yield
2465 * until the EEH recovery is complete or return success or failure based
2466 * upon the current device state.
2467 *
2468 * Return:
2469 * SUCCESS as defined in scsi/scsi.h
2470 * FAILED as defined in scsi/scsi.h
2471 */
2472 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2473 {
2474 int rc = SUCCESS;
2475 int rcr = 0;
2476 struct Scsi_Host *host = scp->device->host;
2477 struct cxlflash_cfg *cfg = shost_priv(host);
2478 struct device *dev = &cfg->dev->dev;
2479
2480 dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2481
2482 switch (cfg->state) {
2483 case STATE_NORMAL:
2484 cfg->state = STATE_RESET;
2485 drain_ioctls(cfg);
2486 cxlflash_mark_contexts_error(cfg);
2487 rcr = afu_reset(cfg);
2488 if (rcr) {
2489 rc = FAILED;
2490 cfg->state = STATE_FAILTERM;
2491 } else {
2492 cfg->state = STATE_NORMAL;
}
2493 wake_up_all(&cfg->reset_waitq);
2494 ssleep(1);
2495 /* fall through */
2496 case STATE_RESET:
2497 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2498 if (cfg->state == STATE_NORMAL)
2499 break;
2500 /* fall through */
2501 default:
2502 rc = FAILED;
2503 break;
2504 }
2505
2506 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2507 return rc;
2508 }
2509
2510 /**
2511 * cxlflash_change_queue_depth() - change the queue depth for the device
2512 * @sdev: SCSI device destined for queue depth change.
2513 * @qdepth: Requested queue depth value to set.
2514 *
2515 * The requested queue depth is capped to the maximum supported value.
2516 *
2517 * Return: The actual queue depth set.
2518 */
2519 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2520 {
2522 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2523 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2524
2525 scsi_change_queue_depth(sdev, qdepth);
2526 return sdev->queue_depth;
2527 }
2528
2529 /**
2530 * cxlflash_show_port_status() - queries and presents the current port status
2531 * @port: Desired port for status reporting.
2532 * @cfg: Internal structure associated with the host.
2533 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2534 *
2535 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2536 */
2537 static ssize_t cxlflash_show_port_status(u32 port,
2538 struct cxlflash_cfg *cfg,
2539 char *buf)
2540 {
2541 struct device *dev = &cfg->dev->dev;
2542 char *disp_status;
2543 u64 status;
2544 __be64 __iomem *fc_port_regs;
2545
2546 WARN_ON(port >= MAX_FC_PORTS);
2547
2548 if (port >= cfg->num_fc_ports) {
2549 dev_info(dev, "%s: Port %d not supported on this card.\n",
2550 __func__, port);
2551 return -EINVAL;
2552 }
2553
2554 fc_port_regs = get_fc_port_regs(cfg, port);
2555 status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2556 status &= FC_MTIP_STATUS_MASK;
2557
2558 if (status == FC_MTIP_STATUS_ONLINE)
2559 disp_status = "online";
2560 else if (status == FC_MTIP_STATUS_OFFLINE)
2561 disp_status = "offline";
2562 else
2563 disp_status = "unknown";
2564
2565 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2566 }
2567
2568 /**
2569 * port0_show() - queries and presents the current status of port 0
2570 * @dev: Generic device associated with the host owning the port.
2571 * @attr: Device attribute representing the port.
2572 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2573 *
2574 * Return: The size of the ASCII string returned in @buf.
2575 */
2576 static ssize_t port0_show(struct device *dev,
2577 struct device_attribute *attr,
2578 char *buf)
2579 {
2580 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2581
2582 return cxlflash_show_port_status(0, cfg, buf);
2583 }
2584
2585 /**
2586 * port1_show() - queries and presents the current status of port 1
2587 * @dev: Generic device associated with the host owning the port.
2588 * @attr: Device attribute representing the port.
2589 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2590 *
2591 * Return: The size of the ASCII string returned in @buf.
2592 */
2593 static ssize_t port1_show(struct device *dev,
2594 struct device_attribute *attr,
2595 char *buf)
2596 {
2597 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2598
2599 return cxlflash_show_port_status(1, cfg, buf);
2600 }
2601
2602 /**
2603 * port2_show() - queries and presents the current status of port 2
2604 * @dev: Generic device associated with the host owning the port.
2605 * @attr: Device attribute representing the port.
2606 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2607 *
2608 * Return: The size of the ASCII string returned in @buf.
2609 */
2610 static ssize_t port2_show(struct device *dev,
2611 struct device_attribute *attr,
2612 char *buf)
2613 {
2614 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2615
2616 return cxlflash_show_port_status(2, cfg, buf);
2617 }
2618
2619 /**
2620 * port3_show() - queries and presents the current status of port 3
2621 * @dev: Generic device associated with the host owning the port.
2622 * @attr: Device attribute representing the port.
2623 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2624 *
2625 * Return: The size of the ASCII string returned in @buf.
2626 */
2627 static ssize_t port3_show(struct device *dev,
2628 struct device_attribute *attr,
2629 char *buf)
2630 {
2631 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2632
2633 return cxlflash_show_port_status(3, cfg, buf);
2634 }
2635
2636 /**
2637 * lun_mode_show() - presents the current LUN mode of the host
2638 * @dev: Generic device associated with the host.
2639 * @attr: Device attribute representing the LUN mode.
2640 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2641 *
2642 * Return: The size of the ASCII string returned in @buf.
2643 */
2644 static ssize_t lun_mode_show(struct device *dev,
2645 struct device_attribute *attr, char *buf)
2646 {
2647 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2648 struct afu *afu = cfg->afu;
2649
2650 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2651 }
2652
2653 /**
2654 * lun_mode_store() - sets the LUN mode of the host
2655 * @dev: Generic device associated with the host.
2656 * @attr: Device attribute representing the LUN mode.
2657 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2658 * @count: Length of data residing in @buf.
2659 *
2660 * The CXL Flash AFU supports a dummy LUN mode where the external
2661 * links and storage are not required. Space on the FPGA is used
2662 * to create 1 or 2 small LUNs which are presented to the system
2663 * as if they were a normal storage device. This feature is useful
2664 * during development and also provides manufacturing with a way
2665 * to test the AFU without an actual device.
2666 *
2667 * 0 = external LUN[s] (default)
2668 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2669 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2670 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2671 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2672 *
2673 * Return: The number of bytes consumed from @buf.
2674 */
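/* Example usage (the host number is hypothetical):
 *   echo 2 > /sys/class/scsi_host/host0/lun_mode   (one internal 4K LUN)
 *   echo 0 > /sys/class/scsi_host/host0/lun_mode   (back to external LUNs)
 */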
2675 static ssize_t lun_mode_store(struct device *dev,
2676 struct device_attribute *attr,
2677 const char *buf, size_t count)
2678 {
2679 struct Scsi_Host *shost = class_to_shost(dev);
2680 struct cxlflash_cfg *cfg = shost_priv(shost);
2681 struct afu *afu = cfg->afu;
2682 int rc;
2683 u32 lun_mode;
2684
2685 rc = kstrtouint(buf, 10, &lun_mode);
2686 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2687 afu->internal_lun = lun_mode;
2688
2689 /*
2690 * When configured for internal LUN, there is only one channel,
2691 * channel number 0, else there will be one less than the number
2692 * of fc ports for this card.
2693 */
2694 if (afu->internal_lun)
2695 shost->max_channel = 0;
2696 else
2697 shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2698
2699 afu_reset(cfg);
2700 scsi_scan_host(cfg->host);
2701 }
2702
2703 return count;
2704 }
2705
2706 /**
2707 * ioctl_version_show() - presents the current ioctl version of the host
2708 * @dev: Generic device associated with the host.
2709 * @attr: Device attribute representing the ioctl version.
2710 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2711 *
2712 * Return: The size of the ASCII string returned in @buf.
2713 */
2714 static ssize_t ioctl_version_show(struct device *dev,
2715 struct device_attribute *attr, char *buf)
2716 {
2717 ssize_t bytes = 0;
2718
2719 bytes = scnprintf(buf, PAGE_SIZE,
2720 "disk: %u\n", DK_CXLFLASH_VERSION_0);
2721 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2722 "host: %u\n", HT_CXLFLASH_VERSION_0);
2723
2724 return bytes;
2725 }
2726
2727 /**
2728 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2729 * @port: Desired port for status reporting.
2730 * @cfg: Internal structure associated with the host.
2731 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2732 *
2733 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2734 */
2735 static ssize_t cxlflash_show_port_lun_table(u32 port,
2736 struct cxlflash_cfg *cfg,
2737 char *buf)
2738 {
2739 struct device *dev = &cfg->dev->dev;
2740 __be64 __iomem *fc_port_luns;
2741 int i;
2742 ssize_t bytes = 0;
2743
2744 WARN_ON(port >= MAX_FC_PORTS);
2745
2746 if (port >= cfg->num_fc_ports) {
2747 dev_info(dev, "%s: Port %d not supported on this card.\n",
2748 __func__, port);
2749 return -EINVAL;
2750 }
2751
2752 fc_port_luns = get_fc_port_luns(cfg, port);
2753
2754 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2755 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2756 "%03d: %016llx\n",
2757 i, readq_be(&fc_port_luns[i]));
2758 return bytes;
2759 }
2760
2761 /**
2762 * port0_lun_table_show() - presents the current LUN table of port 0
2763 * @dev: Generic device associated with the host owning the port.
2764 * @attr: Device attribute representing the port.
2765 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2766 *
2767 * Return: The size of the ASCII string returned in @buf.
2768 */
2769 static ssize_t port0_lun_table_show(struct device *dev,
2770 struct device_attribute *attr,
2771 char *buf)
2772 {
2773 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2774
2775 return cxlflash_show_port_lun_table(0, cfg, buf);
2776 }
2777
2778 /**
2779 * port1_lun_table_show() - presents the current LUN table of port 1
2780 * @dev: Generic device associated with the host owning the port.
2781 * @attr: Device attribute representing the port.
2782 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2783 *
2784 * Return: The size of the ASCII string returned in @buf.
2785 */
2786 static ssize_t port1_lun_table_show(struct device *dev,
2787 struct device_attribute *attr,
2788 char *buf)
2789 {
2790 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2791
2792 return cxlflash_show_port_lun_table(1, cfg, buf);
2793 }
2794
2795 /**
2796 * port2_lun_table_show() - presents the current LUN table of port 2
2797 * @dev: Generic device associated with the host owning the port.
2798 * @attr: Device attribute representing the port.
2799 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2800 *
2801 * Return: The size of the ASCII string returned in @buf.
2802 */
2803 static ssize_t port2_lun_table_show(struct device *dev,
2804 struct device_attribute *attr,
2805 char *buf)
2806 {
2807 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2808
2809 return cxlflash_show_port_lun_table(2, cfg, buf);
2810 }
2811
2812 /**
2813 * port3_lun_table_show() - presents the current LUN table of port 3
2814 * @dev: Generic device associated with the host owning the port.
2815 * @attr: Device attribute representing the port.
2816 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2817 *
2818 * Return: The size of the ASCII string returned in @buf.
2819 */
2820 static ssize_t port3_lun_table_show(struct device *dev,
2821 struct device_attribute *attr,
2822 char *buf)
2823 {
2824 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2825
2826 return cxlflash_show_port_lun_table(3, cfg, buf);
2827 }
2828
2829 /**
2830 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2831 * @dev: Generic device associated with the host.
2832 * @attr: Device attribute representing the IRQ poll weight.
2833 * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
2834 * weight in ASCII.
2835 *
2836 * An IRQ poll weight of 0 indicates polling is disabled.
2837 *
2838 * Return: The size of the ASCII string returned in @buf.
2839 */
2840 static ssize_t irqpoll_weight_show(struct device *dev,
2841 struct device_attribute *attr, char *buf)
2842 {
2843 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2844 struct afu *afu = cfg->afu;
2845
2846 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2847 }
2848
2849 /**
2850 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2851 * @dev: Generic device associated with the host.
2852 * @attr: Device attribute representing the IRQ poll weight.
2853 * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
2854 * weight in ASCII.
2855 * @count: Length of data residing in @buf.
2856 *
2857 * An IRQ poll weight of 0 indicates polling is disabled.
2858 *
2859 * Return: The number of bytes consumed from @buf, or -errno on failure.
2860 */
2861 static ssize_t irqpoll_weight_store(struct device *dev,
2862 struct device_attribute *attr,
2863 const char *buf, size_t count)
2864 {
2865 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2866 struct device *cfgdev = &cfg->dev->dev;
2867 struct afu *afu = cfg->afu;
2868 struct hwq *hwq;
2869 u32 weight;
2870 int rc, i;
2871
2872 rc = kstrtouint(buf, 10, &weight);
2873 if (rc)
2874 return -EINVAL;
2875
2876 if (weight > 256) {
2877 dev_info(cfgdev,
2878 "Invalid IRQ poll weight. It must be 256 or less.\n");
2879 return -EINVAL;
2880 }
2881
2882 if (weight == afu->irqpoll_weight) {
2883 dev_info(cfgdev,
2884 "Current IRQ poll weight has the same weight.\n");
2885 return -EINVAL;
2886 }
2887
2888 if (afu_is_irqpoll_enabled(afu)) {
2889 for (i = 0; i < afu->num_hwqs; i++) {
2890 hwq = get_hwq(afu, i);
2891
2892 irq_poll_disable(&hwq->irqpoll);
2893 }
2894 }
2895
2896 afu->irqpoll_weight = weight;
2897
2898 if (weight > 0) {
2899 for (i = 0; i < afu->num_hwqs; i++) {
2900 hwq = get_hwq(afu, i);
2901
2902 irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2903 }
2904 }
2905
2906 return count;
2907 }
2908
2909 /**
2910 * num_hwqs_show() - presents the number of hardware queues for the host
2911 * @dev: Generic device associated with the host.
2912 * @attr: Device attribute representing the number of hardware queues.
2913 * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
2914 * queues in ASCII.
2915 *
2916 * Return: The size of the ASCII string returned in @buf.
2917 */
2918 static ssize_t num_hwqs_show(struct device *dev,
2919 struct device_attribute *attr, char *buf)
2920 {
2921 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2922 struct afu *afu = cfg->afu;
2923
2924 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2925 }
2926
2927 /**
2928 * num_hwqs_store() - sets the number of hardware queues for the host
2929 * @dev: Generic device associated with the host.
2930 * @attr: Device attribute representing the number of hardware queues.
2931 * @buf: Buffer of length PAGE_SIZE containing the number of hardware
2932 * queues in ASCII.
2933 * @count: Length of data residing in @buf.
2934 *
2935 * n > 0: num_hwqs = n
2936 * n = 0: num_hwqs = num_online_cpus()
2937 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2938 *
2939 * Return: The number of bytes consumed from @buf, or -errno on failure.
2940 */
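/* Worked example (CPU count is hypothetical): on a 16-CPU host,
 * writing 4 yields 4 HWQs, writing 0 yields 16, and writing -4 yields
 * 16 / 4 = 4, all capped at CXLFLASH_MAX_HWQS.
 */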
2941 static ssize_t num_hwqs_store(struct device *dev,
2942 struct device_attribute *attr,
2943 const char *buf, size_t count)
2944 {
2945 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2946 struct afu *afu = cfg->afu;
2947 int rc;
2948 int nhwqs, num_hwqs;
2949
2950 rc = kstrtoint(buf, 10, &nhwqs);
2951 if (rc)
2952 return -EINVAL;
2953
2954 if (nhwqs >= 1)
2955 num_hwqs = nhwqs;
2956 else if (nhwqs == 0)
2957 num_hwqs = num_online_cpus();
2958 else
2959 num_hwqs = num_online_cpus() / abs(nhwqs);
2960
2961 afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
2962 WARN_ON_ONCE(afu->desired_hwqs == 0);
2963
2964 retry:
2965 switch (cfg->state) {
2966 case STATE_NORMAL:
2967 cfg->state = STATE_RESET;
2968 drain_ioctls(cfg);
2969 cxlflash_mark_contexts_error(cfg);
2970 rc = afu_reset(cfg);
2971 if (rc)
2972 cfg->state = STATE_FAILTERM;
2973 else
2974 cfg->state = STATE_NORMAL;
2975 wake_up_all(&cfg->reset_waitq);
2976 break;
2977 case STATE_RESET:
2978 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2979 if (cfg->state == STATE_NORMAL)
2980 goto retry;
/* else fall through */
2981 default:
2982 /* Ideally should not happen */
2983 dev_err(dev, "%s: Device is not ready, state=%d\n",
2984 __func__, cfg->state);
2985 break;
2986 }
2987
2988 return count;
2989 }
2990
2991 static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
2992
2993 /**
2994 * hwq_mode_show() - presents the HWQ steering mode for the host
2995 * @dev: Generic device associated with the host.
2996 * @attr: Device attribute representing the HWQ steering mode.
2997 * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
2998 * as a character string.
2999 *
3000 * Return: The size of the ASCII string returned in @buf.
3001 */
3002 static ssize_t hwq_mode_show(struct device *dev,
3003 struct device_attribute *attr, char *buf)
3004 {
3005 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3006 struct afu *afu = cfg->afu;
3007
3008 return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3009 }
3010
3011 /**
3012 * hwq_mode_store() - sets the HWQ steering mode for the host
3013 * @dev: Generic device associated with the host.
3014 * @attr: Device attribute representing the HWQ steering mode.
3015 * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
3016 * as a character string.
3017 * @count: Length of data residing in @buf.
3018 *
3019 * rr = Round-Robin
3020 * tag = Block MQ Tagging
3021 * cpu = CPU Affinity
3022 *
3023 * Return: The number of bytes consumed from @buf, or -errno on failure.
3024 */
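/* Example usage (the host number is hypothetical):
 *   echo rr > /sys/class/scsi_host/host0/hwq_mode    (round-robin)
 *   echo cpu > /sys/class/scsi_host/host0/hwq_mode   (CPU affinity)
 */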
3025 static ssize_t hwq_mode_store(struct device *dev,
3026 struct device_attribute *attr,
3027 const char *buf, size_t count)
3028 {
3029 struct Scsi_Host *shost = class_to_shost(dev);
3030 struct cxlflash_cfg *cfg = shost_priv(shost);
3031 struct device *cfgdev = &cfg->dev->dev;
3032 struct afu *afu = cfg->afu;
3033 int i;
3034 u32 mode = MAX_HWQ_MODE;
3035
3036 for (i = 0; i < MAX_HWQ_MODE; i++) {
3037 if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3038 mode = i;
3039 break;
3040 }
3041 }
3042
3043 if (mode >= MAX_HWQ_MODE) {
3044 dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3045 return -EINVAL;
3046 }
3047
3048 if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
3049 dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
3050 "HWQ steering mode.\n");
3051 return -EINVAL;
3052 }
3053
3054 afu->hwq_mode = mode;
3055
3056 return count;
3057 }
3058
3059 /**
3060 * mode_show() - presents the current mode of the device
3061 * @dev: Generic device associated with the device.
3062 * @attr: Device attribute representing the device mode.
3063 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3064 *
3065 * Return: The size of the ASCII string returned in @buf.
3066 */
3067 static ssize_t mode_show(struct device *dev,
3068 struct device_attribute *attr, char *buf)
3069 {
3070 struct scsi_device *sdev = to_scsi_device(dev);
3071
3072 return scnprintf(buf, PAGE_SIZE, "%s\n",
3073 sdev->hostdata ? "superpipe" : "legacy");
3074 }
3075
3076 /*
3077 * Host attributes
3078 */
3079 static DEVICE_ATTR_RO(port0);
3080 static DEVICE_ATTR_RO(port1);
3081 static DEVICE_ATTR_RO(port2);
3082 static DEVICE_ATTR_RO(port3);
3083 static DEVICE_ATTR_RW(lun_mode);
3084 static DEVICE_ATTR_RO(ioctl_version);
3085 static DEVICE_ATTR_RO(port0_lun_table);
3086 static DEVICE_ATTR_RO(port1_lun_table);
3087 static DEVICE_ATTR_RO(port2_lun_table);
3088 static DEVICE_ATTR_RO(port3_lun_table);
3089 static DEVICE_ATTR_RW(irqpoll_weight);
3090 static DEVICE_ATTR_RW(num_hwqs);
3091 static DEVICE_ATTR_RW(hwq_mode);
3092
3093 static struct device_attribute *cxlflash_host_attrs[] = {
3094 &dev_attr_port0,
3095 &dev_attr_port1,
3096 &dev_attr_port2,
3097 &dev_attr_port3,
3098 &dev_attr_lun_mode,
3099 &dev_attr_ioctl_version,
3100 &dev_attr_port0_lun_table,
3101 &dev_attr_port1_lun_table,
3102 &dev_attr_port2_lun_table,
3103 &dev_attr_port3_lun_table,
3104 &dev_attr_irqpoll_weight,
3105 &dev_attr_num_hwqs,
3106 &dev_attr_hwq_mode,
3107 NULL
3108 };
3109
3110 /*
3111 * Device attributes
3112 */
3113 static DEVICE_ATTR_RO(mode);
3114
3115 static struct device_attribute *cxlflash_dev_attrs[] = {
3116 &dev_attr_mode,
3117 NULL
3118 };
3119
3120 /*
3121 * Host template
3122 */
3123 static struct scsi_host_template driver_template = {
3124 .module = THIS_MODULE,
3125 .name = CXLFLASH_ADAPTER_NAME,
3126 .info = cxlflash_driver_info,
3127 .ioctl = cxlflash_ioctl,
3128 .proc_name = CXLFLASH_NAME,
3129 .queuecommand = cxlflash_queuecommand,
3130 .eh_abort_handler = cxlflash_eh_abort_handler,
3131 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3132 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3133 .change_queue_depth = cxlflash_change_queue_depth,
3134 .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3135 .can_queue = CXLFLASH_MAX_CMDS,
3136 .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3137 .this_id = -1,
3138 .sg_tablesize = 1, /* No scatter gather support */
3139 .max_sectors = CXLFLASH_MAX_SECTORS,
3140 .use_clustering = ENABLE_CLUSTERING,
3141 .shost_attrs = cxlflash_host_attrs,
3142 .sdev_attrs = cxlflash_dev_attrs,
3143 };
3144
3145 /*
3146 * Device dependent values
3147 */
3148 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3149 0ULL };
3150 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3151 CXLFLASH_NOTIFY_SHUTDOWN };
3152 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3153 CXLFLASH_NOTIFY_SHUTDOWN };
3154
3155 /*
3156 * PCI device binding table
3157 */
3158 static struct pci_device_id cxlflash_pci_table[] = {
3159 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3161 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3163 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3165 {}
3166 };
3167
3168 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3169
3170 /**
3171 * cxlflash_worker_thread() - work thread handler for the AFU
3172 * @work: Work structure contained within cxlflash associated with host.
3173 *
3174 * Handles the following events:
3175 * - Link reset which cannot be performed on interrupt context due to
3176 * blocking up to a few seconds
3177 * - Rescan the host
3178 */
3179 static void cxlflash_worker_thread(struct work_struct *work)
3180 {
3181 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3182 work_q);
3183 struct afu *afu = cfg->afu;
3184 struct device *dev = &cfg->dev->dev;
3185 __be64 __iomem *fc_port_regs;
3186 int port;
3187 ulong lock_flags;
3188
3189 /* Avoid MMIO if the device has failed */
3191 if (cfg->state != STATE_NORMAL)
3192 return;
3193
3194 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3195
3196 if (cfg->lr_state == LINK_RESET_REQUIRED) {
3197 port = cfg->lr_port;
3198 if (port < 0) {
3199 dev_err(dev, "%s: invalid port index %d\n",
3200 __func__, port);
3201 } else {
3202 spin_unlock_irqrestore(cfg->host->host_lock,
3203 lock_flags);
3204
3205 /* The reset can block... */
3206 fc_port_regs = get_fc_port_regs(cfg, port);
3207 afu_link_reset(afu, port, fc_port_regs);
3208 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3209 }
3210
3211 cfg->lr_state = LINK_RESET_COMPLETE;
3212 }
3213
3214 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3215
3216 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3217 scsi_scan_host(cfg->host);
3218 }
3219
3220 /**
3221 * cxlflash_chr_open() - character device open handler
3222 * @inode: Device inode associated with this character device.
3223 * @file: File pointer for this device.
3224 *
3225 * Only users with admin privileges are allowed to open the character device.
3226 *
3227 * Return: 0 on success, -errno on failure
3228 */
3229 static int cxlflash_chr_open(struct inode *inode, struct file *file)
3230 {
3231 struct cxlflash_cfg *cfg;
3232
3233 if (!capable(CAP_SYS_ADMIN))
3234 return -EACCES;
3235
3236 cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3237 file->private_data = cfg;
3238
3239 return 0;
3240 }
3241
3242 /**
3243 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3244 * @cmd: The host ioctl command to decode.
3245 *
3246 * Return: A string identifying the decoded host ioctl.
3247 */
3248 static char *decode_hioctl(int cmd)
3249 {
3250 switch (cmd) {
3251 case HT_CXLFLASH_LUN_PROVISION:
3252 return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
case HT_CXLFLASH_AFU_DEBUG:
return __stringify_1(HT_CXLFLASH_AFU_DEBUG);
3253 }
3254
3255 return "UNKNOWN";
3256 }
3257
3258 /**
3259 * cxlflash_lun_provision() - host LUN provisioning handler
3260 * @cfg: Internal structure associated with the host.
3261 * @lunprov: Kernel copy of userspace ioctl data structure.
3262 *
3263 * Return: 0 on success, -errno on failure
3264 */
3265 static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3266 struct ht_cxlflash_lun_provision *lunprov)
3267 {
3268 struct afu *afu = cfg->afu;
3269 struct device *dev = &cfg->dev->dev;
3270 struct sisl_ioarcb rcb;
3271 struct sisl_ioasa asa;
3272 __be64 __iomem *fc_port_regs;
3273 u16 port = lunprov->port;
3274 u16 scmd = lunprov->hdr.subcmd;
3275 u16 type;
3276 u64 reg;
3277 u64 size;
3278 u64 lun_id;
3279 int rc = 0;
3280
3281 if (!afu_is_lun_provision(afu)) {
3282 rc = -ENOTSUPP;
3283 goto out;
3284 }
3285
3286 if (port >= cfg->num_fc_ports) {
3287 rc = -EINVAL;
3288 goto out;
3289 }
3290
3291 switch (scmd) {
3292 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3293 type = SISL_AFU_LUN_PROVISION_CREATE;
3294 size = lunprov->size;
3295 lun_id = 0;
3296 break;
3297 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3298 type = SISL_AFU_LUN_PROVISION_DELETE;
3299 size = 0;
3300 lun_id = lunprov->lun_id;
3301 break;
3302 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3303 fc_port_regs = get_fc_port_regs(cfg, port);
3304
3305 reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3306 lunprov->max_num_luns = reg;
3307 reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3308 lunprov->cur_num_luns = reg;
3309 reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3310 lunprov->max_cap_port = reg;
3311 reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3312 lunprov->cur_cap_port = reg;
3313
3314 goto out;
3315 default:
3316 rc = -EINVAL;
3317 goto out;
3318 }
3319
3320 memset(&rcb, 0, sizeof(rcb));
3321 memset(&asa, 0, sizeof(asa));
3322 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3323 rcb.lun_id = lun_id;
3324 rcb.msi = SISL_MSI_RRQ_UPDATED;
3325 rcb.timeout = MC_LUN_PROV_TIMEOUT;
3326 rcb.ioasa = &asa;
3327
3328 rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3329 rcb.cdb[1] = type;
3330 rcb.cdb[2] = port;
3331 put_unaligned_be64(size, &rcb.cdb[8]);
3332
3333 rc = send_afu_cmd(afu, &rcb);
3334 if (rc) {
3335 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3336 __func__, rc, asa.ioasc, asa.afu_extra);
3337 goto out;
3338 }
3339
3340 if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3341 lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3342 memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3343 }
3344 out:
3345 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3346 return rc;
3347 }
3348
3349 /**
3350 * cxlflash_afu_debug() - host AFU debug handler
3351 * @cfg: Internal structure associated with the host.
3352 * @afu_dbg: Kernel copy of userspace ioctl data structure.
3353 *
3354 * For debug requests requiring a data buffer, always provide an aligned
3355 * (cache line) buffer to the AFU to appease any alignment requirements.
3356 *
3357 * Return: 0 on success, -errno on failure
3358 */
3359 static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3360 struct ht_cxlflash_afu_debug *afu_dbg)
3361 {
3362 struct afu *afu = cfg->afu;
3363 struct device *dev = &cfg->dev->dev;
3364 struct sisl_ioarcb rcb;
3365 struct sisl_ioasa asa;
3366 char *buf = NULL;
3367 char *kbuf = NULL;
3368 void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3369 u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3370 u32 ulen = afu_dbg->data_len;
3371 bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3372 int rc = 0;
3373
3374 if (!afu_is_afu_debug(afu)) {
3375 rc = -ENOTSUPP;
3376 goto out;
3377 }
3378
3379 if (ulen) {
3380 req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3381
3382 if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3383 rc = -EINVAL;
3384 goto out;
3385 }
3386
3387 if (unlikely(!access_ok(is_write ? VERIFY_READ : VERIFY_WRITE,
3388 ubuf, ulen))) {
3389 rc = -EFAULT;
3390 goto out;
3391 }
3392
3393 buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3394 if (unlikely(!buf)) {
3395 rc = -ENOMEM;
3396 goto out;
3397 }
3398
3399 kbuf = PTR_ALIGN(buf, cache_line_size());
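/* kmalloc() above over-allocated by cache_line_size() - 1 bytes so
 * that PTR_ALIGN() can round kbuf up to the next cache line boundary
 * without running past the end of the allocation.
 */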
3400
3401 if (is_write) {
3402 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3403
3404 if (copy_from_user(kbuf, ubuf, ulen)) {
3405 rc = -EFAULT;
3406 goto out;
3407 }
3408 }
3409 }
3410
3411 memset(&rcb, 0, sizeof(rcb));
3412 memset(&asa, 0, sizeof(asa));
3413
3414 rcb.req_flags = req_flags;
3415 rcb.msi = SISL_MSI_RRQ_UPDATED;
3416 rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3417 rcb.ioasa = &asa;
3418
3419 if (ulen) {
3420 rcb.data_len = ulen;
3421 rcb.data_ea = (uintptr_t)kbuf;
3422 }
3423
3424 rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3425 memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3426 HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3427
3428 rc = send_afu_cmd(afu, &rcb);
3429 if (rc) {
3430 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3431 __func__, rc, asa.ioasc, asa.afu_extra);
3432 goto out;
3433 }
3434
3435 if (ulen && !is_write) {
3436 if (copy_to_user(ubuf, kbuf, ulen))
3437 rc = -EFAULT;
3438 }
3439 out:
3440 kfree(buf);
3441 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3442 return rc;
3443 }
3444
3445 /**
3446 * cxlflash_chr_ioctl() - character device IOCTL handler
3447 * @file: File pointer for this device.
3448 * @cmd: IOCTL command.
3449 * @arg: Userspace ioctl data structure.
3450 *
3451 * A read/write semaphore is used to implement a 'drain' of currently
3452 * running ioctls. The read semaphore is taken at the beginning of each
3453 * ioctl thread and released upon concluding execution. Additionally the
3454 * semaphore should be released and then reacquired in any ioctl execution
3455 * path which will wait for an event to occur that is outside the scope of
3456 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3457 * a thread simply needs to acquire the write semaphore.
3458 *
3459 * Return: 0 on success, -errno on failure
3460 */
3461 static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3462 unsigned long arg)
3463 {
3464 typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3465
3466 struct cxlflash_cfg *cfg = file->private_data;
3467 struct device *dev = &cfg->dev->dev;
3468 char buf[sizeof(union cxlflash_ht_ioctls)];
3469 void __user *uarg = (void __user *)arg;
3470 struct ht_cxlflash_hdr *hdr;
3471 size_t size = 0;
3472 bool known_ioctl = false;
3473 int idx = 0;
3474 int rc = 0;
3475 hioctl do_ioctl = NULL;
3476
3477 static const struct {
3478 size_t size;
3479 hioctl ioctl;
3480 } ioctl_tbl[] = { /* NOTE: order matters here */
3481 { sizeof(struct ht_cxlflash_lun_provision),
3482 (hioctl)cxlflash_lun_provision },
3483 { sizeof(struct ht_cxlflash_afu_debug),
3484 (hioctl)cxlflash_afu_debug },
3485 };
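/*
 * idx below is derived as _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION),
 * so ioctl_tbl entries must stay in ascending command-number order.
 */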
3486
3487 /* Hold read semaphore so we can drain if needed */
3488 down_read(&cfg->ioctl_rwsem);
3489
3490 dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3491 __func__, cmd, idx, sizeof(ioctl_tbl));
3492
3493 switch (cmd) {
3494 case HT_CXLFLASH_LUN_PROVISION:
3495 case HT_CXLFLASH_AFU_DEBUG:
3496 known_ioctl = true;
3497 idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3498 size = ioctl_tbl[idx].size;
3499 do_ioctl = ioctl_tbl[idx].ioctl;
3500
3501 if (likely(do_ioctl))
3502 break;
3503
3504 /* fall through */
3505 default:
3506 rc = -EINVAL;
3507 goto out;
3508 }
3509
3510 if (unlikely(copy_from_user(&buf, uarg, size))) {
3511 dev_err(dev, "%s: copy_from_user() fail "
3512 "size=%lu cmd=%d (%s) uarg=%p\n",
3513 __func__, size, cmd, decode_hioctl(cmd), uarg);
3514 rc = -EFAULT;
3515 goto out;
3516 }
3517
3518 hdr = (struct ht_cxlflash_hdr *)&buf;
3519 if (hdr->version != HT_CXLFLASH_VERSION_0) {
3520 dev_dbg(dev, "%s: Version %u not supported for %s\n",
3521 __func__, hdr->version, decode_hioctl(cmd));
3522 rc = -EINVAL;
3523 goto out;
3524 }
3525
3526 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3527 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3528 rc = -EINVAL;
3529 goto out;
3530 }
3531
3532 rc = do_ioctl(cfg, (void *)&buf);
3533 if (likely(!rc))
3534 if (unlikely(copy_to_user(uarg, &buf, size))) {
3535 dev_err(dev, "%s: copy_to_user() fail "
3536 "size=%lu cmd=%d (%s) uarg=%p\n",
3537 __func__, size, cmd, decode_hioctl(cmd), uarg);
3538 rc = -EFAULT;
3539 }
3540
3541 /* fall through to exit */
3542
3543 out:
3544 up_read(&cfg->ioctl_rwsem);
3545 if (unlikely(rc && known_ioctl))
3546 dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3547 __func__, decode_hioctl(cmd), cmd, rc);
3548 else
3549 dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3550 __func__, decode_hioctl(cmd), cmd, rc);
3551 return rc;
3552 }
3553
3554 /*
3555 * Character device file operations
3556 */
3557 static const struct file_operations cxlflash_chr_fops = {
3558 .owner = THIS_MODULE,
3559 .open = cxlflash_chr_open,
3560 .unlocked_ioctl = cxlflash_chr_ioctl,
3561 .compat_ioctl = cxlflash_chr_ioctl,
3562 };
3563
3564 /**
3565 * init_chrdev() - initialize the character device for the host
3566 * @cfg: Internal structure associated with the host.
3567 *
3568 * Return: 0 on success, -errno on failure
3569 */
3570 static int init_chrdev(struct cxlflash_cfg *cfg)
3571 {
3572 struct device *dev = &cfg->dev->dev;
3573 struct device *char_dev;
3574 dev_t devno;
3575 int minor;
3576 int rc = 0;
3577
3578 minor = cxlflash_get_minor();
3579 if (unlikely(minor < 0)) {
3580 dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3581 rc = -ENOSPC;
3582 goto out;
3583 }
3584
3585 devno = MKDEV(cxlflash_major, minor);
3586 cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3587
3588 rc = cdev_add(&cfg->cdev, devno, 1);
3589 if (rc) {
3590 dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3591 goto err1;
3592 }
3593
3594 char_dev = device_create(cxlflash_class, NULL, devno,
3595 NULL, "cxlflash%d", minor);
3596 if (IS_ERR(char_dev)) {
3597 rc = PTR_ERR(char_dev);
3598 dev_err(dev, "%s: device_create failed rc=%d\n",
3599 __func__, rc);
3600 goto err2;
3601 }
3602
3603 cfg->chardev = char_dev;
3604 out:
3605 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3606 return rc;
3607 err2:
3608 cdev_del(&cfg->cdev);
3609 err1:
3610 cxlflash_put_minor(minor);
3611 goto out;
3612 }
3613
3614 /**
3615 * cxlflash_probe() - PCI entry point to add host
3616 * @pdev: PCI device associated with the host.
3617 * @dev_id: PCI device id associated with device.
3618 *
3619 * The device will initially start out in a 'probing' state and
3620 * transition to the 'normal' state at the end of a successful
3621 * probe. Should an EEH event occur during probe, the notification
3622 * thread (error_detected()) will wait until the probe handler
3623 * is nearly complete. At that time, the device will be moved to
3624 * a 'probed' state and the EEH thread woken up to drive the slot
3625 * reset and recovery (device moves to 'normal' state). Meanwhile,
3626 * the probe will be allowed to exit successfully.
3627 *
3628 * Return: 0 on success, -errno on failure
3629 */
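/*
 * Sketch of the probe sequence as tracked by cfg->init_state, which
 * lets cxlflash_remove() unwind a partially completed probe:
 *   INIT_STATE_NONE -> INIT_STATE_PCI -> INIT_STATE_AFU ->
 *   INIT_STATE_SCSI -> INIT_STATE_CDEV
 */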
3630 static int cxlflash_probe(struct pci_dev *pdev,
3631 const struct pci_device_id *dev_id)
3632 {
3633 struct Scsi_Host *host;
3634 struct cxlflash_cfg *cfg = NULL;
3635 struct device *dev = &pdev->dev;
3636 struct dev_dependent_vals *ddv;
3637 int rc = 0;
3638 int k;
3639
3640 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3641 __func__, pdev->irq);
3642
3643 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3644 driver_template.max_sectors = ddv->max_sectors;
3645
3646 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3647 if (!host) {
3648 dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3649 rc = -ENOMEM;
3650 goto out;
3651 }
3652
3653 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3654 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3655 host->unique_id = host->host_no;
3656 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3657
3658 cfg = shost_priv(host);
3659 cfg->host = host;
3660 rc = alloc_mem(cfg);
3661 if (rc) {
3662 dev_err(dev, "%s: alloc_mem failed\n", __func__);
3663 rc = -ENOMEM;
3664 scsi_host_put(cfg->host);
3665 goto out;
3666 }
3667
3668 cfg->init_state = INIT_STATE_NONE;
3669 cfg->dev = pdev;
3670 cfg->cxl_fops = cxlflash_cxl_fops;
3671
3672 /*
3673 * Promoted LUNs move to the top of the LUN table. The rest stay on
3674 * the bottom half. The bottom half grows from the end (index = 255),
3675 * whereas the top half grows from the beginning (index = 0).
3676 *
3677 * Initialize the last LUN index for all possible ports.
3678 */
3679 cfg->promote_lun_index = 0;
3680
3681 for (k = 0; k < MAX_FC_PORTS; k++)
3682 cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3683
3684 cfg->dev_id = (struct pci_device_id *)dev_id;
3685
3686 init_waitqueue_head(&cfg->tmf_waitq);
3687 init_waitqueue_head(&cfg->reset_waitq);
3688
3689 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3690 cfg->lr_state = LINK_RESET_INVALID;
3691 cfg->lr_port = -1;
3692 spin_lock_init(&cfg->tmf_slock);
3693 mutex_init(&cfg->ctx_tbl_list_mutex);
3694 mutex_init(&cfg->ctx_recovery_mutex);
3695 init_rwsem(&cfg->ioctl_rwsem);
3696 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3697 INIT_LIST_HEAD(&cfg->lluns);
3698
3699 pci_set_drvdata(pdev, cfg);
3700
3701 cfg->cxl_afu = cxl_pci_to_afu(pdev);
3702
3703 rc = init_pci(cfg);
3704 if (rc) {
3705 dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3706 goto out_remove;
3707 }
3708 cfg->init_state = INIT_STATE_PCI;
3709
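	/*
	 * Note: should init_afu() fail while an EEH thread is already
	 * asleep on reset_waitq, the failure is deliberately not treated
	 * as fatal; the probe is allowed to finish so the waiting thread
	 * can drive the slot reset and recovery described above.
	 */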
3710 rc = init_afu(cfg);
3711 if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3712 dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3713 goto out_remove;
3714 }
3715 cfg->init_state = INIT_STATE_AFU;
3716
3717 rc = init_scsi(cfg);
3718 if (rc) {
3719 dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3720 goto out_remove;
3721 }
3722 cfg->init_state = INIT_STATE_SCSI;
3723
3724 rc = init_chrdev(cfg);
3725 if (rc) {
3726 dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3727 goto out_remove;
3728 }
3729 cfg->init_state = INIT_STATE_CDEV;
3730
3731 if (wq_has_sleeper(&cfg->reset_waitq)) {
3732 cfg->state = STATE_PROBED;
3733 wake_up_all(&cfg->reset_waitq);
3734 } else
3735 cfg->state = STATE_NORMAL;
3736 out:
3737 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3738 return rc;
3739
3740 out_remove:
3741 cxlflash_remove(pdev);
3742 goto out;
3743 }
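
/*
 * Illustrative sketch (not compiled): the probe/EEH handshake above is a
 * standard wait_event()/wake_up_all() pairing. Reduced to its essentials,
 * the two threads interact roughly as follows:
 *
 *	EEH thread (cxlflash_pci_error_detected):
 *		wait_event(cfg->reset_waitq, cfg->state != STATE_PROBING);
 *
 *	probe thread (tail of cxlflash_probe):
 *		if (wq_has_sleeper(&cfg->reset_waitq)) {
 *			cfg->state = STATE_PROBED;
 *			wake_up_all(&cfg->reset_waitq);
 *		}
 */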
3744
3745 /**
3746 * cxlflash_pci_error_detected() - called when a PCI error is detected
3747 * @pdev: PCI device struct.
3748 * @state: PCI channel state.
3749 *
3750 * When an EEH occurs during an active reset, wait until the reset is
3751 * complete and then take action based upon the device state.
3752 *
3753 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3754 */
3755 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3756 pci_channel_state_t state)
3757 {
3758 int rc = 0;
3759 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3760 struct device *dev = &cfg->dev->dev;
3761
3762 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3763
3764 switch (state) {
3765 case pci_channel_io_frozen:
3766 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3767 cfg->state != STATE_PROBING);
3768 if (cfg->state == STATE_FAILTERM)
3769 return PCI_ERS_RESULT_DISCONNECT;
3770
3771 cfg->state = STATE_RESET;
3772 scsi_block_requests(cfg->host);
3773 drain_ioctls(cfg);
3774 rc = cxlflash_mark_contexts_error(cfg);
3775 if (unlikely(rc))
3776 dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3777 __func__, rc);
3778 term_afu(cfg);
3779 return PCI_ERS_RESULT_NEED_RESET;
3780 case pci_channel_io_perm_failure:
3781 cfg->state = STATE_FAILTERM;
3782 wake_up_all(&cfg->reset_waitq);
3783 scsi_unblock_requests(cfg->host);
3784 return PCI_ERS_RESULT_DISCONNECT;
3785 default:
3786 break;
3787 }
3788 return PCI_ERS_RESULT_NEED_RESET;
3789 }
3790
3791 /**
3792 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3793 * @pdev: PCI device struct.
3794 *
3795 * This routine is called by the pci error recovery code after the PCI
3796 * slot has been reset, just before we should resume normal operations.
3797 *
3798 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3799 */
3800 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3801 {
3802 int rc = 0;
3803 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3804 struct device *dev = &cfg->dev->dev;
3805
3806 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3807
3808 rc = init_afu(cfg);
3809 if (unlikely(rc)) {
3810 dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3811 return PCI_ERS_RESULT_DISCONNECT;
3812 }
3813
3814 return PCI_ERS_RESULT_RECOVERED;
3815 }
3816
3817 /**
3818 * cxlflash_pci_resume() - called when normal operation can resume
3819 * @pdev: PCI device struct
3820 */
3821 static void cxlflash_pci_resume(struct pci_dev *pdev)
3822 {
3823 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3824 struct device *dev = &cfg->dev->dev;
3825
3826 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3827
3828 cfg->state = STATE_NORMAL;
3829 wake_up_all(&cfg->reset_waitq);
3830 scsi_unblock_requests(cfg->host);
3831 }
3832
3833 /**
3834 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3835 * @dev: Character device.
3836  * @mode: Optional mode to assign to the device node (unused here).
3837 *
3838 * Return: Allocated string describing the devtmpfs structure.
3839 */
3840 static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3841 {
3842 return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3843 }
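
/*
 * Example (illustrative): device_create() in init_chrdev() names each
 * node "cxlflash%d", so together with the hook above devtmpfs is
 * expected to surface the first adapter as /dev/cxlflash/cxlflash0.
 * A hypothetical user space open:
 *
 *	int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *	if (fd < 0)
 *		err(1, "open");
 */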
3844
3845 /**
3846 * cxlflash_class_init() - create character device class
3847 *
3848 * Return: 0 on success, -errno on failure
3849 */
3850 static int cxlflash_class_init(void)
3851 {
3852 dev_t devno;
3853 int rc = 0;
3854
3855 rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3856 if (unlikely(rc)) {
3857 pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3858 goto out;
3859 }
3860
3861 cxlflash_major = MAJOR(devno);
3862
3863 cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3864 if (IS_ERR(cxlflash_class)) {
3865 rc = PTR_ERR(cxlflash_class);
3866 pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3867 goto err;
3868 }
3869
3870 cxlflash_class->devnode = cxlflash_devnode;
3871 out:
3872 pr_debug("%s: returning rc=%d\n", __func__, rc);
3873 return rc;
3874 err:
3875 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3876 goto out;
3877 }
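
/*
 * Note (illustrative): alloc_chrdev_region() above reserves minors 0
 * through CXLFLASH_MAX_ADAPTERS - 1 under a dynamically assigned major,
 * which should then be visible in /proc/devices, e.g.:
 *
 *	$ grep cxlflash /proc/devices
 *	240 cxlflash		(the major number will vary)
 */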
3878
3879 /**
3880 * cxlflash_class_exit() - destroy character device class
3881 */
3882 static void cxlflash_class_exit(void)
3883 {
3884 dev_t devno = MKDEV(cxlflash_major, 0);
3885
3886 class_destroy(cxlflash_class);
3887 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3888 }
3889
3890 static const struct pci_error_handlers cxlflash_err_handler = {
3891 .error_detected = cxlflash_pci_error_detected,
3892 .slot_reset = cxlflash_pci_slot_reset,
3893 .resume = cxlflash_pci_resume,
3894 };
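
/*
 * Illustrative sketch (not compiled): on a recoverable EEH event, the
 * PCI error recovery core is expected to call the handlers above in
 * roughly this order:
 *
 *	cxlflash_pci_error_detected(pdev, pci_channel_io_frozen)
 *		-> PCI_ERS_RESULT_NEED_RESET
 *	cxlflash_pci_slot_reset(pdev)
 *		-> PCI_ERS_RESULT_RECOVERED
 *	cxlflash_pci_resume(pdev)
 */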
3895
3896 /*
3897 * PCI device structure
3898 */
3899 static struct pci_driver cxlflash_driver = {
3900 .name = CXLFLASH_NAME,
3901 .id_table = cxlflash_pci_table,
3902 .probe = cxlflash_probe,
3903 .remove = cxlflash_remove,
3904 .shutdown = cxlflash_remove,
3905 .err_handler = &cxlflash_err_handler,
3906 };
3907
3908 /**
3909 * init_cxlflash() - module entry point
3910 *
3911 * Return: 0 on success, -errno on failure
3912 */
3913 static int __init init_cxlflash(void)
3914 {
3915 int rc;
3916
3917 check_sizes();
3918 cxlflash_list_init();
3919 rc = cxlflash_class_init();
3920 if (unlikely(rc))
3921 goto out;
3922
3923 rc = pci_register_driver(&cxlflash_driver);
3924 if (unlikely(rc))
3925 goto err;
3926 out:
3927 pr_debug("%s: returning rc=%d\n", __func__, rc);
3928 return rc;
3929 err:
3930 cxlflash_class_exit();
3931 goto out;
3932 }
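
/*
 * Note: class and chrdev region setup precedes pci_register_driver() so
 * that cxlflash_probe() can create character devices as adapters are
 * discovered; exit_cxlflash() below unregisters the driver before
 * tearing the class down, after releasing global LUN and error page
 * state.
 */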
3933
3934 /**
3935 * exit_cxlflash() - module exit point
3936 */
3937 static void __exit exit_cxlflash(void)
3938 {
3939 cxlflash_term_global_luns();
3940 cxlflash_free_errpage();
3941
3942 pci_unregister_driver(&cxlflash_driver);
3943 cxlflash_class_exit();
3944 }
3945
3946 module_init(init_cxlflash);
3947 module_exit(exit_cxlflash);