drivers/scsi/cxlflash/main.c
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/delay.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19
20#include <asm/unaligned.h>
21
22#include <misc/cxl.h>
23
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_host.h>
65be2c79 26#include <uapi/scsi/cxlflash_ioctl.h>
27
28#include "main.h"
29#include "sislite.h"
30#include "common.h"
31
32MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35MODULE_LICENSE("GPL");
36
37/**
38 * process_cmd_err() - command error handler
39 * @cmd: AFU command that experienced the error.
40 * @scp: SCSI command associated with the AFU command in error.
41 *
42 * Translates error bits from AFU command to SCSI command results.
43 */
44static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
45{
46 struct sisl_ioarcb *ioarcb;
47 struct sisl_ioasa *ioasa;
8396012f 48 u32 resid;
49
50 if (unlikely(!cmd))
51 return;
52
53 ioarcb = &(cmd->rcb);
54 ioasa = &(cmd->sa);
55
56 if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
57 resid = ioasa->resid;
58 scsi_set_resid(scp, resid);
59 pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
60 __func__, cmd, scp, resid);
61 }
62
63 if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
 64 pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
65 __func__, cmd, scp);
66 scp->result = (DID_ERROR << 16);
67 }
68
69 pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
4392ba49 70 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
71 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
72 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
73 ioasa->fc_extra);
74
75 if (ioasa->rc.scsi_rc) {
76 /* We have a SCSI status */
77 if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
78 memcpy(scp->sense_buffer, ioasa->sense_data,
79 SISL_SENSE_DATA_LEN);
80 scp->result = ioasa->rc.scsi_rc;
81 } else
82 scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
83 }
84
85 /*
86 * We encountered an error. Set scp->result based on nature
87 * of error.
88 */
89 if (ioasa->rc.fc_rc) {
90 /* We have an FC status */
91 switch (ioasa->rc.fc_rc) {
92 case SISL_FC_RC_LINKDOWN:
93 scp->result = (DID_REQUEUE << 16);
94 break;
95 case SISL_FC_RC_RESID:
96 /* This indicates an FCP resid underrun */
97 if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
 98 /* If the SISL_RC_FLAGS_OVERRUN flag was set,
 99 * then this error will be handled elsewhere.
 100 * If not, it must be handled here.
 101 * This is probably an AFU bug.
 102 */
103 scp->result = (DID_ERROR << 16);
104 }
105 break;
106 case SISL_FC_RC_RESIDERR:
107 /* Resid mismatch between adapter and device */
108 case SISL_FC_RC_TGTABORT:
109 case SISL_FC_RC_ABORTOK:
110 case SISL_FC_RC_ABORTFAIL:
111 case SISL_FC_RC_NOLOGI:
112 case SISL_FC_RC_ABORTPEND:
113 case SISL_FC_RC_WRABORTPEND:
114 case SISL_FC_RC_NOEXP:
115 case SISL_FC_RC_INUSE:
116 scp->result = (DID_ERROR << 16);
117 break;
118 }
119 }
120
121 if (ioasa->rc.afu_rc) {
122 /* We have an AFU error */
123 switch (ioasa->rc.afu_rc) {
124 case SISL_AFU_RC_NO_CHANNELS:
8396012f 125 scp->result = (DID_NO_CONNECT << 16);
126 break;
127 case SISL_AFU_RC_DATA_DMA_ERR:
128 switch (ioasa->afu_extra) {
129 case SISL_AFU_DMA_ERR_PAGE_IN:
130 /* Retry */
131 scp->result = (DID_IMM_RETRY << 16);
132 break;
133 case SISL_AFU_DMA_ERR_INVALID_EA:
134 default:
135 scp->result = (DID_ERROR << 16);
136 }
137 break;
138 case SISL_AFU_RC_OUT_OF_DATA_BUFS:
139 /* Retry */
140 scp->result = (DID_ALLOC_FAILURE << 16);
141 break;
142 default:
143 scp->result = (DID_ERROR << 16);
144 }
145 }
146}
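/*
 * Illustrative sketch (not part of the driver): the scp->result values set in
 * process_cmd_err() pack a SCSI mid-layer "host byte" into bits 16-23 of a
 * 32-bit result word, with the SCSI status byte in bits 0-7.  A minimal
 * stand-alone model of that packing (EX_DID_ERROR is an assumed value used
 * for illustration only):
 */
#include <stdint.h>

#define EX_DID_OK    0x00u
#define EX_DID_ERROR 0x07u

static inline uint32_t ex_make_result(uint8_t host_byte, uint8_t scsi_status)
{
    return ((uint32_t)host_byte << 16) | scsi_status;
}
/* ex_make_result(EX_DID_ERROR, 0x02) mirrors "scsi_rc | (DID_ERROR << 16)". */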
147
148/**
149 * cmd_complete() - command completion handler
150 * @cmd: AFU command that has completed.
151 *
 152 * Prepares and submits a command that has either completed or timed out to
 153 * the SCSI stack. Non-internal commands (rcb.scp populated) are returned to
 154 * the mid-layer; internal commands signal their completion event.
155 */
156static void cmd_complete(struct afu_cmd *cmd)
157{
158 struct scsi_cmnd *scp;
159 ulong lock_flags;
160 struct afu *afu = cmd->parent;
161 struct cxlflash_cfg *cfg = afu->parent;
162 bool cmd_is_tmf;
163
164 spin_lock_irqsave(&cmd->slock, lock_flags);
165 cmd->sa.host_use_b[0] |= B_DONE;
166 spin_unlock_irqrestore(&cmd->slock, lock_flags);
167
168 if (cmd->rcb.scp) {
169 scp = cmd->rcb.scp;
8396012f 170 if (unlikely(cmd->sa.ioasc))
171 process_cmd_err(cmd, scp);
172 else
173 scp->result = (DID_OK << 16);
174
c21e0bbf 175 cmd_is_tmf = cmd->cmd_tmf;
c21e0bbf 176
177 pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
178 "ioasc=%d\n", __func__, scp, scp->result,
179 cmd->sa.ioasc);
c21e0bbf 180
181 scsi_dma_unmap(scp);
182 scp->scsi_done(scp);
183
184 if (cmd_is_tmf) {
018d1dc9 185 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
186 cfg->tmf_active = false;
187 wake_up_all_locked(&cfg->tmf_waitq);
018d1dc9 188 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
189 }
190 } else
191 complete(&cmd->cevent);
192}
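/*
 * Illustrative sketch (not part of the driver): cmd_complete() and the
 * timeout path (context_reset()) can race on the same command, so both take
 * cmd->slock and consult the B_DONE bit so that only the first path acts.
 * A minimal user-space model of that arbitration using pthreads:
 */
#include <pthread.h>
#include <stdbool.h>

struct ex_cmd_state {
    pthread_mutex_t lock;
    bool done;
};

/* Returns true only for whichever caller marked the command done first. */
static bool ex_claim_completion(struct ex_cmd_state *s)
{
    bool first;

    pthread_mutex_lock(&s->lock);
    first = !s->done;
    s->done = true;
    pthread_mutex_unlock(&s->lock);
    return first;
}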
193
194/**
195 * context_reset() - timeout handler for AFU commands
196 * @cmd: AFU command that timed out.
197 *
198 * Sends a reset to the AFU.
199 */
200static void context_reset(struct afu_cmd *cmd)
201{
202 int nretry = 0;
203 u64 rrin = 0x1;
15305514 204 struct afu *afu = cmd->parent;
205 struct cxlflash_cfg *cfg = afu->parent;
206 struct device *dev = &cfg->dev->dev;
207 ulong lock_flags;
208
209 pr_debug("%s: cmd=%p\n", __func__, cmd);
210
211 spin_lock_irqsave(&cmd->slock, lock_flags);
212
213 /* Already completed? */
214 if (cmd->sa.host_use_b[0] & B_DONE) {
215 spin_unlock_irqrestore(&cmd->slock, lock_flags);
216 return;
217 }
218
219 cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
220 spin_unlock_irqrestore(&cmd->slock, lock_flags);
221
222 writeq_be(rrin, &afu->host_map->ioarrin);
223 do {
224 rrin = readq_be(&afu->host_map->ioarrin);
225 if (rrin != 0x1)
226 break;
227 /* Double delay each time */
ea765431 228 udelay(1 << nretry);
15305514 229 } while (nretry++ < MC_ROOM_RETRY_CNT);
230
231 dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
232 __func__, rrin, nretry);
233}
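/*
 * Illustrative sketch (not part of the driver): context_reset() polls the
 * IOARRIN register with a doubling delay (udelay(1 << nretry)) until the
 * reset bit clears or the retry budget is exhausted.  The same backoff
 * pattern in isolation, with hypothetical read_reg()/delay_us() helpers:
 */
static int ex_poll_until_acked(unsigned long long (*read_reg)(void),
                               void (*delay_us)(unsigned int), int max_retry)
{
    int nretry = 0;

    do {
        if (read_reg() != 0x1)
            return 0;               /* hardware acknowledged the reset */
        delay_us(1u << nretry);     /* 1, 2, 4, 8, ... microseconds */
    } while (nretry++ < max_retry);

    return -1;                      /* timed out */
}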
234
235/**
236 * send_cmd() - sends an AFU command
237 * @afu: AFU associated with the host.
238 * @cmd: AFU command to send.
239 *
240 * Return:
1284fb0c 241 * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
242 */
243static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
244{
245 struct cxlflash_cfg *cfg = afu->parent;
246 struct device *dev = &cfg->dev->dev;
15305514 247 int rc = 0;
248 s64 room;
249 ulong lock_flags;
250
251 /*
252 * To avoid the performance penalty of MMIO, spread the update of
253 * 'room' over multiple commands.
15305514 254 */
255 spin_lock_irqsave(&afu->rrin_slock, lock_flags);
256 if (--afu->room < 0) {
257 room = readq_be(&afu->host_map->cmd_room);
258 if (room <= 0) {
259 dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
260 "0x%02X, room=0x%016llX\n",
261 __func__, cmd->rcb.cdb[0], room);
262 afu->room = 0;
263 rc = SCSI_MLQUEUE_HOST_BUSY;
264 goto out;
15305514 265 }
11f7b184 266 afu->room = room - 1;
267 }
268
269 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
270out:
11f7b184 271 spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
272 pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
273 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
274 return rc;
275}
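/*
 * Illustrative sketch (not part of the driver): send_cmd() avoids an MMIO
 * read of cmd_room on every submission by caching the last value read and
 * decrementing that cache per command; the register is re-read only once the
 * cached credit runs out.  A single-threaded model of that credit cache with
 * a hypothetical read_room() helper (the driver does this under a spinlock):
 */
static long ex_cached_room;

static int ex_reserve_credit(long (*read_room)(void))
{
    if (--ex_cached_room < 0) {
        long room = read_room();    /* the expensive MMIO read */

        if (room <= 0) {
            ex_cached_room = 0;
            return -1;              /* caller should retry later */
        }
        ex_cached_room = room - 1;  /* this command consumes one credit */
    }
    return 0;                       /* safe to ring the doorbell */
}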
276
277/**
278 * wait_resp() - polls for a response or timeout to a sent AFU command
279 * @afu: AFU associated with the host.
280 * @cmd: AFU command that was sent.
281 */
282static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
283{
284 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
285
286 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
287 if (!timeout)
288 context_reset(cmd);
289
290 if (unlikely(cmd->sa.ioasc != 0))
291 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
292 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
293 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
294 cmd->sa.rc.fc_rc);
295}
296
297/**
298 * send_tmf() - sends a Task Management Function (TMF)
 299 * @afu: AFU associated with the host.
300 * @scp: SCSI command from stack.
301 * @tmfcmd: TMF command to send.
302 *
303 * Return:
1284fb0c 304 * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
305 */
306static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
307{
5fbb96c8 308 struct afu_cmd *cmd = sc_to_afucz(scp);
309
310 u32 port_sel = scp->device->channel + 1;
311 short lflag = 0;
312 struct Scsi_Host *host = scp->device->host;
313 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
4392ba49 314 struct device *dev = &cfg->dev->dev;
315 ulong lock_flags;
316 int rc = 0;
018d1dc9 317 ulong to;
c21e0bbf 318
319 /* When Task Management Function is active do not send another */
320 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
c21e0bbf 321 if (cfg->tmf_active)
322 wait_event_interruptible_lock_irq(cfg->tmf_waitq,
323 !cfg->tmf_active,
324 cfg->tmf_slock);
325 cfg->tmf_active = true;
326 cmd->cmd_tmf = true;
018d1dc9 327 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
328
329 cmd->rcb.ctx_id = afu->ctx_hndl;
5fbb96c8 330 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
331 cmd->rcb.port_sel = port_sel;
332 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
333
334 lflag = SISL_REQ_FLAGS_TMF_CMD;
335
336 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
337 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
338
5fbb96c8 339 /* Stash the scp in the command, for reuse during interrupt */
c21e0bbf 340 cmd->rcb.scp = scp;
341 cmd->parent = afu;
342 spin_lock_init(&cmd->slock);
343
344 /* Copy the CDB from the cmd passed in */
345 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
346
347 /* Send the command */
15305514 348 rc = send_cmd(afu, cmd);
c21e0bbf 349 if (unlikely(rc)) {
018d1dc9 350 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
c21e0bbf 351 cfg->tmf_active = false;
018d1dc9 352 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
353 goto out;
354 }
355
356 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
357 to = msecs_to_jiffies(5000);
358 to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
359 !cfg->tmf_active,
360 cfg->tmf_slock,
361 to);
362 if (!to) {
363 cfg->tmf_active = false;
364 dev_err(dev, "%s: TMF timed out!\n", __func__);
365 rc = -1;
366 }
367 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
368out:
369 return rc;
370}
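/*
 * Illustrative sketch (not part of the driver): only one TMF may be in
 * flight, so send_tmf() serializes on the tmf_active flag under tmf_slock and
 * sleeps on tmf_waitq both to get its turn and to wait for the completion
 * interrupt to clear the flag.  A user-space analogue of that single-slot
 * serialization using pthreads:
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ex_tmf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ex_tmf_cv = PTHREAD_COND_INITIALIZER;
static bool ex_tmf_active;

static void ex_tmf_begin(void)
{
    pthread_mutex_lock(&ex_tmf_lock);
    while (ex_tmf_active)               /* wait for the previous TMF */
        pthread_cond_wait(&ex_tmf_cv, &ex_tmf_lock);
    ex_tmf_active = true;
    pthread_mutex_unlock(&ex_tmf_lock);
}

static void ex_tmf_end(void)            /* called from the completion path */
{
    pthread_mutex_lock(&ex_tmf_lock);
    ex_tmf_active = false;
    pthread_cond_broadcast(&ex_tmf_cv);
    pthread_mutex_unlock(&ex_tmf_lock);
}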
371
372static void afu_unmap(struct kref *ref)
373{
374 struct afu *afu = container_of(ref, struct afu, mapcount);
375
376 if (likely(afu->afu_map)) {
377 cxl_psa_unmap((void __iomem *)afu->afu_map);
378 afu->afu_map = NULL;
379 }
380}
381
382/**
383 * cxlflash_driver_info() - information handler for this host driver
384 * @host: SCSI host associated with device.
385 *
386 * Return: A string describing the device.
387 */
388static const char *cxlflash_driver_info(struct Scsi_Host *host)
389{
390 return CXLFLASH_ADAPTER_NAME;
391}
392
393/**
394 * cxlflash_queuecommand() - sends a mid-layer request
395 * @host: SCSI host associated with device.
396 * @scp: SCSI command to send.
397 *
1284fb0c 398 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
399 */
400static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
401{
402 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
403 struct afu *afu = cfg->afu;
4392ba49 404 struct device *dev = &cfg->dev->dev;
5fbb96c8 405 struct afu_cmd *cmd = sc_to_afucz(scp);
406 u32 port_sel = scp->device->channel + 1;
407 int nseg, i, ncount;
408 struct scatterlist *sg;
409 ulong lock_flags;
410 short lflag = 0;
411 int rc = 0;
b45cdbaf 412 int kref_got = 0;
c21e0bbf 413
414 dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
415 "cdb=(%08X-%08X-%08X-%08X)\n",
416 __func__, scp, host->host_no, scp->device->channel,
417 scp->device->id, scp->device->lun,
418 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
419 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
420 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
421 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
c21e0bbf 422
423 /*
424 * If a Task Management Function is active, wait for it to complete
425 * before continuing with regular commands.
426 */
018d1dc9 427 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
c21e0bbf 428 if (cfg->tmf_active) {
018d1dc9 429 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
430 rc = SCSI_MLQUEUE_HOST_BUSY;
431 goto out;
432 }
018d1dc9 433 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
c21e0bbf 434
5cdac81a 435 switch (cfg->state) {
439e85c1 436 case STATE_RESET:
4392ba49 437 dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
438 rc = SCSI_MLQUEUE_HOST_BUSY;
439 goto out;
440 case STATE_FAILTERM:
4392ba49 441 dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
442 scp->result = (DID_NO_CONNECT << 16);
443 scp->scsi_done(scp);
444 rc = 0;
445 goto out;
446 default:
447 break;
448 }
449
450 kref_get(&cfg->afu->mapcount);
451 kref_got = 1;
452
c21e0bbf 453 cmd->rcb.ctx_id = afu->ctx_hndl;
5fbb96c8 454 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
455 cmd->rcb.port_sel = port_sel;
456 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
457
458 if (scp->sc_data_direction == DMA_TO_DEVICE)
459 lflag = SISL_REQ_FLAGS_HOST_WRITE;
460 else
461 lflag = SISL_REQ_FLAGS_HOST_READ;
462
463 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
464 SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
465
466 /* Stash the scp in the reserved field, for reuse during interrupt */
467 cmd->rcb.scp = scp;
468 cmd->parent = afu;
469 spin_lock_init(&cmd->slock);
470
471 nseg = scsi_dma_map(scp);
472 if (unlikely(nseg < 0)) {
4392ba49 473 dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
474 __func__, nseg);
475 rc = SCSI_MLQUEUE_HOST_BUSY;
476 goto out;
477 }
478
479 ncount = scsi_sg_count(scp);
480 scsi_for_each_sg(scp, sg, ncount, i) {
481 cmd->rcb.data_len = sg_dma_len(sg);
482 cmd->rcb.data_ea = sg_dma_address(sg);
483 }
484
485 /* Copy the CDB from the scsi_cmnd passed in */
486 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
487
488 /* Send the command */
15305514 489 rc = send_cmd(afu, cmd);
5fbb96c8 490 if (unlikely(rc))
c21e0bbf 491 scsi_dma_unmap(scp);
492
493out:
494 if (kref_got)
495 kref_put(&afu->mapcount, afu_unmap);
4392ba49 496 pr_devel("%s: returning rc=%d\n", __func__, rc);
497 return rc;
498}
499
500/**
15305514 501 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
1284fb0c 502 * @cfg: Internal structure associated with the host.
c21e0bbf 503 */
15305514 504static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
c21e0bbf 505{
15305514 506 struct pci_dev *pdev = cfg->dev;
c21e0bbf 507
508 if (pci_channel_offline(pdev))
509 wait_event_timeout(cfg->reset_waitq,
510 !pci_channel_offline(pdev),
511 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
512}
513
514/**
15305514 515 * free_mem() - free memory associated with the AFU
1284fb0c 516 * @cfg: Internal structure associated with the host.
c21e0bbf 517 */
15305514 518static void free_mem(struct cxlflash_cfg *cfg)
c21e0bbf 519{
15305514 520 struct afu *afu = cfg->afu;
c21e0bbf 521
15305514 522 if (cfg->afu) {
523 free_pages((ulong)afu, get_order(sizeof(struct afu)));
524 cfg->afu = NULL;
5cdac81a 525 }
526}
527
528/**
15305514 529 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
1284fb0c 530 * @cfg: Internal structure associated with the host.
c21e0bbf 531 *
15305514 532 * Safe to call with AFU in a partially allocated/initialized state.
533 *
 534 * Unmaps the MMIO space and drops the reference to the AFU mapping.
c21e0bbf 536 */
15305514 537static void stop_afu(struct cxlflash_cfg *cfg)
c21e0bbf 538{
15305514 539 struct afu *afu = cfg->afu;
c21e0bbf 540
15305514 541 if (likely(afu)) {
c21e0bbf 542 if (likely(afu->afu_map)) {
1786f4a0 543 cxl_psa_unmap((void __iomem *)afu->afu_map);
544 afu->afu_map = NULL;
545 }
b45cdbaf 546 kref_put(&afu->mapcount, afu_unmap);
547 }
548}
549
550/**
9526f360 551 * term_intr() - disables all AFU interrupts
1284fb0c 552 * @cfg: Internal structure associated with the host.
553 * @level: Depth of allocation, where to begin waterfall tear down.
554 *
555 * Safe to call with AFU/MC in partially allocated/initialized state.
556 */
9526f360 557static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
c21e0bbf 558{
c21e0bbf 559 struct afu *afu = cfg->afu;
4392ba49 560 struct device *dev = &cfg->dev->dev;
561
562 if (!afu || !cfg->mcctx) {
9526f360 563 dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
564 return;
565 }
566
567 switch (level) {
568 case UNMAP_THREE:
569 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
570 case UNMAP_TWO:
571 cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
572 case UNMAP_ONE:
573 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
574 case FREE_IRQ:
575 cxl_free_afu_irqs(cfg->mcctx);
576 /* fall through */
577 case UNDO_NOOP:
578 /* No action required */
579 break;
580 }
581}
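/*
 * Illustrative sketch (not part of the driver): term_intr() relies on switch
 * fall-through so that passing the deepest undo level unwinds every shallower
 * step in order.  The same "waterfall teardown" idiom with hypothetical undo
 * steps:
 */
enum ex_undo_level { EX_NOOP, EX_UNDO_A, EX_UNDO_B, EX_UNDO_C };

static void ex_teardown(enum ex_undo_level level)
{
    switch (level) {
    case EX_UNDO_C:
        /* undo step C */
        /* fall through */
    case EX_UNDO_B:
        /* undo step B */
        /* fall through */
    case EX_UNDO_A:
        /* undo step A */
        /* fall through */
    case EX_NOOP:
        break;      /* nothing was set up */
    }
}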
582
583/**
584 * term_mc() - terminates the master context
585 * @cfg: Internal structure associated with the host.
587 *
588 * Safe to call with AFU/MC in partially allocated/initialized state.
589 */
590static void term_mc(struct cxlflash_cfg *cfg)
591{
592 int rc = 0;
593 struct afu *afu = cfg->afu;
594 struct device *dev = &cfg->dev->dev;
595
596 if (!afu || !cfg->mcctx) {
597 dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
598 return;
c21e0bbf 599 }
600
601 rc = cxl_stop_context(cfg->mcctx);
602 WARN_ON(rc);
603 cfg->mcctx = NULL;
604}
605
606/**
607 * term_afu() - terminates the AFU
1284fb0c 608 * @cfg: Internal structure associated with the host.
609 *
610 * Safe to call with AFU/MC in partially allocated/initialized state.
611 */
612static void term_afu(struct cxlflash_cfg *cfg)
613{
614 /*
615 * Tear down is carefully orchestrated to ensure
616 * no interrupts can come in when the problem state
617 * area is unmapped.
618 *
619 * 1) Disable all AFU interrupts
620 * 2) Unmap the problem state area
621 * 3) Stop the master context
622 */
623 term_intr(cfg, UNMAP_THREE);
624 if (cfg->afu)
625 stop_afu(cfg);
626
9526f360 627 term_mc(cfg);
6ded8b3c 628
629 pr_debug("%s: returning\n", __func__);
630}
631
632/**
633 * notify_shutdown() - notifies device of pending shutdown
634 * @cfg: Internal structure associated with the host.
635 * @wait: Whether to wait for shutdown processing to complete.
636 *
637 * This function will notify the AFU that the adapter is being shutdown
638 * and will wait for shutdown processing to complete if wait is true.
639 * This notification should flush pending I/Os to the device and halt
640 * further I/Os until the next AFU reset is issued and device restarted.
641 */
642static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
643{
644 struct afu *afu = cfg->afu;
645 struct device *dev = &cfg->dev->dev;
1bd2b282 646 struct sisl_global_map __iomem *global;
647 struct dev_dependent_vals *ddv;
648 u64 reg, status;
649 int i, retry_cnt = 0;
650
651 ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
652 if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
653 return;
654
655 if (!afu || !afu->afu_map) {
656 dev_dbg(dev, "%s: The problem state area is not mapped\n",
657 __func__);
658 return;
659 }
660
661 global = &afu->afu_map->global;
662
663 /* Notify AFU */
664 for (i = 0; i < NUM_FC_PORTS; i++) {
665 reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
666 reg |= SISL_FC_SHUTDOWN_NORMAL;
667 writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
668 }
669
670 if (!wait)
671 return;
672
673 /* Wait up to 1.5 seconds for shutdown processing to complete */
674 for (i = 0; i < NUM_FC_PORTS; i++) {
675 retry_cnt = 0;
676 while (true) {
677 status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
678 if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
679 break;
680 if (++retry_cnt >= MC_RETRY_CNT) {
681 dev_dbg(dev, "%s: port %d shutdown processing "
682 "not yet completed\n", __func__, i);
683 break;
684 }
685 msleep(100 * retry_cnt);
686 }
687 }
688}
689
690/**
691 * cxlflash_remove() - PCI entry point to tear down host
692 * @pdev: PCI device associated with the host.
693 *
694 * Safe to use as a cleanup in partially allocated/initialized state.
695 */
696static void cxlflash_remove(struct pci_dev *pdev)
697{
698 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
699 ulong lock_flags;
700
701 if (!pci_is_enabled(pdev)) {
702 pr_debug("%s: Device is disabled\n", __func__);
703 return;
704 }
705
706 /* If a Task Management Function is active, wait for it to complete
707 * before continuing with remove.
708 */
018d1dc9 709 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
c21e0bbf 710 if (cfg->tmf_active)
711 wait_event_interruptible_lock_irq(cfg->tmf_waitq,
712 !cfg->tmf_active,
713 cfg->tmf_slock);
714 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
c21e0bbf 715
716 /* Notify AFU and wait for shutdown processing to complete */
717 notify_shutdown(cfg, true);
718
5cdac81a 719 cfg->state = STATE_FAILTERM;
65be2c79 720 cxlflash_stop_term_user_contexts(cfg);
5cdac81a 721
722 switch (cfg->init_state) {
723 case INIT_STATE_SCSI:
65be2c79 724 cxlflash_term_local_luns(cfg);
c21e0bbf 725 scsi_remove_host(cfg->host);
f15fbf8d 726 /* fall through */
c21e0bbf 727 case INIT_STATE_AFU:
d804621d 728 cancel_work_sync(&cfg->work_q);
b45cdbaf 729 term_afu(cfg);
c21e0bbf 730 case INIT_STATE_PCI:
731 pci_disable_device(pdev);
732 case INIT_STATE_NONE:
c21e0bbf 733 free_mem(cfg);
8b5b1e87 734 scsi_host_put(cfg->host);
735 break;
736 }
737
738 pr_debug("%s: returning\n", __func__);
739}
740
741/**
 742 * alloc_mem() - allocates the AFU
1284fb0c 743 * @cfg: Internal structure associated with the host.
744 *
745 * A partially allocated state remains on failure.
746 *
747 * Return:
748 * 0 on success
749 * -ENOMEM on failure to allocate memory
750 */
751static int alloc_mem(struct cxlflash_cfg *cfg)
752{
753 int rc = 0;
4392ba49 754 struct device *dev = &cfg->dev->dev;
c21e0bbf 755
f15fbf8d 756 /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
757 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
758 get_order(sizeof(struct afu)));
759 if (unlikely(!cfg->afu)) {
760 dev_err(dev, "%s: cannot get %d free pages\n",
761 __func__, get_order(sizeof(struct afu)));
762 rc = -ENOMEM;
763 goto out;
764 }
765 cfg->afu->parent = cfg;
766 cfg->afu->afu_map = NULL;
767out:
768 return rc;
769}
770
771/**
772 * init_pci() - initializes the host as a PCI device
1284fb0c 773 * @cfg: Internal structure associated with the host.
c21e0bbf 774 *
1284fb0c 775 * Return: 0 on success, -errno on failure
776 */
777static int init_pci(struct cxlflash_cfg *cfg)
778{
779 struct pci_dev *pdev = cfg->dev;
780 int rc = 0;
781
782 rc = pci_enable_device(pdev);
783 if (rc || pci_channel_offline(pdev)) {
784 if (pci_channel_offline(pdev)) {
785 cxlflash_wait_for_pci_err_recovery(cfg);
786 rc = pci_enable_device(pdev);
787 }
788
789 if (rc) {
790 dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
791 __func__);
792 cxlflash_wait_for_pci_err_recovery(cfg);
961487e4 793 goto out;
794 }
795 }
796
797out:
798 pr_debug("%s: returning rc=%d\n", __func__, rc);
799 return rc;
800}
801
802/**
803 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
1284fb0c 804 * @cfg: Internal structure associated with the host.
c21e0bbf 805 *
1284fb0c 806 * Return: 0 on success, -errno on failure
807 */
808static int init_scsi(struct cxlflash_cfg *cfg)
809{
810 struct pci_dev *pdev = cfg->dev;
811 int rc = 0;
812
813 rc = scsi_add_host(cfg->host, &pdev->dev);
814 if (rc) {
815 dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
816 __func__, rc);
817 goto out;
818 }
819
820 scsi_scan_host(cfg->host);
821
822out:
823 pr_debug("%s: returning rc=%d\n", __func__, rc);
824 return rc;
825}
826
827/**
828 * set_port_online() - transitions the specified host FC port to online state
829 * @fc_regs: Top of MMIO region defined for specified port.
830 *
831 * The provided MMIO region must be mapped prior to call. Online state means
832 * that the FC link layer has synced, completed the handshaking process, and
833 * is ready for login to start.
834 */
1786f4a0 835static void set_port_online(__be64 __iomem *fc_regs)
836{
837 u64 cmdcfg;
838
839 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
840 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
841 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
842 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
843}
844
845/**
846 * set_port_offline() - transitions the specified host FC port to offline state
847 * @fc_regs: Top of MMIO region defined for specified port.
848 *
849 * The provided MMIO region must be mapped prior to call.
850 */
1786f4a0 851static void set_port_offline(__be64 __iomem *fc_regs)
852{
853 u64 cmdcfg;
854
855 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
856 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
857 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
858 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
859}
860
861/**
 862 * wait_port_online() - waits for the specified host FC port to come online
863 * @fc_regs: Top of MMIO region defined for specified port.
864 * @delay_us: Number of microseconds to delay between reading port status.
865 * @nretry: Number of cycles to retry reading port status.
866 *
867 * The provided MMIO region must be mapped prior to call. This will timeout
868 * when the cable is not plugged in.
869 *
870 * Return:
871 * TRUE (1) when the specified port is online
872 * FALSE (0) when the specified port fails to come online after timeout
873 * -EINVAL when @delay_us is less than 1000
874 */
1786f4a0 875static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
876{
877 u64 status;
878
879 if (delay_us < 1000) {
880 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
881 return -EINVAL;
882 }
883
884 do {
885 msleep(delay_us / 1000);
886 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
887 if (status == U64_MAX)
888 nretry /= 2;
889 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
890 nretry--);
891
892 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
893}
894
895/**
 896 * wait_port_offline() - waits for the specified host FC port to go offline
897 * @fc_regs: Top of MMIO region defined for specified port.
898 * @delay_us: Number of microseconds to delay between reading port status.
899 * @nretry: Number of cycles to retry reading port status.
900 *
901 * The provided MMIO region must be mapped prior to call.
902 *
903 * Return:
904 * TRUE (1) when the specified port is offline
905 * FALSE (0) when the specified port fails to go offline after timeout
906 * -EINVAL when @delay_us is less than 1000
907 */
1786f4a0 908static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
909{
910 u64 status;
911
912 if (delay_us < 1000) {
913 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
914 return -EINVAL;
915 }
916
917 do {
918 msleep(delay_us / 1000);
919 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
920 if (status == U64_MAX)
921 nretry /= 2;
922 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
923 nretry--);
924
925 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
926}
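/*
 * Illustrative sketch (not part of the driver): both port-wait helpers above
 * poll a status register (sleeping between reads in the driver) and halve
 * the remaining retry budget whenever the read returns all ones (U64_MAX),
 * which usually means the MMIO mapping is no longer valid.  A compact
 * stand-alone model with a hypothetical read_status() helper:
 */
static int ex_wait_for_state(unsigned long long (*read_status)(void),
                             unsigned long long mask,
                             unsigned long long want, int nretry)
{
    unsigned long long status;

    do {
        status = read_status();
        if (status == ~0ULL)
            nretry /= 2;    /* register likely unmapped; give up sooner */
    } while ((status & mask) != want && nretry-- > 0);

    return (status & mask) == want;
}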
927
928/**
929 * afu_set_wwpn() - configures the WWPN for the specified host FC port
930 * @afu: AFU associated with the host that owns the specified FC port.
931 * @port: Port number being configured.
932 * @fc_regs: Top of MMIO region defined for specified port.
933 * @wwpn: The world-wide-port-number previously discovered for port.
934 *
935 * The provided MMIO region must be mapped prior to call. As part of the
936 * sequence to configure the WWPN, the port is toggled offline and then back
937 * online. This toggling action can cause this routine to delay up to a few
938 * seconds. When configured to use the internal LUN feature of the AFU, a
939 * failure to come online is overridden.
c21e0bbf 940 */
941static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
942 u64 wwpn)
c21e0bbf 943{
c21e0bbf 944 set_port_offline(fc_regs);
945 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
946 FC_PORT_STATUS_RETRY_CNT)) {
947 pr_debug("%s: wait on port %d to go offline timed out\n",
948 __func__, port);
949 }
950
f8013261 951 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
964497b3 952
c21e0bbf 953 set_port_online(fc_regs);
954 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
955 FC_PORT_STATUS_RETRY_CNT)) {
956 pr_debug("%s: wait on port %d to go online timed out\n",
957 __func__, port);
c21e0bbf 958 }
959}
960
961/**
962 * afu_link_reset() - resets the specified host FC port
963 * @afu: AFU associated with the host that owns the specified FC port.
964 * @port: Port number being configured.
965 * @fc_regs: Top of MMIO region defined for specified port.
966 *
967 * The provided MMIO region must be mapped prior to call. The sequence to
968 * reset the port involves toggling it offline and then back online. This
969 * action can cause this routine to delay up to a few seconds. An effort
 970 * is made to maintain the link with the device by switching the host to
 971 * use the alternate port exclusively while the reset takes place.
973 */
1786f4a0 974static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
975{
976 u64 port_sel;
977
978 /* first switch the AFU to the other links, if any */
979 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
4da74db0 980 port_sel &= ~(1ULL << port);
981 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
982 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
983
984 set_port_offline(fc_regs);
985 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
986 FC_PORT_STATUS_RETRY_CNT))
987 pr_err("%s: wait on port %d to go offline timed out\n",
988 __func__, port);
989
990 set_port_online(fc_regs);
991 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
992 FC_PORT_STATUS_RETRY_CNT))
993 pr_err("%s: wait on port %d to go online timed out\n",
994 __func__, port);
995
996 /* switch back to include this port */
4da74db0 997 port_sel |= (1ULL << port);
998 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
999 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1000
1001 pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1002}
1003
1004/*
1005 * Asynchronous interrupt information table
1006 */
1007static const struct asyc_intr_info ainfo[] = {
1008 {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1009 {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1010 {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
e6e6df3f 1011 {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
c21e0bbf 1012 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
ef51074a 1013 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
c21e0bbf 1014 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
bbbfae96 1015 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1016 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1017 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1018 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
a9be294e 1019 {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
c21e0bbf 1020 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
ef51074a 1021 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
c21e0bbf 1022 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
bbbfae96 1023 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1024 {0x0, "", 0, 0} /* terminator */
1025};
1026
1027/**
1028 * find_ainfo() - locates and returns asynchronous interrupt information
1029 * @status: Status code set by AFU on error.
1030 *
1031 * Return: The located information or NULL when the status code is invalid.
1032 */
1033static const struct asyc_intr_info *find_ainfo(u64 status)
1034{
1035 const struct asyc_intr_info *info;
1036
1037 for (info = &ainfo[0]; info->status; info++)
1038 if (info->status == status)
1039 return info;
1040
1041 return NULL;
1042}
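/*
 * Illustrative sketch (not part of the driver): ainfo[] is a sentinel-
 * terminated table - the all-zero entry marks the end - so find_ainfo() walks
 * it until info->status is zero.  The same lookup pattern in isolation:
 */
struct ex_entry {
    unsigned long long key;
    const char *desc;
};

static const struct ex_entry ex_table[] = {
    { 0x1, "first" },
    { 0x2, "second" },
    { 0x0, 0 }          /* terminator */
};

static const struct ex_entry *ex_find(unsigned long long key)
{
    const struct ex_entry *e;

    for (e = &ex_table[0]; e->key; e++)
        if (e->key == key)
            return e;
    return 0;           /* not found */
}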
1043
1044/**
1045 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1046 * @afu: AFU associated with the host.
1047 */
1048static void afu_err_intr_init(struct afu *afu)
1049{
1050 int i;
1051 u64 reg;
1052
1053 /* global async interrupts: AFU clears afu_ctrl on context exit
1054 * if async interrupts were sent to that context. This prevents
 1055 * the AFU from sending further async interrupts when there is
 1056 * nobody to receive them.
1058 */
1059
1060 /* mask all */
1061 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1062 /* set LISN# to send and point to master context */
1063 reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1064
1065 if (afu->internal_lun)
1066 reg |= 1; /* Bit 63 indicates local lun */
1067 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1068 /* clear all */
1069 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1070 /* unmask bits that are of interest */
1071 /* note: afu can send an interrupt after this step */
1072 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1073 /* clear again in case a bit came on after previous clear but before */
1074 /* unmask */
1075 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1076
1077 /* Clear/Set internal lun bits */
1078 reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1079 reg &= SISL_FC_INTERNAL_MASK;
1080 if (afu->internal_lun)
1081 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1082 writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1083
1084 /* now clear FC errors */
1085 for (i = 0; i < NUM_FC_PORTS; i++) {
1086 writeq_be(0xFFFFFFFFU,
1087 &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1088 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1089 }
1090
1091 /* sync interrupts for master's IOARRIN write */
1092 /* note that unlike asyncs, there can be no pending sync interrupts */
1093 /* at this time (this is a fresh context and master has not written */
1094 /* IOARRIN yet), so there is nothing to clear. */
1095
1096 /* set LISN#, it is always sent to the context that wrote IOARRIN */
1097 writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1098 writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1099}
1100
1101/**
1102 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1103 * @irq: Interrupt number.
1104 * @data: Private data provided at interrupt registration, the AFU.
1105 *
1106 * Return: Always return IRQ_HANDLED.
1107 */
1108static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1109{
1110 struct afu *afu = (struct afu *)data;
1111 u64 reg;
1112 u64 reg_unmasked;
1113
1114 reg = readq_be(&afu->host_map->intr_status);
1115 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1116
1117 if (reg_unmasked == 0UL) {
1118 pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1119 __func__, (u64)afu, reg);
1120 goto cxlflash_sync_err_irq_exit;
1121 }
1122
1123 pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1124 __func__, (u64)afu, reg);
1125
1126 writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1127
1128cxlflash_sync_err_irq_exit:
1129 pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1130 return IRQ_HANDLED;
1131}
1132
1133/**
1134 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1135 * @irq: Interrupt number.
1136 * @data: Private data provided at interrupt registration, the AFU.
1137 *
1138 * Return: Always return IRQ_HANDLED.
1139 */
1140static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1141{
1142 struct afu *afu = (struct afu *)data;
1143 struct afu_cmd *cmd;
1144 bool toggle = afu->toggle;
1145 u64 entry,
1146 *hrrq_start = afu->hrrq_start,
1147 *hrrq_end = afu->hrrq_end,
1148 *hrrq_curr = afu->hrrq_curr;
1149
1150 /* Process however many RRQ entries that are ready */
1151 while (true) {
1152 entry = *hrrq_curr;
1153
1154 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1155 break;
1156
1157 cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1158 cmd_complete(cmd);
1159
1160 /* Advance to next entry or wrap and flip the toggle bit */
1161 if (hrrq_curr < hrrq_end)
1162 hrrq_curr++;
1163 else {
1164 hrrq_curr = hrrq_start;
1165 toggle ^= SISL_RESP_HANDLE_T_BIT;
1166 }
1167 }
1168
1169 afu->hrrq_curr = hrrq_curr;
1170 afu->toggle = toggle;
1171
1172 return IRQ_HANDLED;
1173}
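/*
 * Illustrative sketch (not part of the driver): the RRQ is drained with a
 * "toggle bit" scheme - hardware stamps each entry with a phase bit and the
 * consumer flips its expected phase every time it wraps, so no producer index
 * ever has to be read.  A stand-alone model with a hypothetical process()
 * callback:
 */
#define EX_T_BIT 0x1ULL

static void ex_drain_ring(unsigned long long *start, unsigned long long *end,
                          unsigned long long **curr_p,
                          unsigned long long *toggle_p,
                          void (*process)(unsigned long long handle))
{
    unsigned long long *curr = *curr_p, toggle = *toggle_p, entry;

    while (((entry = *curr) & EX_T_BIT) == toggle) {
        process(entry & ~EX_T_BIT);
        if (curr < end) {
            curr++;
        } else {
            curr = start;           /* wrap to the first entry ... */
            toggle ^= EX_T_BIT;     /* ... and flip the expected phase */
        }
    }
    *curr_p = curr;
    *toggle_p = toggle;
}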
1174
1175/**
1176 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1177 * @irq: Interrupt number.
1178 * @data: Private data provided at interrupt registration, the AFU.
1179 *
1180 * Return: Always return IRQ_HANDLED.
1181 */
1182static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1183{
1184 struct afu *afu = (struct afu *)data;
1185 struct cxlflash_cfg *cfg = afu->parent;
1186 struct device *dev = &cfg->dev->dev;
1187 u64 reg_unmasked;
1188 const struct asyc_intr_info *info;
1786f4a0 1189 struct sisl_global_map __iomem *global = &afu->afu_map->global;
1190 u64 reg;
1191 u8 port;
1192 int i;
1193
1194 reg = readq_be(&global->regs.aintr_status);
1195 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1196
1197 if (reg_unmasked == 0) {
1198 dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
1199 __func__, reg);
1200 goto out;
1201 }
1202
f15fbf8d 1203 /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1204 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1205
f15fbf8d 1206 /* Check each bit that is on */
1207 for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1208 info = find_ainfo(1ULL << i);
16798d34 1209 if (((reg_unmasked & 0x1) == 0) || !info)
1210 continue;
1211
1212 port = info->port;
1213
1214 dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1215 __func__, port, info->desc,
1216 readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1217
1218 /*
f15fbf8d 1219 * Do link reset first, some OTHER errors will set FC_ERROR
1220 * again if cleared before or w/o a reset
1221 */
1222 if (info->action & LINK_RESET) {
1223 dev_err(dev, "%s: FC Port %d: resetting link\n",
1224 __func__, port);
1225 cfg->lr_state = LINK_RESET_REQUIRED;
1226 cfg->lr_port = port;
b45cdbaf 1227 kref_get(&cfg->afu->mapcount);
1228 schedule_work(&cfg->work_q);
1229 }
1230
1231 if (info->action & CLR_FC_ERROR) {
1232 reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1233
1234 /*
f15fbf8d 1235 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
c21e0bbf
MO
1236 * should be the same and tracing one is sufficient.
1237 */
1238
1239 dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
1240 __func__, port, reg);
1241
1242 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1243 writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1244 }
1245
1246 if (info->action & SCAN_HOST) {
1247 atomic_inc(&cfg->scan_host_needed);
b45cdbaf 1248 kref_get(&cfg->afu->mapcount);
1249 schedule_work(&cfg->work_q);
1250 }
1251 }
1252
1253out:
4392ba49 1254 dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
1255 return IRQ_HANDLED;
1256}
1257
1258/**
1259 * start_context() - starts the master context
1284fb0c 1260 * @cfg: Internal structure associated with the host.
1261 *
1262 * Return: A success or failure value from CXL services.
1263 */
1264static int start_context(struct cxlflash_cfg *cfg)
1265{
1266 int rc = 0;
1267
1268 rc = cxl_start_context(cfg->mcctx,
1269 cfg->afu->work.work_element_descriptor,
1270 NULL);
1271
1272 pr_debug("%s: returning rc=%d\n", __func__, rc);
1273 return rc;
1274}
1275
1276/**
1277 * read_vpd() - obtains the WWPNs from VPD
1284fb0c 1278 * @cfg: Internal structure associated with the host.
1279 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
1280 *
1284fb0c 1281 * Return: 0 on success, -errno on failure
1282 */
1283static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1284{
ca946d4e 1285 struct pci_dev *dev = cfg->dev;
1286 int rc = 0;
1287 int ro_start, ro_size, i, j, k;
1288 ssize_t vpd_size;
1289 char vpd_data[CXLFLASH_VPD_LEN];
1290 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1291 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1292
1293 /* Get the VPD data from the device */
ca946d4e 1294 vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
c21e0bbf 1295 if (unlikely(vpd_size <= 0)) {
4392ba49 1296 dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
1297 __func__, vpd_size);
1298 rc = -ENODEV;
1299 goto out;
1300 }
1301
1302 /* Get the read only section offset */
1303 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1304 PCI_VPD_LRDT_RO_DATA);
1305 if (unlikely(ro_start < 0)) {
1306 dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
1307 __func__);
1308 rc = -ENODEV;
1309 goto out;
1310 }
1311
1312 /* Get the read only section size, cap when extends beyond read VPD */
1313 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1314 j = ro_size;
1315 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1316 if (unlikely((i + j) > vpd_size)) {
1317 pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1318 __func__, (i + j), vpd_size);
1319 ro_size = vpd_size - i;
1320 }
1321
1322 /*
1323 * Find the offset of the WWPN tag within the read only
1324 * VPD data and validate the found field (partials are
1325 * no good to us). Convert the ASCII data to an integer
1326 * value. Note that we must copy to a temporary buffer
1327 * because the conversion service requires that the ASCII
1328 * string be terminated.
1329 */
1330 for (k = 0; k < NUM_FC_PORTS; k++) {
1331 j = ro_size;
1332 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1333
1334 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1335 if (unlikely(i < 0)) {
1336 dev_err(&dev->dev, "%s: Port %d WWPN not found "
1337 "in VPD\n", __func__, k);
1338 rc = -ENODEV;
1339 goto out;
1340 }
1341
1342 j = pci_vpd_info_field_size(&vpd_data[i]);
1343 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1344 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1345 dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
1346 "VPD corrupt\n",
1347 __func__, k);
1348 rc = -ENODEV;
1349 goto out;
1350 }
1351
1352 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1353 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1354 if (unlikely(rc)) {
1355 dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
1356 "to integer\n", __func__, k);
1357 rc = -ENODEV;
1358 goto out;
1359 }
1360 }
1361
1362out:
1363 pr_debug("%s: returning rc=%d\n", __func__, rc);
1364 return rc;
1365}
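/*
 * Illustrative sketch (not part of the driver): the WWPN fields read from VPD
 * are fixed-width ASCII hex with no terminator, so read_vpd() copies each
 * field into a zeroed scratch buffer before converting it (note the second
 * argument of kstrtoul() is the numeric base, and WWPN_LEN happens to be 16).
 * A user-space equivalent using strtoull():
 */
#include <stdlib.h>
#include <string.h>

#define EX_WWPN_LEN 16      /* 16 hex digits == 64-bit WWPN */

static int ex_parse_wwpn(const char *field, unsigned long long *out)
{
    char tmp[EX_WWPN_LEN + 1] = { 0 };  /* guarantee NUL termination */
    char *end;

    memcpy(tmp, field, EX_WWPN_LEN);
    *out = strtoull(tmp, &end, 16);
    return (end == tmp + EX_WWPN_LEN) ? 0 : -1;
}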
1366
1367/**
15305514 1368 * init_pcr() - initialize the provisioning and control registers
1284fb0c 1369 * @cfg: Internal structure associated with the host.
c21e0bbf 1370 *
 1371 * Also sets up fast access to the mapped registers.
c21e0bbf 1373 */
15305514 1374static void init_pcr(struct cxlflash_cfg *cfg)
1375{
1376 struct afu *afu = cfg->afu;
1786f4a0 1377 struct sisl_ctrl_map __iomem *ctrl_map;
1378 int i;
1379
1380 for (i = 0; i < MAX_CONTEXT; i++) {
1381 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1382 /* Disrupt any clients that could be running */
1383 /* e.g. clients that survived a master restart */
1384 writeq_be(0, &ctrl_map->rht_start);
1385 writeq_be(0, &ctrl_map->rht_cnt_id);
1386 writeq_be(0, &ctrl_map->ctx_cap);
1387 }
1388
f15fbf8d 1389 /* Copy frequently used fields into afu */
c21e0bbf 1390 afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1391 afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1392 afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1393
1394 /* Program the Endian Control for the master context */
1395 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1396}
1397
1398/**
1399 * init_global() - initialize AFU global registers
1284fb0c 1400 * @cfg: Internal structure associated with the host.
c21e0bbf 1401 */
15305514 1402static int init_global(struct cxlflash_cfg *cfg)
1403{
1404 struct afu *afu = cfg->afu;
4392ba49 1405 struct device *dev = &cfg->dev->dev;
1406 u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
1407 int i = 0, num_ports = 0;
1408 int rc = 0;
1409 u64 reg;
1410
1411 rc = read_vpd(cfg, &wwpn[0]);
1412 if (rc) {
4392ba49 1413 dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1414 goto out;
1415 }
1416
1417 pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1418
f15fbf8d 1419 /* Set up RRQ in AFU for master issued cmds */
1420 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1421 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1422
1423 /* AFU configuration */
1424 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1425 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1426 /* enable all auto retry options and control endianness */
1427 /* leave others at default: */
1428 /* CTX_CAP write protected, mbox_r does not clear on read and */
1429 /* checker on if dual afu */
1430 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1431
f15fbf8d 1432 /* Global port select: select either port */
c21e0bbf 1433 if (afu->internal_lun) {
f15fbf8d 1434 /* Only use port 0 */
1435 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1436 num_ports = NUM_FC_PORTS - 1;
1437 } else {
1438 writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1439 num_ports = NUM_FC_PORTS;
1440 }
1441
1442 for (i = 0; i < num_ports; i++) {
f15fbf8d 1443 /* Unmask all errors (but they are still masked at AFU) */
c21e0bbf 1444 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
f15fbf8d 1445 /* Clear CRC error cnt & set a threshold */
1446 (void)readq_be(&afu->afu_map->global.
1447 fc_regs[i][FC_CNT_CRCERR / 8]);
1448 writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1449 [FC_CRC_THRESH / 8]);
1450
f15fbf8d 1451 /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1452 if (wwpn[i] != 0)
1453 afu_set_wwpn(afu, i,
1454 &afu->afu_map->global.fc_regs[i][0],
1455 wwpn[i]);
1456 /* Programming WWPN back to back causes additional
1457 * offline/online transitions and a PLOGI
1458 */
1459 msleep(100);
1460 }
1461
1462 /* Set up master's own CTX_CAP to allow real mode, host translation */
1463 /* tables, afu cmds and read/write GSCSI cmds. */
1464 /* First, unlock ctx_cap write by reading mbox */
1465 (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
1466 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1467 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1468 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1469 &afu->ctrl_map->ctx_cap);
f15fbf8d 1470 /* Initialize heartbeat */
1471 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1472
1473out:
1474 return rc;
1475}
1476
1477/**
1478 * start_afu() - initializes and starts the AFU
1284fb0c 1479 * @cfg: Internal structure associated with the host.
1480 */
1481static int start_afu(struct cxlflash_cfg *cfg)
1482{
1483 struct afu *afu = cfg->afu;
1484 int rc = 0;
1485
1486 init_pcr(cfg);
1487
1488 /* After an AFU reset, RRQ entries are stale, clear them */
1489 memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
1490
f15fbf8d 1491 /* Initialize RRQ pointers */
1492 afu->hrrq_start = &afu->rrq_entry[0];
1493 afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1494 afu->hrrq_curr = afu->hrrq_start;
1495 afu->toggle = 1;
1496
1497 rc = init_global(cfg);
1498
1499 pr_debug("%s: returning rc=%d\n", __func__, rc);
1500 return rc;
1501}
1502
1503/**
 1504 * init_intr() - sets up interrupt handlers for the master context
 1505 * @cfg: Internal structure associated with the host.
 1506 *
 1507 * Return: undo level reached on failure (UNDO_NOOP on success)
c21e0bbf 1508 */
1509static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1510 struct cxl_context *ctx)
c21e0bbf 1511{
c21e0bbf 1512 struct afu *afu = cfg->afu;
9526f360 1513 struct device *dev = &cfg->dev->dev;
c21e0bbf 1514 int rc = 0;
9526f360 1515 enum undo_level level = UNDO_NOOP;
1516
1517 rc = cxl_allocate_afu_irqs(ctx, 3);
1518 if (unlikely(rc)) {
1519 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1520 __func__, rc);
9526f360 1521 level = UNDO_NOOP;
1522 goto out;
1523 }
1524
1525 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1526 "SISL_MSI_SYNC_ERROR");
1527 if (unlikely(rc <= 0)) {
1528 dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1529 __func__);
1530 level = FREE_IRQ;
1531 goto out;
1532 }
1533
1534 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1535 "SISL_MSI_RRQ_UPDATED");
1536 if (unlikely(rc <= 0)) {
1537 dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1538 __func__);
1539 level = UNMAP_ONE;
1540 goto out;
1541 }
1542
1543 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1544 "SISL_MSI_ASYNC_ERROR");
1545 if (unlikely(rc <= 0)) {
1546 dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1547 __func__);
1548 level = UNMAP_TWO;
1549 goto out;
1550 }
1551out:
1552 return level;
1553}
c21e0bbf 1554
1555/**
1556 * init_mc() - create and register as the master context
1557 * @cfg: Internal structure associated with the host.
1558 *
1559 * Return: 0 on success, -errno on failure
1560 */
1561static int init_mc(struct cxlflash_cfg *cfg)
1562{
1563 struct cxl_context *ctx;
1564 struct device *dev = &cfg->dev->dev;
1565 int rc = 0;
1566 enum undo_level level;
1567
1568 ctx = cxl_get_context(cfg->dev);
1569 if (unlikely(!ctx)) {
1570 rc = -ENOMEM;
1571 goto ret;
1572 }
1573 cfg->mcctx = ctx;
1574
1575 /* Set it up as a master with the CXL */
1576 cxl_set_master(ctx);
1577
1578 /* During initialization reset the AFU to start from a clean slate */
1579 rc = cxl_afu_reset(cfg->mcctx);
1580 if (unlikely(rc)) {
1581 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1582 __func__, rc);
1583 goto ret;
1584 }
1585
1586 level = init_intr(cfg, ctx);
1587 if (unlikely(level)) {
1588 dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
1589 __func__, rc);
1590 goto out;
1591 }
1592
1593 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1594 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1595 * element (pe) that is embedded in the context (ctx)
1596 */
1597 rc = start_context(cfg);
1598 if (unlikely(rc)) {
1599 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1600 level = UNMAP_THREE;
1601 goto out;
1602 }
1603ret:
1604 pr_debug("%s: returning rc=%d\n", __func__, rc);
1605 return rc;
1606out:
9526f360 1607 term_intr(cfg, level);
1608 goto ret;
1609}
1610
1611/**
1612 * init_afu() - setup as master context and start AFU
1284fb0c 1613 * @cfg: Internal structure associated with the host.
1614 *
1615 * This routine is a higher level of control for configuring the
1616 * AFU on probe and reset paths.
1617 *
1284fb0c 1618 * Return: 0 on success, -errno on failure
1619 */
1620static int init_afu(struct cxlflash_cfg *cfg)
1621{
1622 u64 reg;
1623 int rc = 0;
1624 struct afu *afu = cfg->afu;
1625 struct device *dev = &cfg->dev->dev;
1626
1627 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1628
1629 rc = init_mc(cfg);
1630 if (rc) {
1631 dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1632 __func__, rc);
ee3491ba 1633 goto out;
1634 }
1635
f15fbf8d 1636 /* Map the entire MMIO space of the AFU */
1637 afu->afu_map = cxl_psa_map(cfg->mcctx);
1638 if (!afu->afu_map) {
c21e0bbf 1639 dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
ee3491ba 1640 rc = -ENOMEM;
1641 goto err1;
1642 }
b45cdbaf 1643 kref_init(&afu->mapcount);
c21e0bbf 1644
1645 /* No byte reverse on reading afu_version or string will be backwards */
1646 reg = readq(&afu->afu_map->global.regs.afu_version);
1647 memcpy(afu->version, &reg, sizeof(reg));
1648 afu->interface_version =
1649 readq_be(&afu->afu_map->global.regs.interface_version);
1650 if ((afu->interface_version + 1) == 0) {
1651 pr_err("Back level AFU, please upgrade. AFU version %s "
1652 "interface version 0x%llx\n", afu->version,
1653 afu->interface_version);
1654 rc = -EINVAL;
1655 goto err2;
1656 }
1657
1658 pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
1659 afu->version, afu->interface_version);
1660
1661 rc = start_afu(cfg);
1662 if (rc) {
1663 dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1664 __func__, rc);
ee3491ba 1665 goto err2;
1666 }
1667
1668 afu_err_intr_init(cfg->afu);
1669 spin_lock_init(&afu->rrin_slock);
1670 afu->room = readq_be(&afu->host_map->cmd_room);
c21e0bbf 1671
1672 /* Restore the LUN mappings */
1673 cxlflash_restore_luntable(cfg);
ee3491ba 1674out:
1675 pr_debug("%s: returning rc=%d\n", __func__, rc);
1676 return rc;
1677
1678err2:
b45cdbaf 1679 kref_put(&afu->mapcount, afu_unmap);
ee3491ba 1680err1:
1681 term_intr(cfg, UNMAP_THREE);
1682 term_mc(cfg);
ee3491ba 1683 goto out;
1684}
1685
1686/**
1687 * cxlflash_afu_sync() - builds and sends an AFU sync command
1688 * @afu: AFU associated with the host.
1689 * @ctx_hndl_u: Identifies context requesting sync.
1690 * @res_hndl_u: Identifies resource requesting sync.
1691 * @mode: Type of sync to issue (lightweight, heavyweight, global).
1692 *
1693 * The AFU can only take 1 sync command at a time. This routine enforces this
f15fbf8d 1694 * limitation by using a mutex to provide exclusive access to the AFU during
 1695 * the sync. This design point requires calling threads not to be in interrupt
1696 * context due to the possibility of sleeping during concurrent sync operations.
1697 *
1698 * AFU sync operations are only necessary and allowed when the device is
1699 * operating normally. When not operating normally, sync requests can occur as
1700 * part of cleaning up resources associated with an adapter prior to removal.
1701 * In this scenario, these requests are simply ignored (safe due to the AFU
1702 * going away).
1703 *
c21e0bbf
MO
1704 * Return:
1705 * 0 on success
1706 * -1 on failure
1707 */
1708int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1709 res_hndl_t res_hndl_u, u8 mode)
1710{
5cdac81a 1711 struct cxlflash_cfg *cfg = afu->parent;
4392ba49 1712 struct device *dev = &cfg->dev->dev;
c21e0bbf 1713 struct afu_cmd *cmd = NULL;
350bb478 1714 char *buf = NULL;
c21e0bbf 1715 int rc = 0;
c21e0bbf
MO
1716 static DEFINE_MUTEX(sync_active);
1717
5cdac81a
MO
1718 if (cfg->state != STATE_NORMAL) {
1719 pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1720 return 0;
1721 }
1722
c21e0bbf 1723 mutex_lock(&sync_active);
350bb478
MO
1724 buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
1725 if (unlikely(!buf)) {
1726 dev_err(dev, "%s: no memory for command\n", __func__);
c21e0bbf
MO
1727 rc = -1;
1728 goto out;
1729 }
1730
350bb478
MO
1731 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
1732 init_completion(&cmd->cevent);
1733 spin_lock_init(&cmd->slock);
1734 cmd->parent = afu;
c21e0bbf 1735
350bb478 1736 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
c21e0bbf
MO
1737
1738 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
350bb478
MO
1739 cmd->rcb.ctx_id = afu->ctx_hndl;
1740 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
c21e0bbf
MO
1741 cmd->rcb.port_sel = 0x0; /* NA */
1742 cmd->rcb.lun_id = 0x0; /* NA */
1743 cmd->rcb.data_len = 0x0;
1744 cmd->rcb.data_ea = 0x0;
1745 cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1746
1747 cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
1748 cmd->rcb.cdb[1] = mode;
1749
1750 /* The cdb is aligned, no unaligned accessors required */
1786f4a0
MO
1751 *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
1752 *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
c21e0bbf 1753
15305514 1754 rc = send_cmd(afu, cmd);
c21e0bbf
MO
1755 if (unlikely(rc))
1756 goto out;
1757
15305514 1758 wait_resp(afu, cmd);
c21e0bbf 1759
f15fbf8d 1760 /* Set on timeout */
c21e0bbf
MO
1761 if (unlikely((cmd->sa.ioasc != 0) ||
1762 (cmd->sa.host_use_b[0] & B_ERROR)))
1763 rc = -1;
1764out:
1765 mutex_unlock(&sync_active);
350bb478 1766 kfree(buf);
c21e0bbf
MO
1767 pr_debug("%s: returning rc=%d\n", __func__, rc);
1768 return rc;
1769}
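
/*
 * Illustrative sketch, not part of the driver: how a caller elsewhere in
 * cxlflash (e.g. the superpipe/virtual LUN paths) might request a
 * lightweight sync after updating a resource handle entry. The mode
 * constant AFU_LW_SYNC is assumed to come from common.h and the wrapper
 * name below is hypothetical. Must not be called from interrupt context,
 * since cxlflash_afu_sync() can sleep on its mutex.
 */
static int example_sync_after_rht_update(struct afu *afu, ctx_hndl_t ctxid,
					 res_hndl_t rhndl)
{
	if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
		return -EIO;	/* AFU sync failed or timed out */
	return 0;
}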
1770
1771/**
15305514
MO
1772 * afu_reset() - resets the AFU
1773 * @cfg: Internal structure associated with the host.
c21e0bbf 1774 *
1284fb0c 1775 * Return: 0 on success, -errno on failure
c21e0bbf 1776 */
15305514 1777static int afu_reset(struct cxlflash_cfg *cfg)
c21e0bbf
MO
1778{
1779 int rc = 0;
1780 /* Stop the context before the reset. Since the context is
 1781 * no longer available, restart it after the reset is complete.
1782 */
1783
1784 term_afu(cfg);
1785
1786 rc = init_afu(cfg);
1787
1788 pr_debug("%s: returning rc=%d\n", __func__, rc);
1789 return rc;
1790}
1791
f411396d
MK
1792/**
1793 * drain_ioctls() - wait until all currently executing ioctls have completed
1794 * @cfg: Internal structure associated with the host.
1795 *
1796 * Obtain write access to read/write semaphore that wraps ioctl
1797 * handling to 'drain' ioctls currently executing.
1798 */
1799static void drain_ioctls(struct cxlflash_cfg *cfg)
1800{
1801 down_write(&cfg->ioctl_rwsem);
1802 up_write(&cfg->ioctl_rwsem);
1803}
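
/*
 * Illustrative sketch, not part of the driver: the read-side locking that
 * drain_ioctls() pairs with. Each ioctl holds cfg->ioctl_rwsem for reading
 * while it runs, so the down_write() above blocks until every in-flight
 * ioctl has released the semaphore. The function name is hypothetical.
 */
static int example_ioctl_body(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	down_read(&cfg->ioctl_rwsem);	/* readers can run concurrently */
	/* ... perform the ioctl work here ... */
	up_read(&cfg->ioctl_rwsem);	/* lets a waiting drain_ioctls() proceed */
	return rc;
}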
1804
15305514
MO
1805/**
1806 * cxlflash_eh_device_reset_handler() - reset a single LUN
1807 * @scp: SCSI command to send.
1808 *
1809 * Return:
1810 * SUCCESS as defined in scsi/scsi.h
1811 * FAILED as defined in scsi/scsi.h
1812 */
1813static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1814{
1815 int rc = SUCCESS;
1816 struct Scsi_Host *host = scp->device->host;
1817 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1818 struct afu *afu = cfg->afu;
1819 int rcr = 0;
1820
1821 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1822 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1823 host->host_no, scp->device->channel,
1824 scp->device->id, scp->device->lun,
1825 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1826 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1827 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1828 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1829
ed486daa 1830retry:
15305514
MO
1831 switch (cfg->state) {
1832 case STATE_NORMAL:
1833 rcr = send_tmf(afu, scp, TMF_LUN_RESET);
1834 if (unlikely(rcr))
1835 rc = FAILED;
1836 break;
1837 case STATE_RESET:
1838 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
ed486daa 1839 goto retry;
15305514
MO
1840 default:
1841 rc = FAILED;
1842 break;
1843 }
1844
1845 pr_debug("%s: returning rc=%d\n", __func__, rc);
1846 return rc;
1847}
1848
1849/**
1850 * cxlflash_eh_host_reset_handler() - reset the host adapter
1851 * @scp: SCSI command from stack identifying host.
1852 *
1d3324c3
MO
1853 * Following a reset, the state is evaluated again in case an EEH occurred
1854 * during the reset. In such a scenario, the host reset will either yield
1855 * until the EEH recovery is complete or return success or failure based
1856 * upon the current device state.
1857 *
15305514
MO
1858 * Return:
1859 * SUCCESS as defined in scsi/scsi.h
1860 * FAILED as defined in scsi/scsi.h
1861 */
1862static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1863{
1864 int rc = SUCCESS;
1865 int rcr = 0;
1866 struct Scsi_Host *host = scp->device->host;
1867 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1868
1869 pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1870 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1871 host->host_no, scp->device->channel,
1872 scp->device->id, scp->device->lun,
1873 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1874 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1875 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1876 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1877
1878 switch (cfg->state) {
1879 case STATE_NORMAL:
1880 cfg->state = STATE_RESET;
f411396d 1881 drain_ioctls(cfg);
15305514
MO
1882 cxlflash_mark_contexts_error(cfg);
1883 rcr = afu_reset(cfg);
1884 if (rcr) {
1885 rc = FAILED;
1886 cfg->state = STATE_FAILTERM;
1887 } else
1888 cfg->state = STATE_NORMAL;
1889 wake_up_all(&cfg->reset_waitq);
1d3324c3
MO
1890 ssleep(1);
1891 /* fall through */
15305514
MO
1892 case STATE_RESET:
1893 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1894 if (cfg->state == STATE_NORMAL)
1895 break;
1896 /* fall through */
1897 default:
1898 rc = FAILED;
1899 break;
1900 }
1901
1902 pr_debug("%s: returning rc=%d\n", __func__, rc);
1903 return rc;
1904}
1905
1906/**
1907 * cxlflash_change_queue_depth() - change the queue depth for the device
1908 * @sdev: SCSI device destined for queue depth change.
1909 * @qdepth: Requested queue depth value to set.
1910 *
1911 * The requested queue depth is capped to the maximum supported value.
1912 *
1913 * Return: The actual queue depth set.
1914 */
1915static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
1916{
1917
1918 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
1919 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
1920
1921 scsi_change_queue_depth(sdev, qdepth);
1922 return sdev->queue_depth;
1923}
1924
1925/**
1926 * cxlflash_show_port_status() - queries and presents the current port status
e0f01a21
MO
1927 * @port: Desired port for status reporting.
1928 * @afu: AFU owning the specified port.
15305514
MO
1929 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
1930 *
1931 * Return: The size of the ASCII string returned in @buf.
1932 */
e0f01a21 1933static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
15305514 1934{
15305514 1935 char *disp_status;
15305514 1936 u64 status;
e0f01a21 1937 __be64 __iomem *fc_regs;
15305514 1938
e0f01a21 1939 if (port >= NUM_FC_PORTS)
15305514
MO
1940 return 0;
1941
1942 fc_regs = &afu->afu_map->global.fc_regs[port][0];
e0f01a21
MO
1943 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1944 status &= FC_MTIP_STATUS_MASK;
15305514
MO
1945
1946 if (status == FC_MTIP_STATUS_ONLINE)
1947 disp_status = "online";
1948 else if (status == FC_MTIP_STATUS_OFFLINE)
1949 disp_status = "offline";
1950 else
1951 disp_status = "unknown";
1952
e0f01a21
MO
1953 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
1954}
1955
1956/**
1957 * port0_show() - queries and presents the current status of port 0
1958 * @dev: Generic device associated with the host owning the port.
1959 * @attr: Device attribute representing the port.
1960 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
1961 *
1962 * Return: The size of the ASCII string returned in @buf.
1963 */
1964static ssize_t port0_show(struct device *dev,
1965 struct device_attribute *attr,
1966 char *buf)
1967{
1968 struct Scsi_Host *shost = class_to_shost(dev);
1969 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1970 struct afu *afu = cfg->afu;
1971
1972 return cxlflash_show_port_status(0, afu, buf);
15305514
MO
1973}
1974
1975/**
e0f01a21
MO
1976 * port1_show() - queries and presents the current status of port 1
1977 * @dev: Generic device associated with the host owning the port.
1978 * @attr: Device attribute representing the port.
1979 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
1980 *
1981 * Return: The size of the ASCII string returned in @buf.
1982 */
1983static ssize_t port1_show(struct device *dev,
1984 struct device_attribute *attr,
1985 char *buf)
1986{
1987 struct Scsi_Host *shost = class_to_shost(dev);
1988 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1989 struct afu *afu = cfg->afu;
1990
1991 return cxlflash_show_port_status(1, afu, buf);
1992}
1993
1994/**
1995 * lun_mode_show() - presents the current LUN mode of the host
15305514 1996 * @dev: Generic device associated with the host.
e0f01a21 1997 * @attr: Device attribute representing the LUN mode.
15305514
MO
1998 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
1999 *
2000 * Return: The size of the ASCII string returned in @buf.
2001 */
e0f01a21
MO
2002static ssize_t lun_mode_show(struct device *dev,
2003 struct device_attribute *attr, char *buf)
15305514
MO
2004{
2005 struct Scsi_Host *shost = class_to_shost(dev);
2006 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2007 struct afu *afu = cfg->afu;
2008
e0f01a21 2009 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
15305514
MO
2010}
2011
2012/**
e0f01a21 2013 * lun_mode_store() - sets the LUN mode of the host
15305514 2014 * @dev: Generic device associated with the host.
e0f01a21 2015 * @attr: Device attribute representing the LUN mode.
15305514
MO
2016 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 2017 * @count: Length of data residing in @buf.
2018 *
2019 * The CXL Flash AFU supports a dummy LUN mode where the external
2020 * links and storage are not required. Space on the FPGA is used
2021 * to create 1 or 2 small LUNs which are presented to the system
 2022 * as if they were normal storage devices. This feature is useful
2023 * during development and also provides manufacturing with a way
2024 * to test the AFU without an actual device.
2025 *
2026 * 0 = external LUN[s] (default)
2027 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2028 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2029 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2030 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2031 *
 2032 * Return: The number of bytes consumed from @buf.
2033 */
e0f01a21
MO
2034static ssize_t lun_mode_store(struct device *dev,
2035 struct device_attribute *attr,
2036 const char *buf, size_t count)
15305514
MO
2037{
2038 struct Scsi_Host *shost = class_to_shost(dev);
2039 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2040 struct afu *afu = cfg->afu;
2041 int rc;
2042 u32 lun_mode;
2043
2044 rc = kstrtouint(buf, 10, &lun_mode);
2045 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2046 afu->internal_lun = lun_mode;
603ecce9
MK
2047
2048 /*
2049 * When configured for internal LUN, there is only one channel,
2050 * channel number 0, else there will be 2 (default).
2051 */
2052 if (afu->internal_lun)
2053 shost->max_channel = 0;
2054 else
2055 shost->max_channel = NUM_FC_PORTS - 1;
2056
15305514
MO
2057 afu_reset(cfg);
2058 scsi_scan_host(cfg->host);
2059 }
2060
2061 return count;
2062}
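
/*
 * Illustrative user-space sketch, not part of the driver: selecting an
 * internal-LUN mode through sysfs. The attribute path assumes the host
 * attributes are exposed under /sys/class/scsi_host/host<N>/; substitute
 * the host number of the cxlflash adapter on the system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Mode 1: one internal 64K LUN with 512B blocks (see table above) */
	int fd = open("/sys/class/scsi_host/host0/lun_mode", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}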
2063
2064/**
e0f01a21 2065 * ioctl_version_show() - presents the current ioctl version of the host
15305514
MO
2066 * @dev: Generic device associated with the host.
2067 * @attr: Device attribute representing the ioctl version.
2068 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
2069 *
2070 * Return: The size of the ASCII string returned in @buf.
2071 */
e0f01a21
MO
2072static ssize_t ioctl_version_show(struct device *dev,
2073 struct device_attribute *attr, char *buf)
15305514
MO
2074{
2075 return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2076}
2077
2078/**
e0f01a21
MO
2079 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2080 * @port: Desired port for status reporting.
2081 * @afu: AFU owning the specified port.
2082 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2083 *
2084 * Return: The size of the ASCII string returned in @buf.
2085 */
2086static ssize_t cxlflash_show_port_lun_table(u32 port,
2087 struct afu *afu,
2088 char *buf)
2089{
2090 int i;
2091 ssize_t bytes = 0;
2092 __be64 __iomem *fc_port;
2093
2094 if (port >= NUM_FC_PORTS)
2095 return 0;
2096
2097 fc_port = &afu->afu_map->global.fc_port[port][0];
2098
2099 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2100 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2101 "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2102 return bytes;
2103}
2104
2105/**
2106 * port0_lun_table_show() - presents the current LUN table of port 0
2107 * @dev: Generic device associated with the host owning the port.
2108 * @attr: Device attribute representing the port.
2109 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2110 *
2111 * Return: The size of the ASCII string returned in @buf.
2112 */
2113static ssize_t port0_lun_table_show(struct device *dev,
2114 struct device_attribute *attr,
2115 char *buf)
2116{
2117 struct Scsi_Host *shost = class_to_shost(dev);
2118 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2119 struct afu *afu = cfg->afu;
2120
2121 return cxlflash_show_port_lun_table(0, afu, buf);
2122}
2123
2124/**
2125 * port1_lun_table_show() - presents the current LUN table of port 1
2126 * @dev: Generic device associated with the host owning the port.
2127 * @attr: Device attribute representing the port.
2128 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
2129 *
2130 * Return: The size of the ASCII string returned in @buf.
2131 */
2132static ssize_t port1_lun_table_show(struct device *dev,
2133 struct device_attribute *attr,
2134 char *buf)
2135{
2136 struct Scsi_Host *shost = class_to_shost(dev);
2137 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2138 struct afu *afu = cfg->afu;
2139
2140 return cxlflash_show_port_lun_table(1, afu, buf);
2141}
2142
2143/**
2144 * mode_show() - presents the current mode of the device
15305514
MO
2145 * @dev: Generic device associated with the device.
2146 * @attr: Device attribute representing the device mode.
2147 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2148 *
2149 * Return: The size of the ASCII string returned in @buf.
2150 */
e0f01a21
MO
2151static ssize_t mode_show(struct device *dev,
2152 struct device_attribute *attr, char *buf)
15305514
MO
2153{
2154 struct scsi_device *sdev = to_scsi_device(dev);
2155
e0f01a21
MO
2156 return scnprintf(buf, PAGE_SIZE, "%s\n",
2157 sdev->hostdata ? "superpipe" : "legacy");
15305514
MO
2158}
2159
2160/*
2161 * Host attributes
2162 */
e0f01a21
MO
2163static DEVICE_ATTR_RO(port0);
2164static DEVICE_ATTR_RO(port1);
2165static DEVICE_ATTR_RW(lun_mode);
2166static DEVICE_ATTR_RO(ioctl_version);
2167static DEVICE_ATTR_RO(port0_lun_table);
2168static DEVICE_ATTR_RO(port1_lun_table);
15305514
MO
2169
2170static struct device_attribute *cxlflash_host_attrs[] = {
2171 &dev_attr_port0,
2172 &dev_attr_port1,
2173 &dev_attr_lun_mode,
2174 &dev_attr_ioctl_version,
e0f01a21
MO
2175 &dev_attr_port0_lun_table,
2176 &dev_attr_port1_lun_table,
15305514
MO
2177 NULL
2178};
2179
2180/*
2181 * Device attributes
2182 */
e0f01a21 2183static DEVICE_ATTR_RO(mode);
15305514
MO
2184
2185static struct device_attribute *cxlflash_dev_attrs[] = {
2186 &dev_attr_mode,
2187 NULL
2188};
2189
2190/*
2191 * Host template
2192 */
2193static struct scsi_host_template driver_template = {
2194 .module = THIS_MODULE,
2195 .name = CXLFLASH_ADAPTER_NAME,
2196 .info = cxlflash_driver_info,
2197 .ioctl = cxlflash_ioctl,
2198 .proc_name = CXLFLASH_NAME,
2199 .queuecommand = cxlflash_queuecommand,
2200 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2201 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2202 .change_queue_depth = cxlflash_change_queue_depth,
83430833 2203 .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
15305514 2204 .can_queue = CXLFLASH_MAX_CMDS,
5fbb96c8 2205 .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
15305514 2206 .this_id = -1,
68ab2d76 2207 .sg_tablesize = 1, /* No scatter gather support */
15305514
MO
2208 .max_sectors = CXLFLASH_MAX_SECTORS,
2209 .use_clustering = ENABLE_CLUSTERING,
2210 .shost_attrs = cxlflash_host_attrs,
2211 .sdev_attrs = cxlflash_dev_attrs,
2212};
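
/*
 * Illustrative sketch, not part of the driver: what the padded cmd_size
 * above enables. The midlayer allocates per-command private storage with
 * no particular alignment guarantee, so padding by
 * (__alignof__(struct afu_cmd) - 1) leaves room to align within that
 * region before treating it as an AFU command. The helper name is
 * hypothetical; scsi_cmd_priv() returns the private area that follows
 * the scsi_cmnd.
 */
static inline struct afu_cmd *example_sc_to_afuc(struct scsi_cmnd *scp)
{
	return PTR_ALIGN(scsi_cmd_priv(scp), __alignof__(struct afu_cmd));
}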
2213
2214/*
2215 * Device dependent values
2216 */
96e1b660
UK
2217static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
2218 0ULL };
2219static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
704c4b0d 2220 CXLFLASH_NOTIFY_SHUTDOWN };
15305514
MO
2221
2222/*
2223 * PCI device binding table
2224 */
2225static struct pci_device_id cxlflash_pci_table[] = {
2226 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
a2746fb1
MK
2228 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
15305514
MO
2230 {}
2231};
2232
2233MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2234
c21e0bbf
MO
2235/**
2236 * cxlflash_worker_thread() - work thread handler for the AFU
2237 * @work: Work structure contained within cxlflash associated with host.
2238 *
2239 * Handles the following events:
 2240 * - Link reset, which cannot be performed in interrupt context because it
 2241 * can block for up to a few seconds
ef51074a 2242 * - Rescan the host
c21e0bbf
MO
2243 */
2244static void cxlflash_worker_thread(struct work_struct *work)
2245{
5cdac81a
MO
2246 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2247 work_q);
c21e0bbf 2248 struct afu *afu = cfg->afu;
4392ba49 2249 struct device *dev = &cfg->dev->dev;
c21e0bbf
MO
2250 int port;
2251 ulong lock_flags;
2252
5cdac81a
MO
2253 /* Avoid MMIO if the device has failed */
2254
2255 if (cfg->state != STATE_NORMAL)
2256 return;
2257
c21e0bbf
MO
2258 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2259
2260 if (cfg->lr_state == LINK_RESET_REQUIRED) {
2261 port = cfg->lr_port;
2262 if (port < 0)
4392ba49
MO
2263 dev_err(dev, "%s: invalid port index %d\n",
2264 __func__, port);
c21e0bbf
MO
2265 else {
2266 spin_unlock_irqrestore(cfg->host->host_lock,
2267 lock_flags);
2268
2269 /* The reset can block... */
2270 afu_link_reset(afu, port,
f15fbf8d 2271 &afu->afu_map->global.fc_regs[port][0]);
c21e0bbf
MO
2272 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2273 }
2274
2275 cfg->lr_state = LINK_RESET_COMPLETE;
2276 }
2277
c21e0bbf 2278 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
ef51074a
MO
2279
2280 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2281 scsi_scan_host(cfg->host);
b45cdbaf 2282 kref_put(&afu->mapcount, afu_unmap);
c21e0bbf
MO
2283}
2284
2285/**
2286 * cxlflash_probe() - PCI entry point to add host
2287 * @pdev: PCI device associated with the host.
2288 * @dev_id: PCI device id associated with device.
2289 *
1284fb0c 2290 * Return: 0 on success, -errno on failure
c21e0bbf
MO
2291 */
2292static int cxlflash_probe(struct pci_dev *pdev,
2293 const struct pci_device_id *dev_id)
2294{
2295 struct Scsi_Host *host;
2296 struct cxlflash_cfg *cfg = NULL;
c21e0bbf
MO
2297 struct dev_dependent_vals *ddv;
2298 int rc = 0;
2299
2300 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2301 __func__, pdev->irq);
2302
2303 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2304 driver_template.max_sectors = ddv->max_sectors;
2305
2306 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2307 if (!host) {
2308 dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2309 __func__);
2310 rc = -ENOMEM;
2311 goto out;
2312 }
2313
2314 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2315 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2316 host->max_channel = NUM_FC_PORTS - 1;
2317 host->unique_id = host->host_no;
2318 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2319
2320 cfg = (struct cxlflash_cfg *)host->hostdata;
2321 cfg->host = host;
2322 rc = alloc_mem(cfg);
2323 if (rc) {
fa3f2c6e 2324 dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
c21e0bbf
MO
2325 __func__);
2326 rc = -ENOMEM;
8b5b1e87 2327 scsi_host_put(cfg->host);
c21e0bbf
MO
2328 goto out;
2329 }
2330
2331 cfg->init_state = INIT_STATE_NONE;
2332 cfg->dev = pdev;
17ead26f 2333 cfg->cxl_fops = cxlflash_cxl_fops;
2cb79266
MO
2334
2335 /*
2336 * The promoted LUNs move to the top of the LUN table. The rest stay
2337 * on the bottom half. The bottom half grows from the end
2338 * (index = 255), whereas the top half grows from the beginning
2339 * (index = 0).
2340 */
2341 cfg->promote_lun_index = 0;
2342 cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2343 cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2344
c21e0bbf 2345 cfg->dev_id = (struct pci_device_id *)dev_id;
c21e0bbf
MO
2346
2347 init_waitqueue_head(&cfg->tmf_waitq);
439e85c1 2348 init_waitqueue_head(&cfg->reset_waitq);
c21e0bbf
MO
2349
2350 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2351 cfg->lr_state = LINK_RESET_INVALID;
2352 cfg->lr_port = -1;
0d73122c 2353 spin_lock_init(&cfg->tmf_slock);
65be2c79
MO
2354 mutex_init(&cfg->ctx_tbl_list_mutex);
2355 mutex_init(&cfg->ctx_recovery_mutex);
0a27ae51 2356 init_rwsem(&cfg->ioctl_rwsem);
65be2c79
MO
2357 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2358 INIT_LIST_HEAD(&cfg->lluns);
c21e0bbf
MO
2359
2360 pci_set_drvdata(pdev, cfg);
2361
c21e0bbf
MO
2362 cfg->cxl_afu = cxl_pci_to_afu(pdev);
2363
2364 rc = init_pci(cfg);
2365 if (rc) {
2366 dev_err(&pdev->dev, "%s: call to init_pci "
2367 "failed rc=%d!\n", __func__, rc);
2368 goto out_remove;
2369 }
2370 cfg->init_state = INIT_STATE_PCI;
2371
2372 rc = init_afu(cfg);
2373 if (rc) {
2374 dev_err(&pdev->dev, "%s: call to init_afu "
2375 "failed rc=%d!\n", __func__, rc);
2376 goto out_remove;
2377 }
2378 cfg->init_state = INIT_STATE_AFU;
2379
c21e0bbf
MO
2380 rc = init_scsi(cfg);
2381 if (rc) {
2382 dev_err(&pdev->dev, "%s: call to init_scsi "
2383 "failed rc=%d!\n", __func__, rc);
2384 goto out_remove;
2385 }
2386 cfg->init_state = INIT_STATE_SCSI;
2387
2388out:
2389 pr_debug("%s: returning rc=%d\n", __func__, rc);
2390 return rc;
2391
2392out_remove:
2393 cxlflash_remove(pdev);
2394 goto out;
2395}
2396
5cdac81a
MO
2397/**
2398 * cxlflash_pci_error_detected() - called when a PCI error is detected
2399 * @pdev: PCI device struct.
2400 * @state: PCI channel state.
2401 *
1d3324c3
MO
2402 * When an EEH occurs during an active reset, wait until the reset is
2403 * complete and then take action based upon the device state.
2404 *
5cdac81a
MO
2405 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2406 */
2407static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2408 pci_channel_state_t state)
2409{
65be2c79 2410 int rc = 0;
5cdac81a
MO
2411 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2412 struct device *dev = &cfg->dev->dev;
2413
2414 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2415
2416 switch (state) {
2417 case pci_channel_io_frozen:
1d3324c3
MO
2418 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2419 if (cfg->state == STATE_FAILTERM)
2420 return PCI_ERS_RESULT_DISCONNECT;
2421
439e85c1 2422 cfg->state = STATE_RESET;
5cdac81a 2423 scsi_block_requests(cfg->host);
0a27ae51 2424 drain_ioctls(cfg);
65be2c79
MO
2425 rc = cxlflash_mark_contexts_error(cfg);
2426 if (unlikely(rc))
2427 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2428 __func__, rc);
9526f360 2429 term_afu(cfg);
5cdac81a
MO
2430 return PCI_ERS_RESULT_NEED_RESET;
2431 case pci_channel_io_perm_failure:
2432 cfg->state = STATE_FAILTERM;
439e85c1 2433 wake_up_all(&cfg->reset_waitq);
5cdac81a
MO
2434 scsi_unblock_requests(cfg->host);
2435 return PCI_ERS_RESULT_DISCONNECT;
2436 default:
2437 break;
2438 }
2439 return PCI_ERS_RESULT_NEED_RESET;
2440}
2441
2442/**
2443 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2444 * @pdev: PCI device struct.
2445 *
2446 * This routine is called by the pci error recovery code after the PCI
2447 * slot has been reset, just before we should resume normal operations.
2448 *
2449 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2450 */
2451static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2452{
2453 int rc = 0;
2454 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2455 struct device *dev = &cfg->dev->dev;
2456
2457 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2458
2459 rc = init_afu(cfg);
2460 if (unlikely(rc)) {
2461 dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2462 return PCI_ERS_RESULT_DISCONNECT;
2463 }
2464
2465 return PCI_ERS_RESULT_RECOVERED;
2466}
2467
2468/**
2469 * cxlflash_pci_resume() - called when normal operation can resume
2470 * @pdev: PCI device struct
2471 */
2472static void cxlflash_pci_resume(struct pci_dev *pdev)
2473{
2474 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2475 struct device *dev = &cfg->dev->dev;
2476
2477 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2478
2479 cfg->state = STATE_NORMAL;
439e85c1 2480 wake_up_all(&cfg->reset_waitq);
5cdac81a
MO
2481 scsi_unblock_requests(cfg->host);
2482}
2483
2484static const struct pci_error_handlers cxlflash_err_handler = {
2485 .error_detected = cxlflash_pci_error_detected,
2486 .slot_reset = cxlflash_pci_slot_reset,
2487 .resume = cxlflash_pci_resume,
2488};
2489
c21e0bbf
MO
2490/*
2491 * PCI device structure
2492 */
2493static struct pci_driver cxlflash_driver = {
2494 .name = CXLFLASH_NAME,
2495 .id_table = cxlflash_pci_table,
2496 .probe = cxlflash_probe,
2497 .remove = cxlflash_remove,
babf985d 2498 .shutdown = cxlflash_remove,
5cdac81a 2499 .err_handler = &cxlflash_err_handler,
c21e0bbf
MO
2500};
2501
2502/**
2503 * init_cxlflash() - module entry point
2504 *
1284fb0c 2505 * Return: 0 on success, -errno on failure
c21e0bbf
MO
2506 */
2507static int __init init_cxlflash(void)
2508{
85599218 2509 pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
c21e0bbf 2510
65be2c79
MO
2511 cxlflash_list_init();
2512
c21e0bbf
MO
2513 return pci_register_driver(&cxlflash_driver);
2514}
2515
2516/**
2517 * exit_cxlflash() - module exit point
2518 */
2519static void __exit exit_cxlflash(void)
2520{
65be2c79
MO
2521 cxlflash_term_global_luns();
2522 cxlflash_free_errpage();
2523
c21e0bbf
MO
2524 pci_unregister_driver(&cxlflash_driver);
2525}
2526
2527module_init(init_cxlflash);
2528module_exit(exit_cxlflash);