/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

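	/*
	 * The AFU is handed a single virtually contiguous buffer, so only the
	 * first scatter-gather element is used here (this assumes the host
	 * template restricts commands to one element, as configured elsewhere
	 * in this driver).
	 */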
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx));
	hwq->ctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	if (cfg->afu)
		stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
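		/*
		 * A read of all ones most likely means the MMIO space is no
		 * longer accessible (e.g. the adapter dropped off the bus),
		 * so halve the remaining retries instead of waiting out the
		 * full count (assumption based on the conventional meaning
		 * of a ~0 MMIO read).
		 */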
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place. A failure
 * to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:	Hardware queue associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d) \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
	ASTATUS_FC(_a, LINK_UP, "link up", 0), \
	ASTATUS_FC(_a, LINK_DN, "link down", 0), \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};

c21e0bbf MO |
1385 | /** |
1386 | * cxlflash_async_err_irq() - interrupt handler for asynchronous errors | |
1387 | * @irq: Interrupt number. | |
1388 | * @data: Private data provided at interrupt registration, the AFU. | |
1389 | * | |
1390 | * Return: Always return IRQ_HANDLED. | |
1391 | */ | |
1392 | static irqreturn_t cxlflash_async_err_irq(int irq, void *data) | |
1393 | { | |
a583d00a UK |
1394 | struct hwq *hwq = (struct hwq *)data; |
1395 | struct afu *afu = hwq->afu; | |
4392ba49 MO |
1396 | struct cxlflash_cfg *cfg = afu->parent; |
1397 | struct device *dev = &cfg->dev->dev; | |
c21e0bbf | 1398 | const struct asyc_intr_info *info; |
1786f4a0 | 1399 | struct sisl_global_map __iomem *global = &afu->afu_map->global; |
c885d3fe | 1400 | __be64 __iomem *fc_port_regs; |
8056044c | 1401 | u64 reg_unmasked; |
c21e0bbf | 1402 | u64 reg; |
8056044c | 1403 | u64 bit; |
c21e0bbf | 1404 | u8 port; |
c21e0bbf | 1405 | |
c21e0bbf MO |
1406 | reg = readq_be(&global->regs.aintr_status); |
1407 | reg_unmasked = (reg & SISL_ASTATUS_UNMASK); | |
1408 | ||
8056044c | 1409 | if (unlikely(reg_unmasked == 0)) { |
88d33628 | 1410 | dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", |
4392ba49 | 1411 | __func__, reg); |
c21e0bbf MO |
1412 | goto out; |
1413 | } | |
1414 | ||
f15fbf8d | 1415 | /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ |
c21e0bbf MO |
1416 | writeq_be(reg_unmasked, &global->regs.aintr_clear); |
1417 | ||
f15fbf8d | 1418 | /* Check each bit that is on */ |
8056044c MO |
1419 | for_each_set_bit(bit, (ulong *)®_unmasked, BITS_PER_LONG) { |
1420 | if (unlikely(bit >= ARRAY_SIZE(ainfo))) { | |
1421 | WARN_ON_ONCE(1); | |
c21e0bbf | 1422 | continue; |
8056044c MO |
1423 | } |
1424 | ||
1425 | info = &ainfo[bit]; | |
1426 | if (unlikely(info->status != 1ULL << bit)) { | |
1427 | WARN_ON_ONCE(1); | |
1428 | continue; | |
1429 | } | |
c21e0bbf MO |
1430 | |
1431 | port = info->port; | |
c885d3fe | 1432 | fc_port_regs = get_fc_port_regs(cfg, port); |
c21e0bbf | 1433 | |
88d33628 | 1434 | dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", |
4392ba49 | 1435 | __func__, port, info->desc, |
c885d3fe | 1436 | readq_be(&fc_port_regs[FC_STATUS / 8])); |
c21e0bbf MO |
1437 | |
1438 | /* | |
f15fbf8d | 1439 | * Do the link reset first; some OTHER errors will set FC_ERROR | |
c21e0bbf MO |
1440 | * again if cleared beforehand or without a reset. | |
1441 | */ | |
1442 | if (info->action & LINK_RESET) { | |
4392ba49 MO |
1443 | dev_err(dev, "%s: FC Port %d: resetting link\n", |
1444 | __func__, port); | |
c21e0bbf MO |
1445 | cfg->lr_state = LINK_RESET_REQUIRED; |
1446 | cfg->lr_port = port; | |
1447 | schedule_work(&cfg->work_q); | |
1448 | } | |
1449 | ||
1450 | if (info->action & CLR_FC_ERROR) { | |
c885d3fe | 1451 | reg = readq_be(&fc_port_regs[FC_ERROR / 8]); |
c21e0bbf MO |
1452 | |
1453 | /* | |
f15fbf8d | 1454 | * Since all errors are unmasked, FC_ERROR and FC_ERRCAP |
c21e0bbf MO |
1455 | * should be the same and tracing one is sufficient. |
1456 | */ | |
1457 | ||
88d33628 | 1458 | dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", |
4392ba49 | 1459 | __func__, port, reg); |
c21e0bbf | 1460 | |
c885d3fe MO |
1461 | writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); |
1462 | writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); | |
c21e0bbf | 1463 | } |
ef51074a MO |
1464 | |
1465 | if (info->action & SCAN_HOST) { | |
1466 | atomic_inc(&cfg->scan_host_needed); | |
1467 | schedule_work(&cfg->work_q); | |
1468 | } | |
c21e0bbf MO |
1469 | } |
1470 | ||
1471 | out: | |
c21e0bbf MO |
1472 | return IRQ_HANDLED; |
1473 | } | |
1474 | ||
1475 | /** | |
1476 | * start_context() - starts the master context | |
1284fb0c | 1477 | * @cfg: Internal structure associated with the host. |
a583d00a | 1478 | * @index: Index of the hardware queue. |
c21e0bbf MO |
1479 | * |
1480 | * Return: A success or failure value from CXL services. | |
1481 | */ | |
a583d00a | 1482 | static int start_context(struct cxlflash_cfg *cfg, u32 index) |
c21e0bbf | 1483 | { |
88d33628 | 1484 | struct device *dev = &cfg->dev->dev; |
a583d00a | 1485 | struct hwq *hwq = get_hwq(cfg->afu, index); |
c21e0bbf MO |
1486 | int rc = 0; |
1487 | ||
a583d00a UK |
1488 | rc = cxl_start_context(hwq->ctx, |
1489 | hwq->work.work_element_descriptor, | |
c21e0bbf MO |
1490 | NULL); |
1491 | ||
88d33628 | 1492 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1493 | return rc; |
1494 | } | |
1495 | ||
1496 | /** | |
1497 | * read_vpd() - obtains the WWPNs from VPD | |
1284fb0c | 1498 | * @cfg: Internal structure associated with the host. |
66d4bce4 | 1499 | * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs |
c21e0bbf | 1500 | * |
1284fb0c | 1501 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
1502 | */ |
1503 | static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) | |
1504 | { | |
88d33628 MO |
1505 | struct device *dev = &cfg->dev->dev; |
1506 | struct pci_dev *pdev = cfg->dev; | |
c21e0bbf MO |
1507 | int rc = 0; |
1508 | int ro_start, ro_size, i, j, k; | |
1509 | ssize_t vpd_size; | |
1510 | char vpd_data[CXLFLASH_VPD_LEN]; | |
1511 | char tmp_buf[WWPN_BUF_LEN] = { 0 }; | |
bdcff1c5 | 1512 | char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; |
c21e0bbf MO |
1513 | |
1514 | /* Get the VPD data from the device */ | |
88d33628 | 1515 | vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); |
c21e0bbf | 1516 | if (unlikely(vpd_size <= 0)) { |
88d33628 MO |
1517 | dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", |
1518 | __func__, vpd_size); | |
c21e0bbf MO |
1519 | rc = -ENODEV; |
1520 | goto out; | |
1521 | } | |
1522 | ||
1523 | /* Get the read only section offset */ | |
1524 | ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, | |
1525 | PCI_VPD_LRDT_RO_DATA); | |
1526 | if (unlikely(ro_start < 0)) { | |
88d33628 | 1527 | dev_err(dev, "%s: VPD Read-only data not found\n", __func__); |
c21e0bbf MO |
1528 | rc = -ENODEV; |
1529 | goto out; | |
1530 | } | |
1531 | ||
1532 | /* Get the read-only section size; cap it if it extends beyond the VPD read */ | |
1533 | ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); | |
1534 | j = ro_size; | |
1535 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; | |
1536 | if (unlikely((i + j) > vpd_size)) { | |
88d33628 MO |
1537 | dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", |
1538 | __func__, (i + j), vpd_size); | |
c21e0bbf MO |
1539 | ro_size = vpd_size - i; |
1540 | } | |
1541 | ||
1542 | /* | |
1543 | * Find the offset of the WWPN tag within the read only | |
1544 | * VPD data and validate the found field (partials are | |
1545 | * no good to us). Convert the ASCII data to an integer | |
1546 | * value. Note that we must copy to a temporary buffer | |
1547 | * because the conversion service requires that the ASCII | |
1548 | * string be terminated. | |
1549 | */ | |
66d4bce4 | 1550 | for (k = 0; k < cfg->num_fc_ports; k++) { |
c21e0bbf MO |
1551 | j = ro_size; |
1552 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; | |
1553 | ||
1554 | i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); | |
1555 | if (unlikely(i < 0)) { | |
88d33628 MO |
1556 | dev_err(dev, "%s: Port %d WWPN not found in VPD\n", |
1557 | __func__, k); | |
c21e0bbf MO |
1558 | rc = -ENODEV; |
1559 | goto out; | |
1560 | } | |
1561 | ||
1562 | j = pci_vpd_info_field_size(&vpd_data[i]); | |
1563 | i += PCI_VPD_INFO_FLD_HDR_SIZE; | |
1564 | if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { | |
88d33628 MO |
1565 | dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", |
1566 | __func__, k); | |
c21e0bbf MO |
1567 | rc = -ENODEV; |
1568 | goto out; | |
1569 | } | |
1570 | ||
1571 | memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); | |
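 | /* | |
 |  * Note: WWPN_LEN is reused as the kstrtoul() base; a WWPN is a | |
 |  * 16-character string, so the value is parsed as hexadecimal. | |
 |  */ | |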
1572 | rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); | |
1573 | if (unlikely(rc)) { | |
88d33628 MO |
1574 | dev_err(dev, "%s: WWPN conversion failed for port %d\n", |
1575 | __func__, k); | |
c21e0bbf MO |
1576 | rc = -ENODEV; |
1577 | goto out; | |
1578 | } | |
66d4bce4 MO |
1579 | |
1580 | dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); | |
c21e0bbf MO |
1581 | } |
1582 | ||
1583 | out: | |
88d33628 | 1584 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1585 | return rc; |
1586 | } | |
1587 | ||
1588 | /** | |
15305514 | 1589 | * init_pcr() - initialize the provisioning and control registers |
1284fb0c | 1590 | * @cfg: Internal structure associated with the host. |
c21e0bbf | 1591 | * |
15305514 MO |
1592 | * Also sets up fast access to the mapped registers by caching the | |
1593 | * per-context pointers in each hardware queue. | |
c21e0bbf | 1594 | */ |
15305514 | 1595 | static void init_pcr(struct cxlflash_cfg *cfg) |
c21e0bbf MO |
1596 | { |
1597 | struct afu *afu = cfg->afu; | |
1786f4a0 | 1598 | struct sisl_ctrl_map __iomem *ctrl_map; |
a583d00a | 1599 | struct hwq *hwq; |
c21e0bbf MO |
1600 | int i; |
1601 | ||
1602 | for (i = 0; i < MAX_CONTEXT; i++) { | |
1603 | ctrl_map = &afu->afu_map->ctrls[i].ctrl; | |
f15fbf8d MO |
1604 | /* Disrupt any clients that could be running */ |
1605 | /* e.g. clients that survived a master restart */ | |
c21e0bbf MO |
1606 | writeq_be(0, &ctrl_map->rht_start); |
1607 | writeq_be(0, &ctrl_map->rht_cnt_id); | |
1608 | writeq_be(0, &ctrl_map->ctx_cap); | |
1609 | } | |
1610 | ||
a583d00a | 1611 | /* Copy frequently used fields into hwq */ |
bb85ef68 | 1612 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a UK |
1613 | hwq = get_hwq(afu, i); |
1614 | ||
1615 | hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx); | |
1616 | hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; | |
1617 | hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; | |
c21e0bbf | 1618 | |
a583d00a UK |
1619 | /* Program the Endian Control for the master context */ |
1620 | writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); | |
1621 | } | |
c21e0bbf MO |
1622 | } |
1623 | ||
1624 | /** | |
1625 | * init_global() - initialize AFU global registers | |
1284fb0c | 1626 | * @cfg: Internal structure associated with the host. | |
 | * | |
 | * Return: 0 on success, -errno on failure | |
c21e0bbf | 1627 | */ | |
15305514 | 1628 | static int init_global(struct cxlflash_cfg *cfg) |
c21e0bbf MO |
1629 | { |
1630 | struct afu *afu = cfg->afu; | |
4392ba49 | 1631 | struct device *dev = &cfg->dev->dev; |
a583d00a UK |
1632 | struct hwq *hwq; |
1633 | struct sisl_host_map __iomem *hmap; | |
c885d3fe | 1634 | __be64 __iomem *fc_port_regs; |
66d4bce4 | 1635 | u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ |
c21e0bbf MO |
1636 | int i = 0, num_ports = 0; |
1637 | int rc = 0; | |
1638 | u64 reg; | |
1639 | ||
1640 | rc = read_vpd(cfg, &wwpn[0]); | |
1641 | if (rc) { | |
4392ba49 | 1642 | dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); |
c21e0bbf MO |
1643 | goto out; |
1644 | } | |
1645 | ||
a583d00a | 1646 | /* Set up RRQ and SQ in HWQ for master issued cmds */ |
bb85ef68 | 1647 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a UK |
1648 | hwq = get_hwq(afu, i); |
1649 | hmap = hwq->host_map; | |
c21e0bbf | 1650 | |
a583d00a UK |
1651 | writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); |
1652 | writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); | |
1653 | ||
1654 | if (afu_is_sq_cmd_mode(afu)) { | |
1655 | writeq_be((u64)hwq->hsq_start, &hmap->sq_start); | |
1656 | writeq_be((u64)hwq->hsq_end, &hmap->sq_end); | |
1657 | } | |
bae0ac69 MO |
1658 | } |
1659 | ||
c21e0bbf MO |
1660 | /* AFU configuration */ |
1661 | reg = readq_be(&afu->afu_map->global.regs.afu_config); | |
1662 | reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; | |
1663 | /* | |
1664 |  * Enable all auto retry options and control endianness. Leave the | |
1665 |  * others at their defaults: CTX_CAP write protected, mbox_r does not | |
1666 |  * clear on read, and checker on if dual AFU. | |
 |  */ | |
1667 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); | |
1668 | ||
f15fbf8d | 1669 | /* Global port select: select the desired port(s) */ | |
c21e0bbf | 1670 | if (afu->internal_lun) { |
f15fbf8d | 1671 | /* Only use port 0 */ |
c21e0bbf | 1672 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); |
66d4bce4 | 1673 | num_ports = 0; |
c21e0bbf | 1674 | } else { |
e8e17ea6 MO |
1675 | writeq_be(PORT_MASK(cfg->num_fc_ports), |
1676 | &afu->afu_map->global.regs.afu_port_sel); | |
66d4bce4 | 1677 | num_ports = cfg->num_fc_ports; |
c21e0bbf MO |
1678 | } |
1679 | ||
1680 | for (i = 0; i < num_ports; i++) { | |
c885d3fe MO |
1681 | fc_port_regs = get_fc_port_regs(cfg, i); |
1682 | ||
f15fbf8d | 1683 | /* Unmask all errors (but they are still masked at AFU) */ |
c885d3fe | 1684 | writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); |
f15fbf8d | 1685 | /* Clear CRC error cnt & set a threshold */ |
c885d3fe MO |
1686 | (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); |
1687 | writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); | |
c21e0bbf | 1688 | |
f15fbf8d | 1689 | /* Set WWPNs. If already programmed, wwpn[i] is 0 */ |
f8013261 | 1690 | if (wwpn[i] != 0) |
c885d3fe | 1691 | afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); |
c21e0bbf MO |
1692 | /* Programming WWPN back to back causes additional |
1693 | * offline/online transitions and a PLOGI | |
1694 | */ | |
1695 | msleep(100); | |
c21e0bbf MO |
1696 | } |
1697 | ||
f15fbf8d MO |
1698 | /* | |
1699 |  * Set up the master's own CTX_CAP to allow real mode, host translation | |
 |  * tables, AFU commands and read/write GSCSI commands. | |
 |  */ | |
c21e0bbf | 1700 | /* First, unlock ctx_cap write by reading mbox */ |
bb85ef68 | 1701 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a UK |
1702 | hwq = get_hwq(afu, i); |
1703 | ||
1704 | (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ | |
1705 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | | |
1706 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | | |
1707 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), | |
1708 | &hwq->ctrl_map->ctx_cap); | |
1709 | } | |
f15fbf8d | 1710 | /* Initialize heartbeat */ |
c21e0bbf | 1711 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); |
c21e0bbf MO |
1712 | out: |
1713 | return rc; | |
1714 | } | |
1715 | ||
1716 | /** | |
1717 | * start_afu() - initializes and starts the AFU | |
1284fb0c | 1718 | * @cfg: Internal structure associated with the host. | |
 | * | |
 | * Return: 0 on success, -errno on failure | |
c21e0bbf MO |
1719 | */ |
1720 | static int start_afu(struct cxlflash_cfg *cfg) | |
1721 | { | |
1722 | struct afu *afu = cfg->afu; | |
88d33628 | 1723 | struct device *dev = &cfg->dev->dev; |
a583d00a | 1724 | struct hwq *hwq; |
c21e0bbf | 1725 | int rc = 0; |
a583d00a | 1726 | int i; |
c21e0bbf | 1727 | |
c21e0bbf MO |
1728 | init_pcr(cfg); |
1729 | ||
a583d00a | 1730 | /* Initialize each HWQ */ |
bb85ef68 | 1731 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a | 1732 | hwq = get_hwq(afu, i); |
c21e0bbf | 1733 | |
a583d00a UK |
1734 | /* After an AFU reset, RRQ entries are stale, clear them */ |
1735 | memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); | |
bae0ac69 | 1736 | |
a583d00a UK |
1737 | /* Initialize RRQ pointers */ |
1738 | hwq->hrrq_start = &hwq->rrq_entry[0]; | |
1739 | hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; | |
1740 | hwq->hrrq_curr = hwq->hrrq_start; | |
1741 | hwq->toggle = 1; | |
edc034e8 UK |
1742 | |
1743 | /* Initialize spin locks */ | |
a583d00a | 1744 | spin_lock_init(&hwq->hrrq_slock); |
edc034e8 | 1745 | spin_lock_init(&hwq->hsq_slock); |
bae0ac69 | 1746 | |
a583d00a UK |
1747 | /* Initialize SQ */ |
1748 | if (afu_is_sq_cmd_mode(afu)) { | |
1749 | memset(&hwq->sq, 0, sizeof(hwq->sq)); | |
1750 | hwq->hsq_start = &hwq->sq[0]; | |
1751 | hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; | |
1752 | hwq->hsq_curr = hwq->hsq_start; | |
1753 | ||
a583d00a UK |
1754 | atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); |
1755 | } | |
1756 | ||
1757 | /* Initialize IRQ poll */ | |
1758 | if (afu_is_irqpoll_enabled(afu)) | |
1759 | irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, | |
1760 | cxlflash_irqpoll); | |
1761 | ||
1762 | } | |
2588f222 | 1763 | |
c21e0bbf MO |
1764 | rc = init_global(cfg); |
1765 | ||
88d33628 | 1766 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1767 | return rc; |
1768 | } | |
1769 | ||
1770 | /** | |
9526f360 | 1771 | * init_intr() - setup interrupt handlers for the master context |
1284fb0c | 1772 | * @cfg: Internal structure associated with the host. |
a583d00a | 1773 | * @hwq: Hardware queue to initialize. |
c21e0bbf | 1774 | * |
1284fb0c | 1775 | * Return: 0 on success, -errno on failure |
c21e0bbf | 1776 | */ |
9526f360 | 1777 | static enum undo_level init_intr(struct cxlflash_cfg *cfg, |
a583d00a | 1778 | struct hwq *hwq) |
c21e0bbf | 1779 | { |
9526f360 | 1780 | struct device *dev = &cfg->dev->dev; |
a583d00a | 1781 | struct cxl_context *ctx = hwq->ctx; |
c21e0bbf | 1782 | int rc = 0; |
9526f360 | 1783 | enum undo_level level = UNDO_NOOP; |
a583d00a UK |
1784 | bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); |
1785 | int num_irqs = is_primary_hwq ? 3 : 2; | |
c21e0bbf | 1786 | |
a583d00a | 1787 | rc = cxl_allocate_afu_irqs(ctx, num_irqs); |
c21e0bbf | 1788 | if (unlikely(rc)) { |
88d33628 | 1789 | dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", |
c21e0bbf | 1790 | __func__, rc); |
9526f360 | 1791 | level = UNDO_NOOP; |
c21e0bbf MO |
1792 | goto out; |
1793 | } | |
1794 | ||
a583d00a | 1795 | rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, |
c21e0bbf MO |
1796 | "SISL_MSI_SYNC_ERROR"); |
1797 | if (unlikely(rc <= 0)) { | |
88d33628 | 1798 | dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); |
c21e0bbf MO |
1799 | level = FREE_IRQ; |
1800 | goto out; | |
1801 | } | |
1802 | ||
a583d00a | 1803 | rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, |
c21e0bbf MO |
1804 | "SISL_MSI_RRQ_UPDATED"); |
1805 | if (unlikely(rc <= 0)) { | |
88d33628 | 1806 | dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); |
c21e0bbf MO |
1807 | level = UNMAP_ONE; |
1808 | goto out; | |
1809 | } | |
1810 | ||
a583d00a UK |
1811 | /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ |
1812 | if (!is_primary_hwq) | |
1813 | goto out; | |
1814 | ||
1815 | rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, | |
c21e0bbf MO |
1816 | "SISL_MSI_ASYNC_ERROR"); |
1817 | if (unlikely(rc <= 0)) { | |
88d33628 | 1818 | dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); |
c21e0bbf MO |
1819 | level = UNMAP_TWO; |
1820 | goto out; | |
1821 | } | |
9526f360 MK |
1822 | out: |
1823 | return level; | |
1824 | } | |
c21e0bbf | 1825 | |
9526f360 MK |
1826 | /** |
1827 | * init_mc() - create and register as the master context | |
1828 | * @cfg: Internal structure associated with the host. | |
a583d00a | 1829 | * @index: HWQ index of the master context. | |
9526f360 MK |
1830 | * |
1831 | * Return: 0 on success, -errno on failure | |
1832 | */ | |
a583d00a | 1833 | static int init_mc(struct cxlflash_cfg *cfg, u32 index) |
9526f360 MK |
1834 | { |
1835 | struct cxl_context *ctx; | |
1836 | struct device *dev = &cfg->dev->dev; | |
a583d00a | 1837 | struct hwq *hwq = get_hwq(cfg->afu, index); |
9526f360 MK |
1838 | int rc = 0; |
1839 | enum undo_level level; | |
1840 | ||
a583d00a UK |
1841 | hwq->afu = cfg->afu; |
1842 | hwq->index = index; | |
1843 | ||
1844 | if (index == PRIMARY_HWQ) | |
1845 | ctx = cxl_get_context(cfg->dev); | |
1846 | else | |
1847 | ctx = cxl_dev_context_init(cfg->dev); | |
9526f360 MK |
1848 | if (unlikely(!ctx)) { |
1849 | rc = -ENOMEM; | |
a583d00a | 1850 | goto err1; |
9526f360 | 1851 | } |
a583d00a UK |
1852 | |
1853 | WARN_ON(hwq->ctx); | |
1854 | hwq->ctx = ctx; | |
9526f360 MK |
1855 | |
1856 | /* Set it up as a master with the CXL */ | |
1857 | cxl_set_master(ctx); | |
1858 | ||
a583d00a UK |
1859 | /* Reset AFU when initializing primary context */ |
1860 | if (index == PRIMARY_HWQ) { | |
1861 | rc = cxl_afu_reset(ctx); | |
1862 | if (unlikely(rc)) { | |
1863 | dev_err(dev, "%s: AFU reset failed rc=%d\n", | |
1864 | __func__, rc); | |
1865 | goto err1; | |
1866 | } | |
9526f360 MK |
1867 | } |
1868 | ||
a583d00a | 1869 | level = init_intr(cfg, hwq); |
9526f360 | 1870 | if (unlikely(level)) { |
88d33628 | 1871 | dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); |
a583d00a | 1872 | goto err2; |
9526f360 | 1873 | } |
c21e0bbf MO |
1874 | |
1875 | /* This performs the equivalent of the CXL_IOCTL_START_WORK. | |
1876 | * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process | |
1877 | * element (pe) that is embedded in the context (ctx) | |
1878 | */ | |
a583d00a | 1879 | rc = start_context(cfg, index); |
c21e0bbf MO |
1880 | if (unlikely(rc)) { |
1881 | dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); | |
1882 | level = UNMAP_THREE; | |
a583d00a | 1883 | goto err2; |
c21e0bbf | 1884 | } |
a583d00a UK |
1885 | |
1886 | out: | |
88d33628 | 1887 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf | 1888 | return rc; |
a583d00a UK |
1889 | err2: |
1890 | term_intr(cfg, level, index); | |
1891 | if (index != PRIMARY_HWQ) | |
1892 | cxl_release_context(ctx); | |
1893 | err1: | |
1894 | hwq->ctx = NULL; | |
1895 | goto out; | |
c21e0bbf MO |
1896 | } |
1897 | ||
a290b480 MO |
1898 | /** |
1899 | * get_num_afu_ports() - determines and configures the number of AFU ports | |
1900 | * @cfg: Internal structure associated with the host. | |
1901 | * | |
1902 | * This routine determines the number of AFU ports by converting the global | |
1903 | * port selection mask. The converted value is only valid following an AFU | |
1904 | * reset (explicit or power-on). This routine must be invoked shortly after | |
1905 | * mapping as other routines are dependent on the number of ports during the | |
1906 | * initialization sequence. | |
1907 | * | |
1908 | * To support legacy AFUs that might not have reflected an initial global | |
1909 | * port mask (value read is 0), default to the number of ports originally | |
1910 | * supported by the cxlflash driver (2) before hardware with other port | |
1911 | * offerings was introduced. | |
1912 | */ | |
1913 | static void get_num_afu_ports(struct cxlflash_cfg *cfg) | |
1914 | { | |
1915 | struct afu *afu = cfg->afu; | |
1916 | struct device *dev = &cfg->dev->dev; | |
1917 | u64 port_mask; | |
1918 | int num_fc_ports = LEGACY_FC_PORTS; | |
1919 | ||
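 | /* | |
 |  * e.g. a port selection mask of 0xF (ports 0-3 enabled) yields | |
 |  * ilog2(0xF) + 1 = 4 FC ports. | |
 |  */ | |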
1920 | port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); | |
1921 | if (port_mask != 0ULL) | |
1922 | num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); | |
1923 | ||
1924 | dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", | |
1925 | __func__, port_mask, num_fc_ports); | |
1926 | ||
1927 | cfg->num_fc_ports = num_fc_ports; | |
1928 | cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); | |
1929 | } | |
1930 | ||
c21e0bbf MO |
1931 | /** |
1932 | * init_afu() - setup as master context and start AFU | |
1284fb0c | 1933 | * @cfg: Internal structure associated with the host. |
c21e0bbf MO |
1934 | * |
1935 | * This routine is a higher level of control for configuring the | |
1936 | * AFU on probe and reset paths. | |
1937 | * | |
1284fb0c | 1938 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
1939 | */ |
1940 | static int init_afu(struct cxlflash_cfg *cfg) | |
1941 | { | |
1942 | u64 reg; | |
1943 | int rc = 0; | |
1944 | struct afu *afu = cfg->afu; | |
1945 | struct device *dev = &cfg->dev->dev; | |
a583d00a UK |
1946 | struct hwq *hwq; |
1947 | int i; | |
c21e0bbf | 1948 | |
5cdac81a MO |
1949 | cxl_perst_reloads_same_image(cfg->cxl_afu, true); |
1950 | ||
bb85ef68 MO |
1951 | afu->num_hwqs = afu->desired_hwqs; |
1952 | for (i = 0; i < afu->num_hwqs; i++) { | |
a583d00a UK |
1953 | rc = init_mc(cfg, i); |
1954 | if (rc) { | |
1955 | dev_err(dev, "%s: init_mc failed rc=%d index=%d\n", | |
1956 | __func__, rc, i); | |
1957 | goto err1; | |
1958 | } | |
c21e0bbf MO |
1959 | } |
1960 | ||
a583d00a UK |
1961 | /* Map the entire MMIO space of the AFU using the first context */ |
1962 | hwq = get_hwq(afu, PRIMARY_HWQ); | |
1963 | afu->afu_map = cxl_psa_map(hwq->ctx); | |
c21e0bbf | 1964 | if (!afu->afu_map) { |
88d33628 | 1965 | dev_err(dev, "%s: cxl_psa_map failed\n", __func__); |
ee3491ba | 1966 | rc = -ENOMEM; |
c21e0bbf MO |
1967 | goto err1; |
1968 | } | |
1969 | ||
e5ce067b MO |
1970 | /* No byte reverse on reading afu_version or string will be backwards */ |
1971 | reg = readq(&afu->afu_map->global.regs.afu_version); | |
1972 | memcpy(afu->version, ®, sizeof(reg)); | |
c21e0bbf MO |
1973 | afu->interface_version = |
1974 | readq_be(&afu->afu_map->global.regs.interface_version); | |
e5ce067b | 1975 | if ((afu->interface_version + 1) == 0) { |
88d33628 MO |
1976 | dev_err(dev, "Back level AFU, please upgrade. AFU version %s " |
1977 | "interface version %016llx\n", afu->version, | |
e5ce067b MO |
1978 | afu->interface_version); |
1979 | rc = -EINVAL; | |
d940f9ae | 1980 | goto err1; |
ee3491ba MO |
1981 | } |
1982 | ||
bae0ac69 MO |
1983 | if (afu_is_sq_cmd_mode(afu)) { |
1984 | afu->send_cmd = send_cmd_sq; | |
1985 | afu->context_reset = context_reset_sq; | |
1986 | } else { | |
1987 | afu->send_cmd = send_cmd_ioarrin; | |
1988 | afu->context_reset = context_reset_ioarrin; | |
1989 | } | |
48b4be36 | 1990 | |
88d33628 MO |
1991 | dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, |
1992 | afu->version, afu->interface_version); | |
c21e0bbf | 1993 | |
a290b480 MO |
1994 | get_num_afu_ports(cfg); |
1995 | ||
c21e0bbf MO |
1996 | rc = start_afu(cfg); |
1997 | if (rc) { | |
88d33628 | 1998 | dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); |
d940f9ae | 1999 | goto err1; |
c21e0bbf MO |
2000 | } |
2001 | ||
2002 | afu_err_intr_init(cfg->afu); | |
bb85ef68 | 2003 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a UK |
2004 | hwq = get_hwq(afu, i); |
2005 | ||
a583d00a UK |
2006 | hwq->room = readq_be(&hwq->host_map->cmd_room); |
2007 | } | |
c21e0bbf | 2008 | |
2cb79266 MO |
2009 | /* Restore the LUN mappings */ |
2010 | cxlflash_restore_luntable(cfg); | |
ee3491ba | 2011 | out: |
88d33628 | 2012 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf | 2013 | return rc; |
ee3491ba | 2014 | |
ee3491ba | 2015 | err1: |
bb85ef68 | 2016 | for (i = afu->num_hwqs - 1; i >= 0; i--) { |
a583d00a UK |
2017 | term_intr(cfg, UNMAP_THREE, i); |
2018 | term_mc(cfg, i); | |
2019 | } | |
ee3491ba | 2020 | goto out; |
c21e0bbf MO |
2021 | } |
2022 | ||
3b4f03cd UK |
2023 | /** |
2024 | * afu_reset() - resets the AFU | |
2025 | * @cfg: Internal structure associated with the host. | |
2026 | * | |
2027 | * Return: 0 on success, -errno on failure | |
2028 | */ | |
2029 | static int afu_reset(struct cxlflash_cfg *cfg) | |
2030 | { | |
2031 | struct device *dev = &cfg->dev->dev; | |
2032 | int rc = 0; | |
2033 | ||
2034 | /* Stop the context before the reset. Since the context is | |
2035 | * no longer available, restart it after the reset is complete. | |
2036 | */ | |
2037 | term_afu(cfg); | |
2038 | ||
2039 | rc = init_afu(cfg); | |
2040 | ||
2041 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); | |
2042 | return rc; | |
2043 | } | |
2044 | ||
2045 | /** | |
2046 | * drain_ioctls() - wait until all currently executing ioctls have completed | |
2047 | * @cfg: Internal structure associated with the host. | |
2048 | * | |
2049 | * Obtain write access to the read/write semaphore that wraps ioctl | |
2050 | * handling to 'drain' ioctls currently executing. | |
2051 | */ | |
2052 | static void drain_ioctls(struct cxlflash_cfg *cfg) | |
2053 | { | |
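 | /* | |
 |  * Briefly acquiring the semaphore for write forces this thread to | |
 |  * wait until every reader (an in-flight ioctl) has dropped it; | |
 |  * releasing it immediately afterwards lets new ioctls proceed. | |
 |  */ | |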
2054 | down_write(&cfg->ioctl_rwsem); | |
2055 | up_write(&cfg->ioctl_rwsem); | |
2056 | } | |
2057 | ||
2058 | /** | |
2059 | * cxlflash_async_reset_host() - asynchronous host reset handler | |
2060 | * @data: Private data provided while scheduling reset. | |
2061 | * @cookie: Cookie that can be used for checkpointing. | |
2062 | */ | |
2063 | static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) | |
2064 | { | |
2065 | struct cxlflash_cfg *cfg = data; | |
2066 | struct device *dev = &cfg->dev->dev; | |
2067 | int rc = 0; | |
2068 | ||
2069 | if (cfg->state != STATE_RESET) { | |
2070 | dev_dbg(dev, "%s: Not performing a reset, state=%d\n", | |
2071 | __func__, cfg->state); | |
2072 | goto out; | |
2073 | } | |
2074 | ||
2075 | drain_ioctls(cfg); | |
2076 | cxlflash_mark_contexts_error(cfg); | |
2077 | rc = afu_reset(cfg); | |
2078 | if (rc) | |
2079 | cfg->state = STATE_FAILTERM; | |
2080 | else | |
2081 | cfg->state = STATE_NORMAL; | |
2082 | wake_up_all(&cfg->reset_waitq); | |
2083 | ||
2084 | out: | |
2085 | scsi_unblock_requests(cfg->host); | |
2086 | } | |
2087 | ||
2088 | /** | |
2089 | * cxlflash_schedule_async_reset() - schedule an asynchronous host reset | |
2090 | * @cfg: Internal structure associated with the host. | |
2091 | */ | |
2092 | static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) | |
2093 | { | |
2094 | struct device *dev = &cfg->dev->dev; | |
2095 | ||
2096 | if (cfg->state != STATE_NORMAL) { | |
2097 | dev_dbg(dev, "%s: Not performing reset state=%d\n", | |
2098 | __func__, cfg->state); | |
2099 | return; | |
2100 | } | |
2101 | ||
2102 | cfg->state = STATE_RESET; | |
2103 | scsi_block_requests(cfg->host); | |
2104 | cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host, | |
2105 | cfg); | |
2106 | } | |
2107 | ||
c21e0bbf MO |
2108 | /** |
2109 | * cxlflash_afu_sync() - builds and sends an AFU sync command | |
2110 | * @afu: AFU associated with the host. | |
2111 | * @ctx_hndl_u: Identifies context requesting sync. | |
2112 | * @res_hndl_u: Identifies resource requesting sync. | |
2113 | * @mode: Type of sync to issue (lightweight, heavyweight, global). | |
2114 | * | |
2115 | * The AFU can only take 1 sync command at a time. This routine enforces this | |
f15fbf8d | 2116 | * limitation by using a mutex to provide exclusive access to the AFU during |
c21e0bbf MO |
2117 | * the sync. This design point requires calling threads to not be on interrupt |
2118 | * context due to the possibility of sleeping during concurrent sync operations. | |
2119 | * | |
5cdac81a MO |
2120 | * AFU sync operations are only necessary and allowed when the device is |
2121 | * operating normally. When not operating normally, sync requests can occur as | |
2122 | * part of cleaning up resources associated with an adapter prior to removal. | |
2123 | * In this scenario, these requests are simply ignored (safe due to the AFU | |
2124 | * going away). | |
2125 | * | |
c21e0bbf | 2126 | * Return: |
91995b34 | 2127 | * 0 on success, -errno on failure |
c21e0bbf MO |
2128 | */ |
2129 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, | |
2130 | res_hndl_t res_hndl_u, u8 mode) | |
2131 | { | |
5cdac81a | 2132 | struct cxlflash_cfg *cfg = afu->parent; |
4392ba49 | 2133 | struct device *dev = &cfg->dev->dev; |
c21e0bbf | 2134 | struct afu_cmd *cmd = NULL; |
a583d00a | 2135 | struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); |
350bb478 | 2136 | char *buf = NULL; |
c21e0bbf | 2137 | int rc = 0; |
ddc869e9 | 2138 | int nretry = 0; |
c21e0bbf MO |
2139 | static DEFINE_MUTEX(sync_active); |
2140 | ||
5cdac81a | 2141 | if (cfg->state != STATE_NORMAL) { |
88d33628 MO |
2142 | dev_dbg(dev, "%s: Sync not required state=%u\n", |
2143 | __func__, cfg->state); | |
5cdac81a MO |
2144 | return 0; |
2145 | } | |
2146 | ||
c21e0bbf | 2147 | mutex_lock(&sync_active); |
de01283b | 2148 | atomic_inc(&afu->cmds_active); |
350bb478 MO |
2149 | buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); |
2150 | if (unlikely(!buf)) { | |
2151 | dev_err(dev, "%s: no memory for command\n", __func__); | |
91995b34 | 2152 | rc = -ENOMEM; |
c21e0bbf MO |
2153 | goto out; |
2154 | } | |
2155 | ||
350bb478 | 2156 | cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); |
ddc869e9 UK |
2157 | |
2158 | retry: | |
350bb478 | 2159 | init_completion(&cmd->cevent); |
350bb478 | 2160 | cmd->parent = afu; |
a583d00a | 2161 | cmd->hwq_index = hwq->index; |
c21e0bbf | 2162 | |
ddc869e9 UK |
2163 | dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d nretry=%d\n", |
2164 | __func__, afu, cmd, ctx_hndl_u, nretry); | |
c21e0bbf MO |
2165 | |
2166 | cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; | |
a583d00a | 2167 | cmd->rcb.ctx_id = hwq->ctx_hndl; |
350bb478 | 2168 | cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; |
c21e0bbf MO |
2169 | cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; |
2170 | ||
2171 | cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ | |
2172 | cmd->rcb.cdb[1] = mode; | |
2173 | ||
2174 | /* The cdb is aligned, no unaligned accessors required */ | |
1786f4a0 MO |
2175 | *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); |
2176 | *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); | |
c21e0bbf | 2177 | |
48b4be36 | 2178 | rc = afu->send_cmd(afu, cmd); |
91995b34 UK |
2179 | if (unlikely(rc)) { |
2180 | rc = -ENOBUFS; | |
c21e0bbf | 2181 | goto out; |
91995b34 | 2182 | } |
c21e0bbf | 2183 | |
9ba848ac | 2184 | rc = wait_resp(afu, cmd); |
ddc869e9 UK |
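 | /* | |
 |  * On a timeout, attempt a context reset and retry the sync once; if | |
 |  * the reset fails or the retry also times out, schedule an | |
 |  * asynchronous adapter reset. | |
 |  */ | |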
2185 | if (rc == -ETIMEDOUT) { |
2186 | rc = afu->context_reset(hwq); | |
2187 | if (!rc && ++nretry < 2) | |
2188 | goto retry; | |
3b4f03cd | 2189 | cxlflash_schedule_async_reset(cfg); |
ddc869e9 UK |
2190 | } |
2191 | ||
c21e0bbf | 2192 | out: |
de01283b | 2193 | atomic_dec(&afu->cmds_active); |
c21e0bbf | 2194 | mutex_unlock(&sync_active); |
350bb478 | 2195 | kfree(buf); |
88d33628 | 2196 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
2197 | return rc; |
2198 | } | |
2199 | ||
15305514 MO |
2200 | /** |
2201 | * cxlflash_eh_device_reset_handler() - reset a single LUN | |
2202 | * @scp: SCSI command from stack identifying the LUN to reset. | |
2203 | * | |
2204 | * Return: | |
2205 | * SUCCESS as defined in scsi/scsi.h | |
2206 | * FAILED as defined in scsi/scsi.h | |
2207 | */ | |
2208 | static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) | |
2209 | { | |
2210 | int rc = SUCCESS; | |
2211 | struct Scsi_Host *host = scp->device->host; | |
88d33628 MO |
2212 | struct cxlflash_cfg *cfg = shost_priv(host); |
2213 | struct device *dev = &cfg->dev->dev; | |
15305514 MO |
2214 | struct afu *afu = cfg->afu; |
2215 | int rcr = 0; | |
2216 | ||
88d33628 MO |
2217 | dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " |
2218 | "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, | |
2219 | scp->device->channel, scp->device->id, scp->device->lun, | |
2220 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), | |
2221 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), | |
2222 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), | |
2223 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); | |
15305514 | 2224 | |
ed486daa | 2225 | retry: |
15305514 MO |
2226 | switch (cfg->state) { |
2227 | case STATE_NORMAL: | |
2228 | rcr = send_tmf(afu, scp, TMF_LUN_RESET); | |
2229 | if (unlikely(rcr)) | |
2230 | rc = FAILED; | |
2231 | break; | |
2232 | case STATE_RESET: | |
2233 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
ed486daa | 2234 | goto retry; |
15305514 MO |
2235 | default: |
2236 | rc = FAILED; | |
2237 | break; | |
2238 | } | |
2239 | ||
88d33628 | 2240 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
15305514 MO |
2241 | return rc; |
2242 | } | |
2243 | ||
2244 | /** | |
2245 | * cxlflash_eh_host_reset_handler() - reset the host adapter | |
2246 | * @scp: SCSI command from stack identifying host. | |
2247 | * | |
1d3324c3 MO |
2248 | * Following a reset, the state is evaluated again in case an EEH occurred |
2249 | * during the reset. In such a scenario, the host reset will either yield | |
2250 | * until the EEH recovery is complete or return success or failure based | |
2251 | * upon the current device state. | |
2252 | * | |
15305514 MO |
2253 | * Return: |
2254 | * SUCCESS as defined in scsi/scsi.h | |
2255 | * FAILED as defined in scsi/scsi.h | |
2256 | */ | |
2257 | static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) | |
2258 | { | |
2259 | int rc = SUCCESS; | |
2260 | int rcr = 0; | |
2261 | struct Scsi_Host *host = scp->device->host; | |
88d33628 MO |
2262 | struct cxlflash_cfg *cfg = shost_priv(host); |
2263 | struct device *dev = &cfg->dev->dev; | |
15305514 | 2264 | |
88d33628 MO |
2265 | dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " |
2266 | "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, | |
2267 | scp->device->channel, scp->device->id, scp->device->lun, | |
2268 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), | |
2269 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), | |
2270 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), | |
2271 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); | |
15305514 MO |
2272 | |
2273 | switch (cfg->state) { | |
2274 | case STATE_NORMAL: | |
2275 | cfg->state = STATE_RESET; | |
f411396d | 2276 | drain_ioctls(cfg); |
15305514 MO |
2277 | cxlflash_mark_contexts_error(cfg); |
2278 | rcr = afu_reset(cfg); | |
2279 | if (rcr) { | |
2280 | rc = FAILED; | |
2281 | cfg->state = STATE_FAILTERM; | |
2282 | } else | |
2283 | cfg->state = STATE_NORMAL; | |
2284 | wake_up_all(&cfg->reset_waitq); | |
1d3324c3 MO |
2285 | ssleep(1); |
2286 | /* fall through */ | |
15305514 MO |
2287 | case STATE_RESET: |
2288 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
2289 | if (cfg->state == STATE_NORMAL) | |
2290 | break; | |
2291 | /* fall through */ | |
2292 | default: | |
2293 | rc = FAILED; | |
2294 | break; | |
2295 | } | |
2296 | ||
88d33628 | 2297 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
15305514 MO |
2298 | return rc; |
2299 | } | |
2300 | ||
2301 | /** | |
2302 | * cxlflash_change_queue_depth() - change the queue depth for the device | |
2303 | * @sdev: SCSI device destined for queue depth change. | |
2304 | * @qdepth: Requested queue depth value to set. | |
2305 | * | |
2306 | * The requested queue depth is capped to the maximum supported value. | |
2307 | * | |
2308 | * Return: The actual queue depth set. | |
2309 | */ | |
2310 | static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) | |
2311 | { | |
2312 | ||
2313 | if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) | |
2314 | qdepth = CXLFLASH_MAX_CMDS_PER_LUN; | |
2315 | ||
2316 | scsi_change_queue_depth(sdev, qdepth); | |
2317 | return sdev->queue_depth; | |
2318 | } | |
2319 | ||
2320 | /** | |
2321 | * cxlflash_show_port_status() - queries and presents the current port status | |
e0f01a21 | 2322 | * @port: Desired port for status reporting. |
90c9f8f4 | 2323 | * @cfg: Internal structure associated with the host. |
15305514 MO |
2324 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2325 | * | |
66d4bce4 | 2326 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
15305514 | 2327 | */ |
90c9f8f4 MO |
2328 | static ssize_t cxlflash_show_port_status(u32 port, |
2329 | struct cxlflash_cfg *cfg, | |
2330 | char *buf) | |
15305514 | 2331 | { |
66d4bce4 | 2332 | struct device *dev = &cfg->dev->dev; |
15305514 | 2333 | char *disp_status; |
15305514 | 2334 | u64 status; |
c885d3fe | 2335 | __be64 __iomem *fc_port_regs; |
15305514 | 2336 | |
66d4bce4 MO |
2337 | WARN_ON(port >= MAX_FC_PORTS); |
2338 | ||
2339 | if (port >= cfg->num_fc_ports) { | |
2340 | dev_info(dev, "%s: Port %d not supported on this card.\n", | |
2341 | __func__, port); | |
2342 | return -EINVAL; | |
2343 | } | |
15305514 | 2344 | |
c885d3fe MO |
2345 | fc_port_regs = get_fc_port_regs(cfg, port); |
2346 | status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); | |
e0f01a21 | 2347 | status &= FC_MTIP_STATUS_MASK; |
15305514 MO |
2348 | |
2349 | if (status == FC_MTIP_STATUS_ONLINE) | |
2350 | disp_status = "online"; | |
2351 | else if (status == FC_MTIP_STATUS_OFFLINE) | |
2352 | disp_status = "offline"; | |
2353 | else | |
2354 | disp_status = "unknown"; | |
2355 | ||
e0f01a21 MO |
2356 | return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); |
2357 | } | |
2358 | ||
2359 | /** | |
2360 | * port0_show() - queries and presents the current status of port 0 | |
2361 | * @dev: Generic device associated with the host owning the port. | |
2362 | * @attr: Device attribute representing the port. | |
2363 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2364 | * | |
2365 | * Return: The size of the ASCII string returned in @buf. | |
2366 | */ | |
2367 | static ssize_t port0_show(struct device *dev, | |
2368 | struct device_attribute *attr, | |
2369 | char *buf) | |
2370 | { | |
88d33628 | 2371 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2372 | |
90c9f8f4 | 2373 | return cxlflash_show_port_status(0, cfg, buf); |
15305514 MO |
2374 | } |
2375 | ||
2376 | /** | |
e0f01a21 MO |
2377 | * port1_show() - queries and presents the current status of port 1 |
2378 | * @dev: Generic device associated with the host owning the port. | |
2379 | * @attr: Device attribute representing the port. | |
2380 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2381 | * | |
2382 | * Return: The size of the ASCII string returned in @buf. | |
2383 | */ | |
2384 | static ssize_t port1_show(struct device *dev, | |
2385 | struct device_attribute *attr, | |
2386 | char *buf) | |
2387 | { | |
88d33628 | 2388 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2389 | |
90c9f8f4 | 2390 | return cxlflash_show_port_status(1, cfg, buf); |
e0f01a21 MO |
2391 | } |
2392 | ||
bdcff1c5 MO |
2393 | /** |
2394 | * port2_show() - queries and presents the current status of port 2 | |
2395 | * @dev: Generic device associated with the host owning the port. | |
2396 | * @attr: Device attribute representing the port. | |
2397 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2398 | * | |
2399 | * Return: The size of the ASCII string returned in @buf. | |
2400 | */ | |
2401 | static ssize_t port2_show(struct device *dev, | |
2402 | struct device_attribute *attr, | |
2403 | char *buf) | |
2404 | { | |
2405 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2406 | ||
2407 | return cxlflash_show_port_status(2, cfg, buf); | |
2408 | } | |
2409 | ||
2410 | /** | |
2411 | * port3_show() - queries and presents the current status of port 3 | |
2412 | * @dev: Generic device associated with the host owning the port. | |
2413 | * @attr: Device attribute representing the port. | |
2414 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2415 | * | |
2416 | * Return: The size of the ASCII string returned in @buf. | |
2417 | */ | |
2418 | static ssize_t port3_show(struct device *dev, | |
2419 | struct device_attribute *attr, | |
2420 | char *buf) | |
2421 | { | |
2422 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2423 | ||
2424 | return cxlflash_show_port_status(3, cfg, buf); | |
2425 | } | |
2426 | ||
e0f01a21 MO |
2427 | /** |
2428 | * lun_mode_show() - presents the current LUN mode of the host | |
15305514 | 2429 | * @dev: Generic device associated with the host. |
e0f01a21 | 2430 | * @attr: Device attribute representing the LUN mode. |
15305514 MO |
2431 | * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. |
2432 | * | |
2433 | * Return: The size of the ASCII string returned in @buf. | |
2434 | */ | |
e0f01a21 MO |
2435 | static ssize_t lun_mode_show(struct device *dev, |
2436 | struct device_attribute *attr, char *buf) | |
15305514 | 2437 | { |
88d33628 | 2438 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
15305514 MO |
2439 | struct afu *afu = cfg->afu; |
2440 | ||
e0f01a21 | 2441 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); |
15305514 MO |
2442 | } |
2443 | ||
2444 | /** | |
e0f01a21 | 2445 | * lun_mode_store() - sets the LUN mode of the host |
15305514 | 2446 | * @dev: Generic device associated with the host. |
e0f01a21 | 2447 | * @attr: Device attribute representing the LUN mode. |
15305514 MO |
2448 | * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. |
2449 | * @count: Length of data residing in @buf. | |
2450 | * | |
2451 | * The CXL Flash AFU supports a dummy LUN mode where the external | |
2452 | * links and storage are not required. Space on the FPGA is used | |
2453 | * to create 1 or 2 small LUNs which are presented to the system | |
2454 | * as if they were a normal storage device. This feature is useful | |
2455 | * during development and also provides manufacturing with a way | |
2456 | * to test the AFU without an actual device. | |
2457 | * | |
2458 | * 0 = external LUN[s] (default) | |
2459 | * 1 = internal LUN (1 x 64K, 512B blocks, id 0) | |
2460 | * 2 = internal LUN (1 x 64K, 4K blocks, id 0) | |
2461 | * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) | |
2462 | * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) | |
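 | * | |
 | * The mode is typically changed by writing one of these values to the | |
 | * host's lun_mode sysfs attribute (e.g. /sys/class/scsi_host/hostN/lun_mode). | |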
2463 | * | |
2464 | * Return: The number of bytes consumed from @buf. | |
2465 | */ | |
e0f01a21 MO |
2466 | static ssize_t lun_mode_store(struct device *dev, |
2467 | struct device_attribute *attr, | |
2468 | const char *buf, size_t count) | |
15305514 MO |
2469 | { |
2470 | struct Scsi_Host *shost = class_to_shost(dev); | |
88d33628 | 2471 | struct cxlflash_cfg *cfg = shost_priv(shost); |
15305514 MO |
2472 | struct afu *afu = cfg->afu; |
2473 | int rc; | |
2474 | u32 lun_mode; | |
2475 | ||
2476 | rc = kstrtouint(buf, 10, &lun_mode); | |
2477 | if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { | |
2478 | afu->internal_lun = lun_mode; | |
603ecce9 MK |
2479 | |
2480 | /* | |
2481 | * When configured for internal LUN, there is only one channel, | |
66d4bce4 MO |
2482 | * channel number 0, else there will be one less than the number |
2483 | * of fc ports for this card. | |
603ecce9 MK |
2484 | */ |
2485 | if (afu->internal_lun) | |
2486 | shost->max_channel = 0; | |
2487 | else | |
e8e17ea6 | 2488 | shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); |
603ecce9 | 2489 | |
15305514 MO |
2490 | afu_reset(cfg); |
2491 | scsi_scan_host(cfg->host); | |
2492 | } | |
2493 | ||
2494 | return count; | |
2495 | } | |
2496 | ||
2497 | /** | |
e0f01a21 | 2498 | * ioctl_version_show() - presents the current ioctl version of the host |
15305514 MO |
2499 | * @dev: Generic device associated with the host. |
2500 | * @attr: Device attribute representing the ioctl version. | |
2501 | * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. | |
2502 | * | |
2503 | * Return: The size of the ASCII string returned in @buf. | |
2504 | */ | |
e0f01a21 MO |
2505 | static ssize_t ioctl_version_show(struct device *dev, |
2506 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2507 | { |
2508 | return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); | |
2509 | } | |
2510 | ||
2511 | /** | |
e0f01a21 MO |
2512 | * cxlflash_show_port_lun_table() - queries and presents the port LUN table |
2513 | * @port: Desired port for status reporting. | |
90c9f8f4 | 2514 | * @cfg: Internal structure associated with the host. |
e0f01a21 MO |
2515 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2516 | * | |
66d4bce4 | 2517 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
e0f01a21 MO |
2518 | */ |
2519 | static ssize_t cxlflash_show_port_lun_table(u32 port, | |
90c9f8f4 | 2520 | struct cxlflash_cfg *cfg, |
e0f01a21 MO |
2521 | char *buf) |
2522 | { | |
66d4bce4 | 2523 | struct device *dev = &cfg->dev->dev; |
c885d3fe | 2524 | __be64 __iomem *fc_port_luns; |
e0f01a21 MO |
2525 | int i; |
2526 | ssize_t bytes = 0; | |
e0f01a21 | 2527 | |
66d4bce4 MO |
2528 | WARN_ON(port >= MAX_FC_PORTS); |
2529 | ||
2530 | if (port >= cfg->num_fc_ports) { | |
2531 | dev_info(dev, "%s: Port %d not supported on this card.\n", | |
2532 | __func__, port); | |
2533 | return -EINVAL; | |
2534 | } | |
e0f01a21 | 2535 | |
c885d3fe | 2536 | fc_port_luns = get_fc_port_luns(cfg, port); |
e0f01a21 MO |
2537 | |
2538 | for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) | |
2539 | bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, | |
c885d3fe MO |
2540 | "%03d: %016llx\n", |
2541 | i, readq_be(&fc_port_luns[i])); | |
e0f01a21 MO |
2542 | return bytes; |
2543 | } | |
2544 | ||
2545 | /** | |
2546 | * port0_lun_table_show() - presents the current LUN table of port 0 | |
2547 | * @dev: Generic device associated with the host owning the port. | |
2548 | * @attr: Device attribute representing the port. | |
2549 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2550 | * | |
2551 | * Return: The size of the ASCII string returned in @buf. | |
2552 | */ | |
2553 | static ssize_t port0_lun_table_show(struct device *dev, | |
2554 | struct device_attribute *attr, | |
2555 | char *buf) | |
2556 | { | |
88d33628 | 2557 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2558 | |
90c9f8f4 | 2559 | return cxlflash_show_port_lun_table(0, cfg, buf); |
e0f01a21 MO |
2560 | } |
2561 | ||
2562 | /** | |
2563 | * port1_lun_table_show() - presents the current LUN table of port 1 | |
2564 | * @dev: Generic device associated with the host owning the port. | |
2565 | * @attr: Device attribute representing the port. | |
2566 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2567 | * | |
2568 | * Return: The size of the ASCII string returned in @buf. | |
2569 | */ | |
2570 | static ssize_t port1_lun_table_show(struct device *dev, | |
2571 | struct device_attribute *attr, | |
2572 | char *buf) | |
2573 | { | |
88d33628 | 2574 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2575 | |
90c9f8f4 | 2576 | return cxlflash_show_port_lun_table(1, cfg, buf); |
e0f01a21 MO |
2577 | } |
2578 | ||
bdcff1c5 MO |
2579 | /** |
2580 | * port2_lun_table_show() - presents the current LUN table of port 2 | |
2581 | * @dev: Generic device associated with the host owning the port. | |
2582 | * @attr: Device attribute representing the port. | |
2583 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2584 | * | |
2585 | * Return: The size of the ASCII string returned in @buf. | |
2586 | */ | |
2587 | static ssize_t port2_lun_table_show(struct device *dev, | |
2588 | struct device_attribute *attr, | |
2589 | char *buf) | |
2590 | { | |
2591 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2592 | ||
2593 | return cxlflash_show_port_lun_table(2, cfg, buf); | |
2594 | } | |
2595 | ||
2596 | /** | |
2597 | * port3_lun_table_show() - presents the current LUN table of port 3 | |
2598 | * @dev: Generic device associated with the host owning the port. | |
2599 | * @attr: Device attribute representing the port. | |
2600 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2601 | * | |
2602 | * Return: The size of the ASCII string returned in @buf. | |
2603 | */ | |
2604 | static ssize_t port3_lun_table_show(struct device *dev, | |
2605 | struct device_attribute *attr, | |
2606 | char *buf) | |
2607 | { | |
2608 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2609 | ||
2610 | return cxlflash_show_port_lun_table(3, cfg, buf); | |
2611 | } | |
2612 | ||
2588f222 MO |
2613 | /** |
2614 | * irqpoll_weight_show() - presents the current IRQ poll weight for the host | |
2615 | * @dev: Generic device associated with the host. | |
2616 | * @attr: Device attribute representing the IRQ poll weight. | |
2617 | * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll | |
2618 | * weight in ASCII. | |
2619 | * | |
2620 | * An IRQ poll weight of 0 indicates polling is disabled. | |
2621 | * | |
2622 | * Return: The size of the ASCII string returned in @buf. | |
2623 | */ | |
2624 | static ssize_t irqpoll_weight_show(struct device *dev, | |
2625 | struct device_attribute *attr, char *buf) | |
2626 | { | |
2627 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2628 | struct afu *afu = cfg->afu; | |
2629 | ||
2630 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight); | |
2631 | } | |
2632 | ||
2633 | /** | |
2634 | * irqpoll_weight_store() - sets the current IRQ poll weight for the host | |
2635 | * @dev: Generic device associated with the host. | |
2636 | * @attr: Device attribute representing the IRQ poll weight. | |
2637 | * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll | |
2638 | * weight in ASCII. | |
2639 | * @count: Length of data residing in @buf. | |
2640 | * | |
2641 | * An IRQ poll weight of 0 indicates polling is disabled. | |
2642 | * | |
2643 | * Return: The number of bytes consumed from @buf or -EINVAL on error. | |
2644 | */ | |
2645 | static ssize_t irqpoll_weight_store(struct device *dev, | |
2646 | struct device_attribute *attr, | |
2647 | const char *buf, size_t count) | |
2648 | { | |
2649 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2650 | struct device *cfgdev = &cfg->dev->dev; | |
2651 | struct afu *afu = cfg->afu; | |
a583d00a | 2652 | struct hwq *hwq; |
2588f222 | 2653 | u32 weight; |
a583d00a | 2654 | int rc, i; |
2588f222 MO |
2655 | |
2656 | rc = kstrtouint(buf, 10, &weight); | |
2657 | if (rc) | |
2658 | return -EINVAL; | |
2659 | ||
2660 | if (weight > 256) { | |
2661 | dev_info(cfgdev, | |
2662 | "Invalid IRQ poll weight. It must be 256 or less.\n"); | |
2663 | return -EINVAL; | |
2664 | } | |
2665 | ||
2666 | if (weight == afu->irqpoll_weight) { | |
2667 | dev_info(cfgdev, | |
2668 | "Current IRQ poll weight is already set to this value.\n"); | |
2669 | return -EINVAL; | |
2670 | } | |
2671 | ||
a583d00a | 2672 | if (afu_is_irqpoll_enabled(afu)) { |
bb85ef68 | 2673 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a UK |
2674 | hwq = get_hwq(afu, i); |
2675 | ||
2676 | irq_poll_disable(&hwq->irqpoll); | |
2677 | } | |
2678 | } | |
2588f222 MO |
2679 | |
2680 | afu->irqpoll_weight = weight; | |
2681 | ||
a583d00a | 2682 | if (weight > 0) { |
bb85ef68 | 2683 | for (i = 0; i < afu->num_hwqs; i++) { |
a583d00a UK |
2684 | hwq = get_hwq(afu, i); |
2685 | ||
2686 | irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll); | |
2687 | } | |
2688 | } | |
2588f222 MO |
2689 | |
2690 | return count; | |
2691 | } | |
2692 | ||
bb85ef68 MO |
2693 | /** |
2694 | * num_hwqs_show() - presents the number of hardware queues for the host | |
2695 | * @dev: Generic device associated with the host. | |
2696 | * @attr: Device attribute representing the number of hardware queues. | |
2697 | * @buf: Buffer of length PAGE_SIZE to report back the number of hardware | |
2698 | * queues in ASCII. | |
2699 | * | |
2700 | * Return: The size of the ASCII string returned in @buf. | |
2701 | */ | |
2702 | static ssize_t num_hwqs_show(struct device *dev, | |
2703 | struct device_attribute *attr, char *buf) | |
2704 | { | |
2705 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2706 | struct afu *afu = cfg->afu; | |
2707 | ||
2708 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs); | |
2709 | } | |
2710 | ||
2711 | /** | |
2712 | * num_hwqs_store() - sets the number of hardware queues for the host | |
2713 | * @dev: Generic device associated with the host. | |
2714 | * @attr: Device attribute representing the number of hardware queues. | |
2715 | * @buf: Buffer of length PAGE_SIZE containing the number of hardware | |
2716 | * queues in ASCII. | |
2717 | * @count: Length of data residing in @buf. | |
2718 | * | |
2719 | * n > 0: num_hwqs = n | |
2720 | * n = 0: num_hwqs = num_online_cpus() | |
2721 | * n < 0: num_hwqs = num_online_cpus() / abs(n) | |
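 | * (e.g. with 16 online CPUs, n = -4 selects 4 hardware queues) | |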
2722 | * | |
2723 | * Return: The number of bytes consumed from @buf. | |
2724 | */ | |
2725 | static ssize_t num_hwqs_store(struct device *dev, | |
2726 | struct device_attribute *attr, | |
2727 | const char *buf, size_t count) | |
2728 | { | |
2729 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2730 | struct afu *afu = cfg->afu; | |
2731 | int rc; | |
2732 | int nhwqs, num_hwqs; | |
2733 | ||
2734 | rc = kstrtoint(buf, 10, &nhwqs); | |
2735 | if (rc) | |
2736 | return -EINVAL; | |
2737 | ||
2738 | if (nhwqs >= 1) | |
2739 | num_hwqs = nhwqs; | |
2740 | else if (nhwqs == 0) | |
2741 | num_hwqs = num_online_cpus(); | |
2742 | else | |
2743 | num_hwqs = num_online_cpus() / abs(nhwqs); | |
2744 | ||
2745 | afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); | |
2746 | WARN_ON_ONCE(afu->desired_hwqs == 0); | |
2747 | ||
2748 | retry: | |
2749 | switch (cfg->state) { | |
2750 | case STATE_NORMAL: | |
2751 | cfg->state = STATE_RESET; | |
2752 | drain_ioctls(cfg); | |
2753 | cxlflash_mark_contexts_error(cfg); | |
2754 | rc = afu_reset(cfg); | |
2755 | if (rc) | |
2756 | cfg->state = STATE_FAILTERM; | |
2757 | else | |
2758 | cfg->state = STATE_NORMAL; | |
2759 | wake_up_all(&cfg->reset_waitq); | |
2760 | break; | |
2761 | case STATE_RESET: | |
2762 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
2763 | if (cfg->state == STATE_NORMAL) | |
2764 | goto retry; | |
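 | /* else fall through and report the unexpected state */ | |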
2765 | default: | |
2766 | /* Ideally should not happen */ | |
2767 | dev_err(dev, "%s: Device is not ready, state=%d\n", | |
2768 | __func__, cfg->state); | |
2769 | break; | |
2770 | } | |
2771 | ||
2772 | return count; | |
2773 | } | |
2774 | ||
8c052e9e MO |
2775 | static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" }; |
2776 | ||
2777 | /** | |
2778 | * hwq_mode_show() - presents the HWQ steering mode for the host | |
2779 | * @dev: Generic device associated with the host. | |
2780 | * @attr: Device attribute representing the HWQ steering mode. | |
2781 | * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode | |
2782 | * as a character string. | |
2783 | * | |
2784 | * Return: The size of the ASCII string returned in @buf. | |
2785 | */ | |
2786 | static ssize_t hwq_mode_show(struct device *dev, | |
2787 | struct device_attribute *attr, char *buf) | |
2788 | { | |
2789 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2790 | struct afu *afu = cfg->afu; | |
2791 | ||
2792 | return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]); | |
2793 | } | |
2794 | ||
2795 | /** | |
2796 | * hwq_mode_store() - sets the HWQ steering mode for the host | |
2797 | * @dev: Generic device associated with the host. | |
2798 | * @attr: Device attribute representing the HWQ steering mode. | |
2799 | * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode | |
2800 | * as a character string. | |
2801 | * @count: Length of data residing in @buf. | |
2802 | * | |
2803 | * rr = Round-Robin | |
2804 | * tag = Block MQ Tagging | |
2805 | * cpu = CPU Affinity | |
2806 | * | |
2807 | * Return: @count on success, -EINVAL on an invalid or unsupported mode. | |
2808 | */ | |
2809 | static ssize_t hwq_mode_store(struct device *dev, | |
2810 | struct device_attribute *attr, | |
2811 | const char *buf, size_t count) | |
2812 | { | |
2813 | struct Scsi_Host *shost = class_to_shost(dev); | |
2814 | struct cxlflash_cfg *cfg = shost_priv(shost); | |
2815 | struct device *cfgdev = &cfg->dev->dev; | |
2816 | struct afu *afu = cfg->afu; | |
2817 | int i; | |
2818 | u32 mode = MAX_HWQ_MODE; | |
2819 | ||
2820 | for (i = 0; i < MAX_HWQ_MODE; i++) { | |
2821 | if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) { | |
2822 | mode = i; | |
2823 | break; | |
2824 | } | |
2825 | } | |
2826 | ||
2827 | if (mode >= MAX_HWQ_MODE) { | |
2828 | dev_info(cfgdev, "Invalid HWQ steering mode.\n"); | |
2829 | return -EINVAL; | |
2830 | } | |
2831 | ||
2832 | if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) { | |
2833 | dev_info(cfgdev, "SCSI-MQ is not enabled, use a different " | |
2834 | "HWQ steering mode.\n"); | |
2835 | return -EINVAL; | |
2836 | } | |
2837 | ||
2838 | afu->hwq_mode = mode; | |
2839 | ||
2840 | return count; | |
2841 | } | |
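/*
 * Illustrative sketch only: selecting a steering mode from user space,
 * assuming a host numbered "host0" (hypothetical path). Writing "tag" is
 * rejected with -EINVAL unless the host is using blk-mq, per the check
 * above. Userspace code, needs <fcntl.h> and <unistd.h>:
 *
 *	int fd = open("/sys/class/scsi_host/host0/hwq_mode", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "cpu", 3);
 *		close(fd);
 *	}
 */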
2842 | ||
e0f01a21 MO |
2843 | /** |
2844 | * mode_show() - presents the current mode of the device | |
15305514 MO |
2845 | * @dev: Generic device associated with the SCSI device. | |
2846 | * @attr: Device attribute representing the device mode. | |
2847 | * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. | |
2848 | * | |
2849 | * Return: The size of the ASCII string returned in @buf. | |
2850 | */ | |
e0f01a21 MO |
2851 | static ssize_t mode_show(struct device *dev, |
2852 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2853 | { |
2854 | struct scsi_device *sdev = to_scsi_device(dev); | |
2855 | ||
e0f01a21 MO |
2856 | return scnprintf(buf, PAGE_SIZE, "%s\n", |
2857 | sdev->hostdata ? "superpipe" : "legacy"); | |
15305514 MO |
2858 | } |
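/*
 * Illustrative sketch only: the per-LUN "mode" attribute reports "superpipe"
 * when sdev->hostdata is set (the LUN has been attached for superpipe access)
 * and "legacy" otherwise. The sysfs path below is hypothetical and depends on
 * the device's H:C:T:L. Userspace code, needs <fcntl.h> and <unistd.h>:
 *
 *	char mode[16] = { 0 };
 *	int fd = open("/sys/class/scsi_device/1:0:0:0/device/mode", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		read(fd, mode, sizeof(mode) - 1);
 *		close(fd);
 *	}
 */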
2859 | ||
2860 | /* | |
2861 | * Host attributes | |
2862 | */ | |
e0f01a21 MO |
2863 | static DEVICE_ATTR_RO(port0); |
2864 | static DEVICE_ATTR_RO(port1); | |
bdcff1c5 MO |
2865 | static DEVICE_ATTR_RO(port2); |
2866 | static DEVICE_ATTR_RO(port3); | |
e0f01a21 MO |
2867 | static DEVICE_ATTR_RW(lun_mode); |
2868 | static DEVICE_ATTR_RO(ioctl_version); | |
2869 | static DEVICE_ATTR_RO(port0_lun_table); | |
2870 | static DEVICE_ATTR_RO(port1_lun_table); | |
bdcff1c5 MO |
2871 | static DEVICE_ATTR_RO(port2_lun_table); |
2872 | static DEVICE_ATTR_RO(port3_lun_table); | |
2588f222 | 2873 | static DEVICE_ATTR_RW(irqpoll_weight); |
bb85ef68 | 2874 | static DEVICE_ATTR_RW(num_hwqs); |
8c052e9e | 2875 | static DEVICE_ATTR_RW(hwq_mode); |
15305514 MO |
2876 | |
2877 | static struct device_attribute *cxlflash_host_attrs[] = { | |
2878 | &dev_attr_port0, | |
2879 | &dev_attr_port1, | |
bdcff1c5 MO |
2880 | &dev_attr_port2, |
2881 | &dev_attr_port3, | |
15305514 MO |
2882 | &dev_attr_lun_mode, |
2883 | &dev_attr_ioctl_version, | |
e0f01a21 MO |
2884 | &dev_attr_port0_lun_table, |
2885 | &dev_attr_port1_lun_table, | |
bdcff1c5 MO |
2886 | &dev_attr_port2_lun_table, |
2887 | &dev_attr_port3_lun_table, | |
2588f222 | 2888 | &dev_attr_irqpoll_weight, |
bb85ef68 | 2889 | &dev_attr_num_hwqs, |
8c052e9e | 2890 | &dev_attr_hwq_mode, |
15305514 MO |
2891 | NULL |
2892 | }; | |
2893 | ||
2894 | /* | |
2895 | * Device attributes | |
2896 | */ | |
e0f01a21 | 2897 | static DEVICE_ATTR_RO(mode); |
15305514 MO |
2898 | |
2899 | static struct device_attribute *cxlflash_dev_attrs[] = { | |
2900 | &dev_attr_mode, | |
2901 | NULL | |
2902 | }; | |
2903 | ||
2904 | /* | |
2905 | * Host template | |
2906 | */ | |
2907 | static struct scsi_host_template driver_template = { | |
2908 | .module = THIS_MODULE, | |
2909 | .name = CXLFLASH_ADAPTER_NAME, | |
2910 | .info = cxlflash_driver_info, | |
2911 | .ioctl = cxlflash_ioctl, | |
2912 | .proc_name = CXLFLASH_NAME, | |
2913 | .queuecommand = cxlflash_queuecommand, | |
2914 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, | |
2915 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, | |
2916 | .change_queue_depth = cxlflash_change_queue_depth, | |
83430833 | 2917 | .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, |
15305514 | 2918 | .can_queue = CXLFLASH_MAX_CMDS, |
5fbb96c8 | 2919 | .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, |
15305514 | 2920 | .this_id = -1, |
68ab2d76 | 2921 | .sg_tablesize = 1, /* No scatter gather support */ |
15305514 MO |
2922 | .max_sectors = CXLFLASH_MAX_SECTORS, |
2923 | .use_clustering = ENABLE_CLUSTERING, | |
2924 | .shost_attrs = cxlflash_host_attrs, | |
2925 | .sdev_attrs = cxlflash_dev_attrs, | |
2926 | }; | |
2927 | ||
2928 | /* | |
2929 | * Device dependent values | |
2930 | */ | |
96e1b660 UK |
2931 | static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, |
2932 | 0ULL }; | |
2933 | static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, | |
704c4b0d | 2934 | CXLFLASH_NOTIFY_SHUTDOWN }; |
42f90a6b MO |
2935 | static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, |
2936 | CXLFLASH_NOTIFY_SHUTDOWN }; | |
15305514 MO |
2937 | |
2938 | /* | |
2939 | * PCI device binding table | |
2940 | */ | |
2941 | static struct pci_device_id cxlflash_pci_table[] = { | |
2942 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, | |
2943 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, | |
a2746fb1 MK |
2944 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, |
2945 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, | |
42f90a6b MO |
2946 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, |
2947 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, | |
15305514 MO |
2948 | {} |
2949 | }; | |
2950 | ||
2951 | MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); | |
2952 | ||
c21e0bbf MO |
2953 | /** |
2954 | * cxlflash_worker_thread() - work thread handler for the AFU | |
2955 | * @work: Work structure contained within cxlflash associated with host. | |
2956 | * | |
2957 | * Handles the following events: | |
2958 | * - Link reset, which cannot be performed in interrupt context because | |
2959 | * it can block for up to a few seconds | |
ef51074a | 2960 | * - Rescan the host |
c21e0bbf MO |
2961 | */ |
2962 | static void cxlflash_worker_thread(struct work_struct *work) | |
2963 | { | |
5cdac81a MO |
2964 | struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, |
2965 | work_q); | |
c21e0bbf | 2966 | struct afu *afu = cfg->afu; |
4392ba49 | 2967 | struct device *dev = &cfg->dev->dev; |
c885d3fe | 2968 | __be64 __iomem *fc_port_regs; |
c21e0bbf MO |
2969 | int port; |
2970 | ulong lock_flags; | |
2971 | ||
5cdac81a MO |
2972 | /* Avoid MMIO if the device has failed */ |
2973 | ||
2974 | if (cfg->state != STATE_NORMAL) | |
2975 | return; | |
2976 | ||
c21e0bbf MO |
2977 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
2978 | ||
2979 | if (cfg->lr_state == LINK_RESET_REQUIRED) { | |
2980 | port = cfg->lr_port; | |
2981 | if (port < 0) | |
4392ba49 MO |
2982 | dev_err(dev, "%s: invalid port index %d\n", |
2983 | __func__, port); | |
c21e0bbf MO |
2984 | else { |
2985 | spin_unlock_irqrestore(cfg->host->host_lock, | |
2986 | lock_flags); | |
2987 | ||
2988 | /* The reset can block... */ | |
c885d3fe MO |
2989 | fc_port_regs = get_fc_port_regs(cfg, port); |
2990 | afu_link_reset(afu, port, fc_port_regs); | |
c21e0bbf MO |
2991 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
2992 | } | |
2993 | ||
2994 | cfg->lr_state = LINK_RESET_COMPLETE; | |
2995 | } | |
2996 | ||
c21e0bbf | 2997 | spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); |
ef51074a MO |
2998 | |
2999 | if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) | |
3000 | scsi_scan_host(cfg->host); | |
c21e0bbf MO |
3001 | } |
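/*
 * Minimal sketch (an assumption about the requesting path, not a quote of
 * it): other parts of the driver hand a deferred link reset to this handler
 * by recording the port and scheduling the work item, roughly:
 *
 *	cfg->lr_state = LINK_RESET_REQUIRED;
 *	cfg->lr_port = port;
 *	schedule_work(&cfg->work_q);
 */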
3002 | ||
3003 | /** | |
3004 | * cxlflash_probe() - PCI entry point to add host | |
3005 | * @pdev: PCI device associated with the host. | |
3006 | * @dev_id: PCI device id associated with device. | |
3007 | * | |
f92ba507 MO |
3008 | * The device will initially start out in a 'probing' state and |
3009 | * transition to the 'normal' state at the end of a successful | |
3010 | * probe. Should an EEH event occur during probe, the notification | |
3011 | * thread (error_detected()) will wait until the probe handler | |
3012 | * is nearly complete. At that time, the device will be moved to | |
3013 | * a 'probed' state and the EEH thread woken up to drive the slot | |
3014 | * reset and recovery (device moves to 'normal' state). Meanwhile, | |
3015 | * the probe will be allowed to exit successfully. | |
3016 | * | |
1284fb0c | 3017 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
3018 | */ |
3019 | static int cxlflash_probe(struct pci_dev *pdev, | |
3020 | const struct pci_device_id *dev_id) | |
3021 | { | |
3022 | struct Scsi_Host *host; | |
3023 | struct cxlflash_cfg *cfg = NULL; | |
88d33628 | 3024 | struct device *dev = &pdev->dev; |
c21e0bbf MO |
3025 | struct dev_dependent_vals *ddv; |
3026 | int rc = 0; | |
66d4bce4 | 3027 | int k; |
c21e0bbf MO |
3028 | |
3029 | dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", | |
3030 | __func__, pdev->irq); | |
3031 | ||
3032 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; | |
3033 | driver_template.max_sectors = ddv->max_sectors; | |
3034 | ||
3035 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); | |
3036 | if (!host) { | |
88d33628 | 3037 | dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); |
c21e0bbf MO |
3038 | rc = -ENOMEM; |
3039 | goto out; | |
3040 | } | |
3041 | ||
3042 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; | |
3043 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; | |
c21e0bbf MO |
3044 | host->unique_id = host->host_no; |
3045 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; | |
3046 | ||
88d33628 | 3047 | cfg = shost_priv(host); |
c21e0bbf MO |
3048 | cfg->host = host; |
3049 | rc = alloc_mem(cfg); | |
3050 | if (rc) { | |
88d33628 | 3051 | dev_err(dev, "%s: alloc_mem failed\n", __func__); |
c21e0bbf | 3052 | rc = -ENOMEM; |
8b5b1e87 | 3053 | scsi_host_put(cfg->host); |
c21e0bbf MO |
3054 | goto out; |
3055 | } | |
3056 | ||
3057 | cfg->init_state = INIT_STATE_NONE; | |
3058 | cfg->dev = pdev; | |
17ead26f | 3059 | cfg->cxl_fops = cxlflash_cxl_fops; |
2cb79266 MO |
3060 | |
3061 | /* | |
66d4bce4 MO |
3062 | * Promoted LUNs move to the top of the LUN table. The rest stay on |
3063 | * the bottom half. The bottom half grows from the end (index = 255), | |
3064 | * whereas the top half grows from the beginning (index = 0). | |
3065 | * | |
3066 | * Initialize the last LUN index for all possible ports. | |
2cb79266 | 3067 | */ |
66d4bce4 MO |
3068 | cfg->promote_lun_index = 0; |
3069 | ||
3070 | for (k = 0; k < MAX_FC_PORTS; k++) | |
3071 | cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; | |
2cb79266 | 3072 | |
c21e0bbf | 3073 | cfg->dev_id = (struct pci_device_id *)dev_id; |
c21e0bbf MO |
3074 | |
3075 | init_waitqueue_head(&cfg->tmf_waitq); | |
439e85c1 | 3076 | init_waitqueue_head(&cfg->reset_waitq); |
c21e0bbf MO |
3077 | |
3078 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); | |
3079 | cfg->lr_state = LINK_RESET_INVALID; | |
3080 | cfg->lr_port = -1; | |
0d73122c | 3081 | spin_lock_init(&cfg->tmf_slock); |
65be2c79 MO |
3082 | mutex_init(&cfg->ctx_tbl_list_mutex); |
3083 | mutex_init(&cfg->ctx_recovery_mutex); | |
0a27ae51 | 3084 | init_rwsem(&cfg->ioctl_rwsem); |
65be2c79 MO |
3085 | INIT_LIST_HEAD(&cfg->ctx_err_recovery); |
3086 | INIT_LIST_HEAD(&cfg->lluns); | |
c21e0bbf MO |
3087 | |
3088 | pci_set_drvdata(pdev, cfg); | |
3089 | ||
c21e0bbf MO |
3090 | cfg->cxl_afu = cxl_pci_to_afu(pdev); |
3091 | ||
3092 | rc = init_pci(cfg); | |
3093 | if (rc) { | |
88d33628 | 3094 | dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); |
c21e0bbf MO |
3095 | goto out_remove; |
3096 | } | |
3097 | cfg->init_state = INIT_STATE_PCI; | |
3098 | ||
3099 | rc = init_afu(cfg); | |
f92ba507 | 3100 | if (rc && !wq_has_sleeper(&cfg->reset_waitq)) { |
88d33628 | 3101 | dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); |
c21e0bbf MO |
3102 | goto out_remove; |
3103 | } | |
3104 | cfg->init_state = INIT_STATE_AFU; | |
3105 | ||
c21e0bbf MO |
3106 | rc = init_scsi(cfg); |
3107 | if (rc) { | |
88d33628 | 3108 | dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); |
c21e0bbf MO |
3109 | goto out_remove; |
3110 | } | |
3111 | cfg->init_state = INIT_STATE_SCSI; | |
3112 | ||
f92ba507 MO |
3113 | if (wq_has_sleeper(&cfg->reset_waitq)) { |
3114 | cfg->state = STATE_PROBED; | |
3115 | wake_up_all(&cfg->reset_waitq); | |
3116 | } else | |
3117 | cfg->state = STATE_NORMAL; | |
c21e0bbf | 3118 | out: |
88d33628 | 3119 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
3120 | return rc; |
3121 | ||
3122 | out_remove: | |
3123 | cxlflash_remove(pdev); | |
3124 | goto out; | |
3125 | } | |
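/*
 * Probe state flow, summarizing the kernel-doc above (simplified):
 *
 *	probing --(init_pci, init_afu, init_scsi succeed)--> STATE_NORMAL
 *	probing --(EEH arrives during probe)---------------> STATE_PROBED
 *	probed  --(EEH slot reset and recovery)------------> STATE_NORMAL
 */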
3126 | ||
5cdac81a MO |
3127 | /** |
3128 | * cxlflash_pci_error_detected() - called when a PCI error is detected | |
3129 | * @pdev: PCI device struct. | |
3130 | * @state: PCI channel state. | |
3131 | * | |
1d3324c3 MO |
3132 | * When an EEH occurs during an active reset, wait until the reset is |
3133 | * complete and then take action based upon the device state. | |
3134 | * | |
5cdac81a MO |
3135 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT |
3136 | */ | |
3137 | static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, | |
3138 | pci_channel_state_t state) | |
3139 | { | |
65be2c79 | 3140 | int rc = 0; |
5cdac81a MO |
3141 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
3142 | struct device *dev = &cfg->dev->dev; | |
3143 | ||
3144 | dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); | |
3145 | ||
3146 | switch (state) { | |
3147 | case pci_channel_io_frozen: | |
f92ba507 MO |
3148 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && |
3149 | cfg->state != STATE_PROBING); | |
1d3324c3 MO |
3150 | if (cfg->state == STATE_FAILTERM) |
3151 | return PCI_ERS_RESULT_DISCONNECT; | |
3152 | ||
439e85c1 | 3153 | cfg->state = STATE_RESET; |
5cdac81a | 3154 | scsi_block_requests(cfg->host); |
0a27ae51 | 3155 | drain_ioctls(cfg); |
65be2c79 MO |
3156 | rc = cxlflash_mark_contexts_error(cfg); |
3157 | if (unlikely(rc)) | |
88d33628 | 3158 | dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", |
65be2c79 | 3159 | __func__, rc); |
9526f360 | 3160 | term_afu(cfg); |
5cdac81a MO |
3161 | return PCI_ERS_RESULT_NEED_RESET; |
3162 | case pci_channel_io_perm_failure: | |
3163 | cfg->state = STATE_FAILTERM; | |
439e85c1 | 3164 | wake_up_all(&cfg->reset_waitq); |
5cdac81a MO |
3165 | scsi_unblock_requests(cfg->host); |
3166 | return PCI_ERS_RESULT_DISCONNECT; | |
3167 | default: | |
3168 | break; | |
3169 | } | |
3170 | return PCI_ERS_RESULT_NEED_RESET; | |
3171 | } | |
3172 | ||
3173 | /** | |
3174 | * cxlflash_pci_slot_reset() - called when PCI slot has been reset | |
3175 | * @pdev: PCI device struct. | |
3176 | * | |
3177 | * This routine is called by the pci error recovery code after the PCI | |
3178 | * slot has been reset, just before we should resume normal operations. | |
3179 | * | |
3180 | * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT | |
3181 | */ | |
3182 | static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) | |
3183 | { | |
3184 | int rc = 0; | |
3185 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); | |
3186 | struct device *dev = &cfg->dev->dev; | |
3187 | ||
3188 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); | |
3189 | ||
3190 | rc = init_afu(cfg); | |
3191 | if (unlikely(rc)) { | |
88d33628 | 3192 | dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); |
5cdac81a MO |
3193 | return PCI_ERS_RESULT_DISCONNECT; |
3194 | } | |
3195 | ||
3196 | return PCI_ERS_RESULT_RECOVERED; | |
3197 | } | |
3198 | ||
3199 | /** | |
3200 | * cxlflash_pci_resume() - called when normal operation can resume | |
3201 | * @pdev: PCI device struct | |
3202 | */ | |
3203 | static void cxlflash_pci_resume(struct pci_dev *pdev) | |
3204 | { | |
3205 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); | |
3206 | struct device *dev = &cfg->dev->dev; | |
3207 | ||
3208 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); | |
3209 | ||
3210 | cfg->state = STATE_NORMAL; | |
439e85c1 | 3211 | wake_up_all(&cfg->reset_waitq); |
5cdac81a MO |
3212 | scsi_unblock_requests(cfg->host); |
3213 | } | |
3214 | ||
3215 | static const struct pci_error_handlers cxlflash_err_handler = { | |
3216 | .error_detected = cxlflash_pci_error_detected, | |
3217 | .slot_reset = cxlflash_pci_slot_reset, | |
3218 | .resume = cxlflash_pci_resume, | |
3219 | }; | |
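/*
 * EEH recovery sequence implemented by the handlers above (summary):
 *
 *	1. error_detected(io_frozen): block requests, drain ioctls, mark
 *	   user contexts in error, tear down the AFU, request a reset.
 *	2. slot_reset(): re-initialize the AFU via init_afu().
 *	3. resume(): return to STATE_NORMAL and unblock requests.
 */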
3220 | ||
c21e0bbf MO |
3221 | /* |
3222 | * PCI device structure | |
3223 | */ | |
3224 | static struct pci_driver cxlflash_driver = { | |
3225 | .name = CXLFLASH_NAME, | |
3226 | .id_table = cxlflash_pci_table, | |
3227 | .probe = cxlflash_probe, | |
3228 | .remove = cxlflash_remove, | |
babf985d | 3229 | .shutdown = cxlflash_remove, |
5cdac81a | 3230 | .err_handler = &cxlflash_err_handler, |
c21e0bbf MO |
3231 | }; |
3232 | ||
3233 | /** | |
3234 | * init_cxlflash() - module entry point | |
3235 | * | |
1284fb0c | 3236 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
3237 | */ |
3238 | static int __init init_cxlflash(void) | |
3239 | { | |
db853d50 | 3240 | check_sizes(); |
65be2c79 MO |
3241 | cxlflash_list_init(); |
3242 | ||
c21e0bbf MO |
3243 | return pci_register_driver(&cxlflash_driver); |
3244 | } | |
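/*
 * Loading the module (e.g. "modprobe cxlflash", assuming it is built under
 * its usual name) runs init_cxlflash(), which registers cxlflash_driver; the
 * PCI core then calls cxlflash_probe() once for each adapter that matches an
 * entry in cxlflash_pci_table.
 */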
3245 | ||
3246 | /** | |
3247 | * exit_cxlflash() - module exit point | |
3248 | */ | |
3249 | static void __exit exit_cxlflash(void) | |
3250 | { | |
65be2c79 MO |
3251 | cxlflash_term_global_luns(); |
3252 | cxlflash_free_errpage(); | |
3253 | ||
c21e0bbf MO |
3254 | pci_unregister_driver(&cxlflash_driver); |
3255 | } | |
3256 | ||
3257 | module_init(init_cxlflash); | |
3258 | module_exit(exit_cxlflash); |