/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
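
/*
 * Note on the encodings above: the SCSI mid-layer packs the host byte
 * into bits 16-23 of scp->result, which is why host codes such as
 * DID_ERROR and DID_REQUEUE are shifted left by 16 before being combined
 * with any device-supplied SCSI status in the low byte.
 */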

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - reset command owner context via specified register
 * @cmd:	AFU command that timed out.
 * @reset_reg:	MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, reset_reg);
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}
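
/*
 * The poll above backs off exponentially: udelay(1 << nretry) doubles the
 * wait on every pass, so the total time spent spinning is roughly
 * 2^(MC_ROOM_RETRY_CNT + 1) microseconds before the reset is abandoned.
 * The adapter signals completion by changing the register away from the
 * 0x1 that was written to trigger the reset.
 */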

/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
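
/*
 * The 'room' value cached above is a local credit count mirroring the
 * adapter's cmd_room register: the register is only re-read once the
 * cached count is exhausted, so the common case issues a command with a
 * single MMIO write (to IOARRIN) rather than a read-then-write pair.
 */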

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&afu->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&afu->hsq_slock, lock_flags);

	*afu->hsq_curr = cmd->rcb;
	if (afu->hsq_curr < afu->hsq_end)
		afu->hsq_curr++;
	else
		afu->hsq_curr = afu->hsq_start;
	writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);

	spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
		readq_be(&afu->host_map->sq_head),
		readq_be(&afu->host_map->sq_tail));
	return rc;
}
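
/*
 * SQ flow control lives in two places: hsq_credits, decremented here and
 * replenished by process_hrrq() as completions are harvested, bounds how
 * many IOARCBs may be outstanding in the ring, while the hsq_curr pointer
 * wraps from hsq_end back to hsq_start to implement the circular queue.
 * Publishing the new tail with a single MMIO write hands the entry to the
 * AFU.
 */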

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}
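
/*
 * The wait above grants a doubled margin: cmd->rcb.timeout * 2 * 1000
 * converts the command's own timeout to milliseconds, treating
 * rcb.timeout as a count of seconds. On expiry the owning context is
 * reset through the mode-specific context_reset handler rather than the
 * command being silently abandoned.
 */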

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	u32 port_sel = scp->device->channel + 1;
	struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
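
/*
 * Only one TMF may be in flight at a time: tmf_active is the gate, taken
 * and released under tmf_slock, and cmd_complete() wakes tmf_waitq when
 * the TMF finishes. The 5 second wait here is the driver's own bound on
 * TMF completion, independent of the AFU command timeout.
 */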

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	u32 port_sel = scp->device->channel + 1;
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int nseg = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, report the command as
	 * busy so the mid-layer retries it once the TMF completes.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		nseg = scsi_dma_map(scp);
		if (unlikely(nseg < 0)) {
			dev_err(dev, "%s: Fail DMA map\n", __func__);
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		scsi_dma_unmap(scp);
out:
	return rc;
}
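
/*
 * Only the first scatter-gather element is programmed into the IOARCB
 * above (data_len/data_ea). This is presumably safe because the SCSI
 * host template advertises a single-entry sg_tablesize, so the mid-layer
 * hands the driver one contiguous DMA segment per command.
 */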

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to time out, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_disable(&afu->irqpoll);
		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
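
/*
 * The switch above deliberately falls through from the deepest level of
 * teardown to the shallowest: entering at UNMAP_THREE unmaps IRQ 3, then
 * 2, then 1, and finally frees the IRQ allocation, so callers name only
 * how far setup progressed and the waterfall undoes everything beneath it.
 */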

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_global_map __iomem *global;
	struct dev_dependent_vals *ddv;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	global = &afu->afu_map->global;

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		retry_cnt = 0;
		while (true) {
			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
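
/*
 * The shutdown poll backs off linearly: msleep(100 * retry_cnt) waits
 * 100 ms, then 200 ms, and so on, so the per-port wait is bounded by
 * 100 * (1 + 2 + ... + (MC_RETRY_CNT - 1)) milliseconds, matching the
 * "up to 1.5 seconds" noted above.
 */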

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
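
/*
 * A status of U64_MAX (all ones) is what an MMIO read returns when the
 * adapter is not responding, e.g. while PCI error recovery is in flight;
 * halving nretry in that case shortens the wait rather than burning the
 * full retry budget against a dead register.
 */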

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being reset.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain the link with the device by switching the host to
 * use the alternate port exclusively while the reset takes place. A
 * failure to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @afu:	AFU associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct afu *afu, struct list_head *doneq, int budget)
{
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = afu->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&afu->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return num_hrrq;
}
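
/*
 * The toggle bit is how producer and consumer stay in step without a
 * shared index: the AFU writes each RRQ entry with the current toggle
 * value, and the host only consumes entries whose toggle matches its own
 * copy. Each wrap of the ring flips the expected value, so stale entries
 * from the previous lap are never mistaken for new completions. The entry
 * itself is the address of the command's IOASA (SQ mode) or IOARCB
 * (IOARRIN mode), which container_of() maps back to the owning afu_cmd.
 */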

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct afu *afu = container_of(irqpoll, struct afu, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(afu, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
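
/*
 * Per the irq_poll contract, harvesting fewer entries than the budget
 * means the queue is drained: irq_poll_complete() then re-arms the
 * hardware interrupt. Returning a full budget instead leaves the poller
 * scheduled so the remaining completions are processed from softirq
 * context, keeping interrupt storms off the hot path.
 */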

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&afu->irqpoll);
		spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(afu, &doneq, -1);
	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
1394 | ||
1395 | /** | |
1396 | * read_vpd() - obtains the WWPNs from VPD | |
1284fb0c | 1397 | * @cfg: Internal structure associated with the host. |
78ae028e | 1398 | * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs |
c21e0bbf | 1399 | * |
1284fb0c | 1400 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
1401 | */ |
1402 | static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) | |
1403 | { | |
fb67d44d MO |
1404 | struct device *dev = &cfg->dev->dev; |
1405 | struct pci_dev *pdev = cfg->dev; | |
c21e0bbf MO |
1406 | int rc = 0; |
1407 | int ro_start, ro_size, i, j, k; | |
1408 | ssize_t vpd_size; | |
1409 | char vpd_data[CXLFLASH_VPD_LEN]; | |
1410 | char tmp_buf[WWPN_BUF_LEN] = { 0 }; | |
78ae028e | 1411 | char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6" }; |
c21e0bbf MO |
1412 | |
1413 | /* Get the VPD data from the device */ | |
fb67d44d | 1414 | vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); |
c21e0bbf | 1415 | if (unlikely(vpd_size <= 0)) { |
fb67d44d MO |
1416 | dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", |
1417 | __func__, vpd_size); | |
c21e0bbf MO |
1418 | rc = -ENODEV; |
1419 | goto out; | |
1420 | } | |
1421 | ||
1422 | /* Get the read only section offset */ | |
1423 | ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, | |
1424 | PCI_VPD_LRDT_RO_DATA); | |
1425 | if (unlikely(ro_start < 0)) { | |
fb67d44d | 1426 | dev_err(dev, "%s: VPD Read-only data not found\n", __func__); |
c21e0bbf MO |
1427 | rc = -ENODEV; |
1428 | goto out; | |
1429 | } | |
1430 | ||
1431 | /* Get the read only section size, cap when extends beyond read VPD */ | |
1432 | ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); | |
1433 | j = ro_size; | |
1434 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; | |
1435 | if (unlikely((i + j) > vpd_size)) { | |
fb67d44d MO |
1436 | dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", |
1437 | __func__, (i + j), vpd_size); | |
c21e0bbf MO |
1438 | ro_size = vpd_size - i; |
1439 | } | |
1440 | ||
1441 | /* | |
1442 | * Find the offset of the WWPN tag within the read only | |
1443 | * VPD data and validate the found field (partials are | |
1444 | * no good to us). Convert the ASCII data to an integer | |
1445 | * value. Note that we must copy to a temporary buffer | |
1446 | * because the conversion service requires that the ASCII | |
1447 | * string be terminated. | |
1448 | */ | |
78ae028e | 1449 | for (k = 0; k < cfg->num_fc_ports; k++) { |
c21e0bbf MO |
1450 | j = ro_size; |
1451 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; | |
1452 | ||
1453 | i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); | |
1454 | if (unlikely(i < 0)) { | |
fb67d44d MO |
1455 | dev_err(dev, "%s: Port %d WWPN not found in VPD\n", |
1456 | __func__, k); | |
c21e0bbf MO |
1457 | rc = -ENODEV; |
1458 | goto out; | |
1459 | } | |
1460 | ||
1461 | j = pci_vpd_info_field_size(&vpd_data[i]); | |
1462 | i += PCI_VPD_INFO_FLD_HDR_SIZE; | |
1463 | if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { | |
fb67d44d MO |
1464 | dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", |
1465 | __func__, k); | |
c21e0bbf MO |
1466 | rc = -ENODEV; |
1467 | goto out; | |
1468 | } | |
1469 | ||
1470 | memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); | |
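		/* WWPN_LEN (16) doubles as the kstrtoul base: hex */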
1471 | rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); | |
1472 | if (unlikely(rc)) { | |
fb67d44d MO |
1473 | dev_err(dev, "%s: WWPN conversion failed for port %d\n", |
1474 | __func__, k); | |
c21e0bbf MO |
1475 | rc = -ENODEV; |
1476 | goto out; | |
1477 | } | |
78ae028e MO |
1478 | |
1479 | dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); | |
c21e0bbf MO |
1480 | } |
1481 | ||
1482 | out: | |
fb67d44d | 1483 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1484 | return rc; |
1485 | } | |
1486 | ||
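/*
 * A standalone userspace sketch of the conversion performed above:
 * each VPD WWPN field is WWPN_LEN (16) ASCII hex characters, so a
 * NUL-terminated copy parses directly as a base-16 integer.
 * Illustration only; the WWPN value below is made up.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WWPN_LEN	16

static int parse_wwpn(const char *field, unsigned long long *wwpn)
{
	char tmp_buf[WWPN_LEN + 1] = { 0 };
	char *end;

	memcpy(tmp_buf, field, WWPN_LEN);	/* terminate the ASCII run */
	*wwpn = strtoull(tmp_buf, &end, 16);	/* base 16, as in read_vpd() */
	return (end == tmp_buf + WWPN_LEN) ? 0 : -1;
}

int main(void)
{
	unsigned long long wwpn;

	if (!parse_wwpn("500507605E8266A0", &wwpn))
		printf("wwpn=%016llx\n", wwpn);
	return 0;
}
#endif
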
1487 | /** | |
15305514 | 1488 | * init_pcr() - initialize the provisioning and control registers |
1284fb0c | 1489 | * @cfg: Internal structure associated with the host. |
c21e0bbf | 1490 | * |
15305514 MO |
1491 | * Also sets up fast access to the mapped registers and initializes AFU |
1492 | * command fields that never change. | |
c21e0bbf | 1493 | */ |
15305514 | 1494 | static void init_pcr(struct cxlflash_cfg *cfg) |
c21e0bbf MO |
1495 | { |
1496 | struct afu *afu = cfg->afu; | |
1786f4a0 | 1497 | struct sisl_ctrl_map __iomem *ctrl_map; |
c21e0bbf MO |
1498 | int i; |
1499 | ||
1500 | for (i = 0; i < MAX_CONTEXT; i++) { | |
1501 | ctrl_map = &afu->afu_map->ctrls[i].ctrl; | |
f15fbf8d MO |
1502 | /* Disrupt any clients that could be running */ |
1503 | /* e.g. clients that survived a master restart */ | |
c21e0bbf MO |
1504 | writeq_be(0, &ctrl_map->rht_start); |
1505 | writeq_be(0, &ctrl_map->rht_cnt_id); | |
1506 | writeq_be(0, &ctrl_map->ctx_cap); | |
1507 | } | |
1508 | ||
f15fbf8d | 1509 | /* Copy frequently used fields into afu */ |
c21e0bbf | 1510 | afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx); |
c21e0bbf MO |
1511 | afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host; |
1512 | afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl; | |
1513 | ||
1514 | /* Program the Endian Control for the master context */ | |
1515 | writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); | |
c21e0bbf MO |
1516 | } |
1517 | ||
1518 | /** | |
1519 | * init_global() - initialize AFU global registers | |
1284fb0c | 1520 | * @cfg: Internal structure associated with the host. |
 * |
 * Return: 0 on success, -errno on failure |
c21e0bbf | 1521 | */ |
15305514 | 1522 | static int init_global(struct cxlflash_cfg *cfg) |
c21e0bbf MO |
1523 | { |
1524 | struct afu *afu = cfg->afu; | |
4392ba49 | 1525 | struct device *dev = &cfg->dev->dev; |
78ae028e | 1526 | u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ |
c21e0bbf MO |
1527 | int i = 0, num_ports = 0; |
1528 | int rc = 0; | |
1529 | u64 reg; | |
1530 | ||
1531 | rc = read_vpd(cfg, &wwpn[0]); | |
1532 | if (rc) { | |
4392ba49 | 1533 | dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); |
c21e0bbf MO |
1534 | goto out; |
1535 | } | |
1536 | ||
696d0b0c | 1537 | /* Set up RRQ and SQ in AFU for master issued cmds */ |
c21e0bbf MO |
1538 | writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); |
1539 | writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); | |
1540 | ||
696d0b0c MO |
1541 | if (afu_is_sq_cmd_mode(afu)) { |
1542 | writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start); | |
1543 | writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end); | |
1544 | } | |
1545 | ||
c21e0bbf MO |
1546 | /* AFU configuration */ |
1547 | reg = readq_be(&afu->afu_map->global.regs.afu_config); | |
1548 | reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; | |
1549 | /* enable all auto retry options and control endianness */ | |
1550 | /* leave others at default: */ | |
1551 | /* CTX_CAP write protected, mbox_r does not clear on read and */ | |
1552 | /* checker on if dual afu */ | |
1553 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); | |
1554 | ||
f15fbf8d | 1555 | /* Global port select: select either port */ |
c21e0bbf | 1556 | if (afu->internal_lun) { |
f15fbf8d | 1557 | /* Only use port 0 */ |
c21e0bbf | 1558 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); |
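		/* num_ports = 0 skips the per-port init loop below */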
78ae028e | 1559 | num_ports = 0; |
c21e0bbf MO |
1560 | } else { |
1561 | writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel); | |
78ae028e | 1562 | num_ports = cfg->num_fc_ports; |
c21e0bbf MO |
1563 | } |
1564 | ||
1565 | for (i = 0; i < num_ports; i++) { | |
f15fbf8d | 1566 | /* Unmask all errors (but they are still masked at AFU) */ |
c21e0bbf | 1567 | writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]); |
f15fbf8d | 1568 | /* Clear CRC error cnt & set a threshold */ |
c21e0bbf MO |
1569 | (void)readq_be(&afu->afu_map->global. |
1570 | fc_regs[i][FC_CNT_CRCERR / 8]); | |
1571 | writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i] | |
1572 | [FC_CRC_THRESH / 8]); | |
1573 | ||
f15fbf8d | 1574 | /* Set WWPNs. If already programmed, wwpn[i] is 0 */ |
f8013261 MO |
1575 | if (wwpn[i] != 0) |
1576 | afu_set_wwpn(afu, i, | |
1577 | &afu->afu_map->global.fc_regs[i][0], | |
1578 | wwpn[i]); | |
c21e0bbf MO |
1579 | /* Programming WWPN back to back causes additional |
1580 | * offline/online transitions and a PLOGI | |
1581 | */ | |
1582 | msleep(100); | |
c21e0bbf MO |
1583 | } |
1584 | ||
f15fbf8d MO |
1585 | /* Set up master's own CTX_CAP to allow real mode, host translation */ |
1586 | /* tables, afu cmds and read/write GSCSI cmds. */ | |
c21e0bbf MO |
1587 | /* First, unlock ctx_cap write by reading mbox */ |
1588 | (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */ | |
1589 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | | |
1590 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | | |
1591 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), | |
1592 | &afu->ctrl_map->ctx_cap); | |
f15fbf8d | 1593 | /* Initialize heartbeat */ |
c21e0bbf | 1594 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); |
c21e0bbf MO |
1595 | out: |
1596 | return rc; | |
1597 | } | |
1598 | ||
1599 | /** | |
1600 | * start_afu() - initializes and starts the AFU | |
1284fb0c | 1601 | * @cfg: Internal structure associated with the host. |
 * |
 * Return: 0 on success, -errno on failure |
c21e0bbf MO |
1602 | */ |
1603 | static int start_afu(struct cxlflash_cfg *cfg) | |
1604 | { | |
1605 | struct afu *afu = cfg->afu; | |
fb67d44d | 1606 | struct device *dev = &cfg->dev->dev; |
c21e0bbf MO |
1607 | int rc = 0; |
1608 | ||
c21e0bbf MO |
1609 | init_pcr(cfg); |
1610 | ||
f918b4a8 | 1611 | /* Initialize RRQ */ |
af10483e | 1612 | memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry)); |
c21e0bbf MO |
1613 | afu->hrrq_start = &afu->rrq_entry[0]; |
1614 | afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; | |
1615 | afu->hrrq_curr = afu->hrrq_start; | |
1616 | afu->toggle = 1; | |
f918b4a8 | 1617 | spin_lock_init(&afu->hrrq_slock); |
c21e0bbf | 1618 | |
696d0b0c MO |
1619 | /* Initialize SQ */ |
1620 | if (afu_is_sq_cmd_mode(afu)) { | |
1621 | memset(&afu->sq, 0, sizeof(afu->sq)); | |
1622 | afu->hsq_start = &afu->sq[0]; | |
1623 | afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1]; | |
1624 | afu->hsq_curr = afu->hsq_start; | |
1625 | ||
1626 | spin_lock_init(&afu->hsq_slock); | |
1627 | atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1); | |
1628 | } | |
1629 | ||
cba06e6d MO |
1630 | /* Initialize IRQ poll */ |
1631 | if (afu_is_irqpoll_enabled(afu)) | |
1632 | irq_poll_init(&afu->irqpoll, afu->irqpoll_weight, | |
1633 | cxlflash_irqpoll); | |
1634 | ||
c21e0bbf MO |
1635 | rc = init_global(cfg); |
1636 | ||
fb67d44d | 1637 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1638 | return rc; |
1639 | } | |
1640 | ||
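/*
 * A sketch of how the RRQ initialized above is consumed at interrupt
 * time: each entry carries a toggle bit that the AFU flips on every
 * queue wrap, so an entry is valid only while its toggle matches
 * afu->toggle. Assumes the SISL_RESP_HANDLE_T_BIT definition from
 * sislite.h; the real harvesting loop lives in the RRQ interrupt
 * handler earlier in this file.
 */
#if 0
static void harvest_rrq_sketch(struct afu *afu)
{
	u64 *hrrq_curr = afu->hrrq_curr;
	u64 entry = *hrrq_curr;

	while ((entry & SISL_RESP_HANDLE_T_BIT) == afu->toggle) {
		/* process the completed command referenced by entry here */

		if (hrrq_curr < afu->hrrq_end) {
			hrrq_curr++;			/* advance within the queue */
		} else {
			hrrq_curr = afu->hrrq_start;	/* wrap... */
			afu->toggle ^= SISL_RESP_HANDLE_T_BIT;	/* ...and flip */
		}
		entry = *hrrq_curr;
	}
	afu->hrrq_curr = hrrq_curr;
}
#endif
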
1641 | /** | |
9526f360 | 1642 | * init_intr() - setup interrupt handlers for the master context |
1284fb0c | 1643 | * @cfg: Internal structure associated with the host. |
 * @ctx: Previously obtained CXL context to register interrupts against. |
c21e0bbf | 1644 | * |
1284fb0c | 1645 | * Return: UNDO_NOOP on success, or the level of undo needed on failure |
c21e0bbf | 1646 | */ |
9526f360 MK |
1647 | static enum undo_level init_intr(struct cxlflash_cfg *cfg, |
1648 | struct cxl_context *ctx) | |
c21e0bbf | 1649 | { |
c21e0bbf | 1650 | struct afu *afu = cfg->afu; |
9526f360 | 1651 | struct device *dev = &cfg->dev->dev; |
c21e0bbf | 1652 | int rc = 0; |
9526f360 | 1653 | enum undo_level level = UNDO_NOOP; |
c21e0bbf MO |
1654 | |
1655 | rc = cxl_allocate_afu_irqs(ctx, 3); | |
1656 | if (unlikely(rc)) { | |
fb67d44d | 1657 | dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", |
c21e0bbf | 1658 | __func__, rc); |
9526f360 | 1659 | level = UNDO_NOOP; |
c21e0bbf MO |
1660 | goto out; |
1661 | } | |
1662 | ||
1663 | rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, | |
1664 | "SISL_MSI_SYNC_ERROR"); | |
1665 | if (unlikely(rc <= 0)) { | |
fb67d44d | 1666 | dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); |
c21e0bbf MO |
1667 | level = FREE_IRQ; |
1668 | goto out; | |
1669 | } | |
1670 | ||
1671 | rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, | |
1672 | "SISL_MSI_RRQ_UPDATED"); | |
1673 | if (unlikely(rc <= 0)) { | |
fb67d44d | 1674 | dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); |
c21e0bbf MO |
1675 | level = UNMAP_ONE; |
1676 | goto out; | |
1677 | } | |
1678 | ||
1679 | rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, | |
1680 | "SISL_MSI_ASYNC_ERROR"); | |
1681 | if (unlikely(rc <= 0)) { | |
fb67d44d | 1682 | dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); |
c21e0bbf MO |
1683 | level = UNMAP_TWO; |
1684 | goto out; | |
1685 | } | |
9526f360 MK |
1686 | out: |
1687 | return level; | |
1688 | } | |
c21e0bbf | 1689 | |
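/*
 * The undo level returned above pairs with a teardown that falls
 * through from the deepest successful step; a sketch of the shape of
 * term_intr() (the real routine lives elsewhere in this file),
 * assuming the undo_level enumeration from main.h:
 */
#if 0
static void term_intr_sketch(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct cxl_context *ctx = cfg->mcctx;

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(ctx, 3, cfg->afu);
		/* fall through */
	case UNMAP_TWO:
		cxl_unmap_afu_irq(ctx, 2, cfg->afu);
		/* fall through */
	case UNMAP_ONE:
		cxl_unmap_afu_irq(ctx, 1, cfg->afu);
		/* fall through */
	case FREE_IRQ:
		cxl_free_afu_irqs(ctx);
		/* fall through */
	default:
		break;
	}
}
#endif
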
9526f360 MK |
1690 | /** |
1691 | * init_mc() - create and register as the master context | |
1692 | * @cfg: Internal structure associated with the host. | |
1693 | * | |
1694 | * Return: 0 on success, -errno on failure | |
1695 | */ | |
1696 | static int init_mc(struct cxlflash_cfg *cfg) | |
1697 | { | |
1698 | struct cxl_context *ctx; | |
1699 | struct device *dev = &cfg->dev->dev; | |
1700 | int rc = 0; | |
1701 | enum undo_level level; | |
1702 | ||
1703 | ctx = cxl_get_context(cfg->dev); | |
1704 | if (unlikely(!ctx)) { | |
1705 | rc = -ENOMEM; | |
1706 | goto ret; | |
1707 | } | |
1708 | cfg->mcctx = ctx; | |
1709 | ||
1710 | /* Set it up as a master with the CXL */ | |
1711 | cxl_set_master(ctx); | |
1712 | ||
1713 | /* During initialization reset the AFU to start from a clean slate */ | |
1714 | rc = cxl_afu_reset(cfg->mcctx); | |
1715 | if (unlikely(rc)) { | |
fb67d44d | 1716 | dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc); |
9526f360 MK |
1717 | goto ret; |
1718 | } | |
1719 | ||
1720 | level = init_intr(cfg, ctx); | |
1721 | if (unlikely(level)) { | |
fb67d44d | 1722 | dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level); |
 | rc = -ENODEV; |
9526f360 MK |
1723 | goto out; |
1724 | } | |
c21e0bbf MO |
1725 | |
1726 | /* This performs the equivalent of the CXL_IOCTL_START_WORK. | |
1727 | * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process | |
1728 | * element (pe) that is embedded in the context (ctx) | |
1729 | */ | |
1730 | rc = start_context(cfg); | |
1731 | if (unlikely(rc)) { | |
1732 | dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); | |
1733 | level = UNMAP_THREE; | |
1734 | goto out; | |
1735 | } | |
1736 | ret: | |
fb67d44d | 1737 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1738 | return rc; |
1739 | out: | |
9526f360 | 1740 | term_intr(cfg, level); |
c21e0bbf MO |
1741 | goto ret; |
1742 | } | |
1743 | ||
1744 | /** | |
1745 | * init_afu() - setup as master context and start AFU | |
1284fb0c | 1746 | * @cfg: Internal structure associated with the host. |
c21e0bbf MO |
1747 | * |
1748 | * This routine is a higher level of control for configuring the | |
1749 | * AFU on probe and reset paths. | |
1750 | * | |
1284fb0c | 1751 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
1752 | */ |
1753 | static int init_afu(struct cxlflash_cfg *cfg) | |
1754 | { | |
1755 | u64 reg; | |
1756 | int rc = 0; | |
1757 | struct afu *afu = cfg->afu; | |
1758 | struct device *dev = &cfg->dev->dev; | |
1759 | ||
5cdac81a MO |
1760 | cxl_perst_reloads_same_image(cfg->cxl_afu, true); |
1761 | ||
c21e0bbf MO |
1762 | rc = init_mc(cfg); |
1763 | if (rc) { | |
fb67d44d | 1764 | dev_err(dev, "%s: init_mc failed rc=%d\n", |
c21e0bbf | 1765 | __func__, rc); |
ee3491ba | 1766 | goto out; |
c21e0bbf MO |
1767 | } |
1768 | ||
f15fbf8d | 1769 | /* Map the entire MMIO space of the AFU */ |
c21e0bbf MO |
1770 | afu->afu_map = cxl_psa_map(cfg->mcctx); |
1771 | if (!afu->afu_map) { | |
fb67d44d | 1772 | dev_err(dev, "%s: cxl_psa_map failed\n", __func__); |
ee3491ba | 1773 | rc = -ENOMEM; |
c21e0bbf MO |
1774 | goto err1; |
1775 | } | |
1776 | ||
e5ce067b MO |
1777 | /* No byte reverse on reading afu_version or string will be backwards */ |
1778 | reg = readq(&afu->afu_map->global.regs.afu_version); | |
1779 | memcpy(afu->version, ®, sizeof(reg)); | |
c21e0bbf MO |
1780 | afu->interface_version = |
1781 | readq_be(&afu->afu_map->global.regs.interface_version); | |
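	/* interface_version of all-ones (+1 wraps to 0) marks a back-level AFU */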
e5ce067b | 1782 | if ((afu->interface_version + 1) == 0) { |
fb67d44d MO |
1783 | dev_err(dev, "Back level AFU, please upgrade. AFU version %s " |
1784 | "interface version %016llx\n", afu->version, | |
e5ce067b MO |
1785 | afu->interface_version); |
1786 | rc = -EINVAL; | |
0df5bef7 | 1787 | goto err1; |
ee3491ba MO |
1788 | } |
1789 | ||
696d0b0c MO |
1790 | if (afu_is_sq_cmd_mode(afu)) { |
1791 | afu->send_cmd = send_cmd_sq; | |
1792 | afu->context_reset = context_reset_sq; | |
1793 | } else { | |
1794 | afu->send_cmd = send_cmd_ioarrin; | |
1795 | afu->context_reset = context_reset_ioarrin; | |
1796 | } | |
48b4be36 | 1797 | |
fb67d44d MO |
1798 | dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, |
1799 | afu->version, afu->interface_version); | |
c21e0bbf MO |
1800 | |
1801 | rc = start_afu(cfg); | |
1802 | if (rc) { | |
fb67d44d | 1803 | dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); |
0df5bef7 | 1804 | goto err1; |
c21e0bbf MO |
1805 | } |
1806 | ||
1807 | afu_err_intr_init(cfg->afu); | |
11f7b184 UK |
1808 | spin_lock_init(&afu->rrin_slock); |
1809 | afu->room = readq_be(&afu->host_map->cmd_room); | |
c21e0bbf | 1810 | |
2cb79266 MO |
1811 | /* Restore the LUN mappings */ |
1812 | cxlflash_restore_luntable(cfg); | |
ee3491ba | 1813 | out: |
fb67d44d | 1814 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf | 1815 | return rc; |
ee3491ba | 1816 | |
ee3491ba | 1817 | err1: |
9526f360 MK |
1818 | term_intr(cfg, UNMAP_THREE); |
1819 | term_mc(cfg); | |
ee3491ba | 1820 | goto out; |
c21e0bbf MO |
1821 | } |
1822 | ||
c21e0bbf MO |
1823 | /** |
1824 | * cxlflash_afu_sync() - builds and sends an AFU sync command | |
1825 | * @afu: AFU associated with the host. | |
1826 | * @ctx_hndl_u: Identifies context requesting sync. | |
1827 | * @res_hndl_u: Identifies resource requesting sync. | |
1828 | * @mode: Type of sync to issue (lightweight, heavyweight, global). | |
1829 | * | |
1830 | * The AFU can only take 1 sync command at a time. This routine enforces this | |
f15fbf8d | 1831 | * limitation by using a mutex to provide exclusive access to the AFU during |
c21e0bbf MO |
1832 | * the sync. This design point requires callers to not be running in interrupt |
1833 | * context, due to the possibility of sleeping during concurrent sync operations. |
1834 | * | |
5cdac81a MO |
1835 | * AFU sync operations are only necessary and allowed when the device is |
1836 | * operating normally. When not operating normally, sync requests can occur as | |
1837 | * part of cleaning up resources associated with an adapter prior to removal. | |
1838 | * In this scenario, these requests are simply ignored (safe due to the AFU | |
1839 | * going away). | |
1840 | * | |
c21e0bbf MO |
1841 | * Return: |
1842 | * 0 on success | |
1843 | * -1 on failure | |
1844 | */ | |
1845 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, | |
1846 | res_hndl_t res_hndl_u, u8 mode) | |
1847 | { | |
5cdac81a | 1848 | struct cxlflash_cfg *cfg = afu->parent; |
4392ba49 | 1849 | struct device *dev = &cfg->dev->dev; |
c21e0bbf | 1850 | struct afu_cmd *cmd = NULL; |
350bb478 | 1851 | char *buf = NULL; |
c21e0bbf | 1852 | int rc = 0; |
c21e0bbf MO |
1853 | static DEFINE_MUTEX(sync_active); |
1854 | ||
5cdac81a | 1855 | if (cfg->state != STATE_NORMAL) { |
fb67d44d MO |
1856 | dev_dbg(dev, "%s: Sync not required state=%u\n", |
1857 | __func__, cfg->state); | |
5cdac81a MO |
1858 | return 0; |
1859 | } | |
1860 | ||
c21e0bbf | 1861 | mutex_lock(&sync_active); |
de01283b | 1862 | atomic_inc(&afu->cmds_active); |
350bb478 MO |
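	/* Over-allocate so the command can be aligned via PTR_ALIGN below */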
1863 | buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); |
1864 | if (unlikely(!buf)) { | |
1865 | dev_err(dev, "%s: no memory for command\n", __func__); | |
c21e0bbf MO |
1866 | rc = -1; |
1867 | goto out; | |
1868 | } | |
1869 | ||
350bb478 MO |
1870 | cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); |
1871 | init_completion(&cmd->cevent); | |
350bb478 | 1872 | cmd->parent = afu; |
c21e0bbf | 1873 | |
fb67d44d | 1874 | dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); |
c21e0bbf MO |
1875 | |
1876 | cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; | |
350bb478 MO |
1877 | cmd->rcb.ctx_id = afu->ctx_hndl; |
1878 | cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; | |
c21e0bbf MO |
1879 | cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; |
1880 | ||
1881 | cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ | |
1882 | cmd->rcb.cdb[1] = mode; | |
1883 | ||
1884 | /* The cdb is aligned, no unaligned accessors required */ | |
1786f4a0 MO |
1885 | *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); |
1886 | *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); | |
c21e0bbf | 1887 | |
48b4be36 | 1888 | rc = afu->send_cmd(afu, cmd); |
c21e0bbf MO |
1889 | if (unlikely(rc)) |
1890 | goto out; | |
1891 | ||
9ba848ac MO |
1892 | rc = wait_resp(afu, cmd); |
1893 | if (unlikely(rc)) | |
c21e0bbf MO |
1894 | rc = -1; |
1895 | out: | |
de01283b | 1896 | atomic_dec(&afu->cmds_active); |
c21e0bbf | 1897 | mutex_unlock(&sync_active); |
350bb478 | 1898 | kfree(buf); |
fb67d44d | 1899 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1900 | return rc; |
1901 | } | |
1902 | ||
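/*
 * Typical invocations from the superpipe paths, as a sketch; the
 * mode constants (AFU_LW_SYNC, AFU_GSYNC) are assumed from sislite.h
 * and ctxid/rhndl are hypothetical caller-held handles:
 */
#if 0
	/* lightweight sync of a single context/resource pair */
	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);

	/* global sync, e.g. after LUN table changes */
	rc = cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
#endif
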
1903 | /** | |
15305514 MO |
1904 | * afu_reset() - resets the AFU |
1905 | * @cfg: Internal structure associated with the host. | |
c21e0bbf | 1906 | * |
1284fb0c | 1907 | * Return: 0 on success, -errno on failure |
c21e0bbf | 1908 | */ |
15305514 | 1909 | static int afu_reset(struct cxlflash_cfg *cfg) |
c21e0bbf | 1910 | { |
fb67d44d | 1911 | struct device *dev = &cfg->dev->dev; |
c21e0bbf | 1912 | int rc = 0; |
fb67d44d | 1913 | |
c21e0bbf MO |
1914 | /* Stop the context before the reset. Since the context is |
1915 | * no longer available restart it after the reset is complete | |
1916 | */ | |
c21e0bbf MO |
1917 | term_afu(cfg); |
1918 | ||
1919 | rc = init_afu(cfg); | |
1920 | ||
fb67d44d | 1921 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
1922 | return rc; |
1923 | } | |
1924 | ||
f411396d MK |
1925 | /** |
1926 | * drain_ioctls() - wait until all currently executing ioctls have completed | |
1927 | * @cfg: Internal structure associated with the host. | |
1928 | * | |
1929 | * Obtain write access to read/write semaphore that wraps ioctl | |
1930 | * handling to 'drain' ioctls currently executing. | |
1931 | */ | |
1932 | static void drain_ioctls(struct cxlflash_cfg *cfg) | |
1933 | { | |
1934 | down_write(&cfg->ioctl_rwsem); | |
1935 | up_write(&cfg->ioctl_rwsem); | |
1936 | } | |
1937 | ||
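/*
 * The write-lock drain works because every ioctl path holds the same
 * semaphore for read while it executes; a sketch of the reader side
 * (do_ioctl() is a hypothetical stand-in for the dispatch done in
 * cxlflash_ioctl()):
 */
#if 0
	down_read(&cfg->ioctl_rwsem);
	rc = do_ioctl(cfg, cmd, arg);
	up_read(&cfg->ioctl_rwsem);
#endif
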
15305514 MO |
1938 | /** |
1939 | * cxlflash_eh_device_reset_handler() - reset a single LUN | |
1940 | * @scp: SCSI command to send. | |
1941 | * | |
1942 | * Return: | |
1943 | * SUCCESS as defined in scsi/scsi.h | |
1944 | * FAILED as defined in scsi/scsi.h | |
1945 | */ | |
1946 | static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) | |
1947 | { | |
1948 | int rc = SUCCESS; | |
1949 | struct Scsi_Host *host = scp->device->host; | |
fb67d44d MO |
1950 | struct cxlflash_cfg *cfg = shost_priv(host); |
1951 | struct device *dev = &cfg->dev->dev; | |
15305514 MO |
1952 | struct afu *afu = cfg->afu; |
1953 | int rcr = 0; | |
1954 | ||
fb67d44d MO |
1955 | dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " |
1956 | "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, | |
1957 | scp->device->channel, scp->device->id, scp->device->lun, | |
1958 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), | |
1959 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), | |
1960 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), | |
1961 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); | |
15305514 | 1962 | |
ed486daa | 1963 | retry: |
15305514 MO |
1964 | switch (cfg->state) { |
1965 | case STATE_NORMAL: | |
1966 | rcr = send_tmf(afu, scp, TMF_LUN_RESET); | |
1967 | if (unlikely(rcr)) | |
1968 | rc = FAILED; | |
1969 | break; | |
1970 | case STATE_RESET: | |
1971 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
ed486daa | 1972 | goto retry; |
15305514 MO |
1973 | default: |
1974 | rc = FAILED; | |
1975 | break; | |
1976 | } | |
1977 | ||
fb67d44d | 1978 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
15305514 MO |
1979 | return rc; |
1980 | } | |
1981 | ||
1982 | /** | |
1983 | * cxlflash_eh_host_reset_handler() - reset the host adapter | |
1984 | * @scp: SCSI command from stack identifying host. | |
1985 | * | |
1d3324c3 MO |
1986 | * Following a reset, the state is evaluated again in case an EEH occurred |
1987 | * during the reset. In such a scenario, the host reset will either yield | |
1988 | * until the EEH recovery is complete or return success or failure based | |
1989 | * upon the current device state. | |
1990 | * | |
15305514 MO |
1991 | * Return: |
1992 | * SUCCESS as defined in scsi/scsi.h | |
1993 | * FAILED as defined in scsi/scsi.h | |
1994 | */ | |
1995 | static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) | |
1996 | { | |
1997 | int rc = SUCCESS; | |
1998 | int rcr = 0; | |
1999 | struct Scsi_Host *host = scp->device->host; | |
fb67d44d MO |
2000 | struct cxlflash_cfg *cfg = shost_priv(host); |
2001 | struct device *dev = &cfg->dev->dev; | |
15305514 | 2002 | |
fb67d44d MO |
2003 | dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " |
2004 | "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, | |
2005 | scp->device->channel, scp->device->id, scp->device->lun, | |
2006 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), | |
2007 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), | |
2008 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), | |
2009 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); | |
15305514 MO |
2010 | |
2011 | switch (cfg->state) { | |
2012 | case STATE_NORMAL: | |
2013 | cfg->state = STATE_RESET; | |
f411396d | 2014 | drain_ioctls(cfg); |
15305514 MO |
2015 | cxlflash_mark_contexts_error(cfg); |
2016 | rcr = afu_reset(cfg); | |
2017 | if (rcr) { | |
2018 | rc = FAILED; | |
2019 | cfg->state = STATE_FAILTERM; | |
2020 | } else | |
2021 | cfg->state = STATE_NORMAL; | |
2022 | wake_up_all(&cfg->reset_waitq); | |
1d3324c3 MO |
2023 | ssleep(1); |
2024 | /* fall through */ | |
15305514 MO |
2025 | case STATE_RESET: |
2026 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); | |
2027 | if (cfg->state == STATE_NORMAL) | |
2028 | break; | |
2029 | /* fall through */ | |
2030 | default: | |
2031 | rc = FAILED; | |
2032 | break; | |
2033 | } | |
2034 | ||
fb67d44d | 2035 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
15305514 MO |
2036 | return rc; |
2037 | } | |
2038 | ||
2039 | /** | |
2040 | * cxlflash_change_queue_depth() - change the queue depth for the device | |
2041 | * @sdev: SCSI device destined for queue depth change. | |
2042 | * @qdepth: Requested queue depth value to set. | |
2043 | * | |
2044 | * The requested queue depth is capped to the maximum supported value. | |
2045 | * | |
2046 | * Return: The actual queue depth set. | |
2047 | */ | |
2048 | static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) | |
2049 | { | |
2050 | ||
2051 | if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) | |
2052 | qdepth = CXLFLASH_MAX_CMDS_PER_LUN; | |
2053 | ||
2054 | scsi_change_queue_depth(sdev, qdepth); | |
2055 | return sdev->queue_depth; | |
2056 | } | |
2057 | ||
2058 | /** | |
2059 | * cxlflash_show_port_status() - queries and presents the current port status | |
e0f01a21 | 2060 | * @port: Desired port for status reporting. |
3b225cd3 | 2061 | * @cfg: Internal structure associated with the host. |
15305514 MO |
2062 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2063 | * | |
78ae028e | 2064 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
15305514 | 2065 | */ |
3b225cd3 MO |
2066 | static ssize_t cxlflash_show_port_status(u32 port, |
2067 | struct cxlflash_cfg *cfg, | |
2068 | char *buf) | |
15305514 | 2069 | { |
78ae028e | 2070 | struct device *dev = &cfg->dev->dev; |
3b225cd3 | 2071 | struct afu *afu = cfg->afu; |
15305514 | 2072 | char *disp_status; |
15305514 | 2073 | u64 status; |
e0f01a21 | 2074 | __be64 __iomem *fc_regs; |
15305514 | 2075 | |
78ae028e MO |
2076 | WARN_ON(port >= MAX_FC_PORTS); |
2077 | ||
2078 | if (port >= cfg->num_fc_ports) { | |
2079 | dev_info(dev, "%s: Port %d not supported on this card.\n", | |
2080 | __func__, port); | |
2081 | return -EINVAL; | |
2082 | } | |
15305514 MO |
2083 | |
2084 | fc_regs = &afu->afu_map->global.fc_regs[port][0]; | |
e0f01a21 MO |
2085 | status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); |
2086 | status &= FC_MTIP_STATUS_MASK; | |
15305514 MO |
2087 | |
2088 | if (status == FC_MTIP_STATUS_ONLINE) | |
2089 | disp_status = "online"; | |
2090 | else if (status == FC_MTIP_STATUS_OFFLINE) | |
2091 | disp_status = "offline"; | |
2092 | else | |
2093 | disp_status = "unknown"; | |
2094 | ||
e0f01a21 MO |
2095 | return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); |
2096 | } | |
2097 | ||
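/*
 * From userspace this helper surfaces through the port0/port1 host
 * attributes defined below, e.g. (hypothetical host number):
 *
 *	# cat /sys/class/scsi_host/host0/port0
 *	online
 */
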
2098 | /** | |
2099 | * port0_show() - queries and presents the current status of port 0 | |
2100 | * @dev: Generic device associated with the host owning the port. | |
2101 | * @attr: Device attribute representing the port. | |
2102 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2103 | * | |
2104 | * Return: The size of the ASCII string returned in @buf. | |
2105 | */ | |
2106 | static ssize_t port0_show(struct device *dev, | |
2107 | struct device_attribute *attr, | |
2108 | char *buf) | |
2109 | { | |
fb67d44d | 2110 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2111 | |
3b225cd3 | 2112 | return cxlflash_show_port_status(0, cfg, buf); |
15305514 MO |
2113 | } |
2114 | ||
2115 | /** | |
e0f01a21 MO |
2116 | * port1_show() - queries and presents the current status of port 1 |
2117 | * @dev: Generic device associated with the host owning the port. | |
2118 | * @attr: Device attribute representing the port. | |
2119 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2120 | * | |
2121 | * Return: The size of the ASCII string returned in @buf. | |
2122 | */ | |
2123 | static ssize_t port1_show(struct device *dev, | |
2124 | struct device_attribute *attr, | |
2125 | char *buf) | |
2126 | { | |
fb67d44d | 2127 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2128 | |
3b225cd3 | 2129 | return cxlflash_show_port_status(1, cfg, buf); |
e0f01a21 MO |
2130 | } |
2131 | ||
2132 | /** | |
2133 | * lun_mode_show() - presents the current LUN mode of the host | |
15305514 | 2134 | * @dev: Generic device associated with the host. |
e0f01a21 | 2135 | * @attr: Device attribute representing the LUN mode. |
15305514 MO |
2136 | * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. |
2137 | * | |
2138 | * Return: The size of the ASCII string returned in @buf. | |
2139 | */ | |
e0f01a21 MO |
2140 | static ssize_t lun_mode_show(struct device *dev, |
2141 | struct device_attribute *attr, char *buf) | |
15305514 | 2142 | { |
fb67d44d | 2143 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
15305514 MO |
2144 | struct afu *afu = cfg->afu; |
2145 | ||
e0f01a21 | 2146 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); |
15305514 MO |
2147 | } |
2148 | ||
2149 | /** | |
e0f01a21 | 2150 | * lun_mode_store() - sets the LUN mode of the host |
15305514 | 2151 | * @dev: Generic device associated with the host. |
e0f01a21 | 2152 | * @attr: Device attribute representing the LUN mode. |
15305514 MO |
2153 | * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. |
2154 | * @count: Length of data residing in @buf. |
2155 | * | |
2156 | * The CXL Flash AFU supports a dummy LUN mode where the external | |
2157 | * links and storage are not required. Space on the FPGA is used | |
2158 | * to create 1 or 2 small LUNs which are presented to the system | |
2159 | * as if they were a normal storage device. This feature is useful | |
2160 | * during development and also provides manufacturing with a way | |
2161 | * to test the AFU without an actual device. | |
2162 | * | |
2163 | * 0 = external LUN[s] (default) | |
2164 | * 1 = internal LUN (1 x 64K, 512B blocks, id 0) | |
2165 | * 2 = internal LUN (1 x 64K, 4K blocks, id 0) | |
2166 | * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) | |
2167 | * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) | |
2168 | * | |
2169 | * Return: The number of bytes consumed from @buf. |
2170 | */ | |
e0f01a21 MO |
2171 | static ssize_t lun_mode_store(struct device *dev, |
2172 | struct device_attribute *attr, | |
2173 | const char *buf, size_t count) | |
15305514 MO |
2174 | { |
2175 | struct Scsi_Host *shost = class_to_shost(dev); | |
fb67d44d | 2176 | struct cxlflash_cfg *cfg = shost_priv(shost); |
15305514 MO |
2177 | struct afu *afu = cfg->afu; |
2178 | int rc; | |
2179 | u32 lun_mode; | |
2180 | ||
2181 | rc = kstrtouint(buf, 10, &lun_mode); | |
2182 | if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { | |
2183 | afu->internal_lun = lun_mode; | |
603ecce9 MK |
2184 | |
2185 | /* | |
2186 | * When configured for internal LUN, there is only one channel, | |
78ae028e MO |
2187 | * channel number 0, else there will be one less than the number |
2188 | * of fc ports for this card. | |
603ecce9 MK |
2189 | */ |
2190 | if (afu->internal_lun) | |
2191 | shost->max_channel = 0; | |
2192 | else | |
78ae028e | 2193 | shost->max_channel = cfg->num_fc_ports - 1; |
603ecce9 | 2194 | |
15305514 MO |
2195 | afu_reset(cfg); |
2196 | scsi_scan_host(cfg->host); | |
2197 | } | |
2198 | ||
2199 | return count; | |
2200 | } | |
2201 | ||
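/*
 * For example, to switch the AFU to a single internal 4K-block LUN
 * and later back to external LUNs (hypothetical host number):
 *
 *	# echo 2 > /sys/class/scsi_host/host0/lun_mode
 *	# echo 0 > /sys/class/scsi_host/host0/lun_mode
 */
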
2202 | /** | |
e0f01a21 | 2203 | * ioctl_version_show() - presents the current ioctl version of the host |
15305514 MO |
2204 | * @dev: Generic device associated with the host. |
2205 | * @attr: Device attribute representing the ioctl version. | |
2206 | * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. | |
2207 | * | |
2208 | * Return: The size of the ASCII string returned in @buf. | |
2209 | */ | |
e0f01a21 MO |
2210 | static ssize_t ioctl_version_show(struct device *dev, |
2211 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2212 | { |
2213 | return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); | |
2214 | } | |
2215 | ||
2216 | /** | |
e0f01a21 MO |
2217 | * cxlflash_show_port_lun_table() - queries and presents the port LUN table |
2218 | * @port: Desired port for status reporting. | |
3b225cd3 | 2219 | * @cfg: Internal structure associated with the host. |
e0f01a21 MO |
2220 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2221 | * | |
78ae028e | 2222 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
e0f01a21 MO |
2223 | */ |
2224 | static ssize_t cxlflash_show_port_lun_table(u32 port, | |
3b225cd3 | 2225 | struct cxlflash_cfg *cfg, |
e0f01a21 MO |
2226 | char *buf) |
2227 | { | |
78ae028e | 2228 | struct device *dev = &cfg->dev->dev; |
3b225cd3 | 2229 | struct afu *afu = cfg->afu; |
e0f01a21 MO |
2230 | int i; |
2231 | ssize_t bytes = 0; | |
2232 | __be64 __iomem *fc_port; | |
2233 | ||
78ae028e MO |
2234 | WARN_ON(port >= MAX_FC_PORTS); |
2235 | ||
2236 | if (port >= cfg->num_fc_ports) { | |
2237 | dev_info(dev, "%s: Port %d not supported on this card.\n", | |
2238 | __func__, port); | |
2239 | return -EINVAL; | |
2240 | } | |
e0f01a21 MO |
2241 | |
2242 | fc_port = &afu->afu_map->global.fc_port[port][0]; | |
2243 | ||
2244 | for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) | |
2245 | bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, | |
fb67d44d | 2246 | "%03d: %016llx\n", i, readq_be(&fc_port[i])); |
e0f01a21 MO |
2247 | return bytes; |
2248 | } | |
2249 | ||
2250 | /** | |
2251 | * port0_lun_table_show() - presents the current LUN table of port 0 | |
2252 | * @dev: Generic device associated with the host owning the port. | |
2253 | * @attr: Device attribute representing the port. | |
2254 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2255 | * | |
2256 | * Return: The size of the ASCII string returned in @buf. | |
2257 | */ | |
2258 | static ssize_t port0_lun_table_show(struct device *dev, | |
2259 | struct device_attribute *attr, | |
2260 | char *buf) | |
2261 | { | |
fb67d44d | 2262 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2263 | |
3b225cd3 | 2264 | return cxlflash_show_port_lun_table(0, cfg, buf); |
e0f01a21 MO |
2265 | } |
2266 | ||
2267 | /** | |
2268 | * port1_lun_table_show() - presents the current LUN table of port 1 | |
2269 | * @dev: Generic device associated with the host owning the port. | |
2270 | * @attr: Device attribute representing the port. | |
2271 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. | |
2272 | * | |
2273 | * Return: The size of the ASCII string returned in @buf. | |
2274 | */ | |
2275 | static ssize_t port1_lun_table_show(struct device *dev, | |
2276 | struct device_attribute *attr, | |
2277 | char *buf) | |
2278 | { | |
fb67d44d | 2279 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
e0f01a21 | 2280 | |
3b225cd3 | 2281 | return cxlflash_show_port_lun_table(1, cfg, buf); |
e0f01a21 MO |
2282 | } |
2283 | ||
cba06e6d MO |
2284 | /** |
2285 | * irqpoll_weight_show() - presents the current IRQ poll weight for the host | |
2286 | * @dev: Generic device associated with the host. | |
2287 | * @attr: Device attribute representing the IRQ poll weight. | |
2288 | * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll | |
2289 | * weight in ASCII. | |
2290 | * | |
2291 | * An IRQ poll weight of 0 indicates polling is disabled. | |
2292 | * | |
2293 | * Return: The size of the ASCII string returned in @buf. | |
2294 | */ | |
2295 | static ssize_t irqpoll_weight_show(struct device *dev, | |
2296 | struct device_attribute *attr, char *buf) | |
2297 | { | |
2298 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2299 | struct afu *afu = cfg->afu; | |
2300 | ||
2301 | return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight); | |
2302 | } | |
2303 | ||
2304 | /** | |
2305 | * irqpoll_weight_store() - sets the current IRQ poll weight for the host | |
2306 | * @dev: Generic device associated with the host. | |
2307 | * @attr: Device attribute representing the IRQ poll weight. | |
2308 | * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll | |
2309 | * weight in ASCII. | |
2310 | * @count: Length of data residing in @buf. |
2311 | * | |
2312 | * An IRQ poll weight of 0 indicates polling is disabled. | |
2313 | * | |
2314 | * Return: The number of bytes consumed from @buf, or -EINVAL on error. |
2315 | */ | |
2316 | static ssize_t irqpoll_weight_store(struct device *dev, | |
2317 | struct device_attribute *attr, | |
2318 | const char *buf, size_t count) | |
2319 | { | |
2320 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); | |
2321 | struct device *cfgdev = &cfg->dev->dev; | |
2322 | struct afu *afu = cfg->afu; | |
2323 | u32 weight; | |
2324 | int rc; | |
2325 | ||
2326 | rc = kstrtouint(buf, 10, &weight); | |
2327 | if (rc) | |
2328 | return -EINVAL; | |
2329 | ||
2330 | if (weight > 256) { | |
2331 | dev_info(cfgdev, | |
2332 | "Invalid IRQ poll weight. It must be 256 or less.\n"); | |
2333 | return -EINVAL; | |
2334 | } | |
2335 | ||
2336 | if (weight == afu->irqpoll_weight) { | |
2337 | dev_info(cfgdev, | |
2338 | "IRQ poll weight is already set to this value.\n"); |
2339 | return -EINVAL; | |
2340 | } | |
2341 | ||
2342 | if (afu_is_irqpoll_enabled(afu)) | |
2343 | irq_poll_disable(&afu->irqpoll); | |
2344 | ||
2345 | afu->irqpoll_weight = weight; | |
2346 | ||
2347 | if (weight > 0) | |
2348 | irq_poll_init(&afu->irqpoll, weight, cxlflash_irqpoll); | |
2349 | ||
2350 | return count; | |
2351 | } | |
2352 | ||
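/*
 * A sketch of the irq_poll handler shape wired up above; the real
 * cxlflash_irqpoll lives earlier in this file, and
 * process_responses() is a hypothetical stand-in for RRQ harvesting:
 */
#if 0
static int irqpoll_sketch(struct irq_poll *irqpoll, int budget)
{
	struct afu *afu = container_of(irqpoll, struct afu, irqpoll);
	int processed = process_responses(afu, budget);

	if (processed < budget)
		irq_poll_complete(irqpoll);	/* resume interrupt-driven mode */

	return processed;
}
#endif
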
e0f01a21 MO |
2353 | /** |
2354 | * mode_show() - presents the current mode of the device | |
15305514 MO |
2355 | * @dev: Generic device associated with the device. |
2356 | * @attr: Device attribute representing the device mode. | |
2357 | * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. | |
2358 | * | |
2359 | * Return: The size of the ASCII string returned in @buf. | |
2360 | */ | |
e0f01a21 MO |
2361 | static ssize_t mode_show(struct device *dev, |
2362 | struct device_attribute *attr, char *buf) | |
15305514 MO |
2363 | { |
2364 | struct scsi_device *sdev = to_scsi_device(dev); | |
2365 | ||
e0f01a21 MO |
2366 | return scnprintf(buf, PAGE_SIZE, "%s\n", |
2367 | sdev->hostdata ? "superpipe" : "legacy"); | |
15305514 MO |
2368 | } |
2369 | ||
2370 | /* | |
2371 | * Host attributes | |
2372 | */ | |
e0f01a21 MO |
2373 | static DEVICE_ATTR_RO(port0); |
2374 | static DEVICE_ATTR_RO(port1); | |
2375 | static DEVICE_ATTR_RW(lun_mode); | |
2376 | static DEVICE_ATTR_RO(ioctl_version); | |
2377 | static DEVICE_ATTR_RO(port0_lun_table); | |
2378 | static DEVICE_ATTR_RO(port1_lun_table); | |
cba06e6d | 2379 | static DEVICE_ATTR_RW(irqpoll_weight); |
15305514 MO |
2380 | |
2381 | static struct device_attribute *cxlflash_host_attrs[] = { | |
2382 | &dev_attr_port0, | |
2383 | &dev_attr_port1, | |
2384 | &dev_attr_lun_mode, | |
2385 | &dev_attr_ioctl_version, | |
e0f01a21 MO |
2386 | &dev_attr_port0_lun_table, |
2387 | &dev_attr_port1_lun_table, | |
cba06e6d | 2388 | &dev_attr_irqpoll_weight, |
15305514 MO |
2389 | NULL |
2390 | }; | |
2391 | ||
2392 | /* | |
2393 | * Device attributes | |
2394 | */ | |
e0f01a21 | 2395 | static DEVICE_ATTR_RO(mode); |
15305514 MO |
2396 | |
2397 | static struct device_attribute *cxlflash_dev_attrs[] = { | |
2398 | &dev_attr_mode, | |
2399 | NULL | |
2400 | }; | |
2401 | ||
2402 | /* | |
2403 | * Host template | |
2404 | */ | |
2405 | static struct scsi_host_template driver_template = { | |
2406 | .module = THIS_MODULE, | |
2407 | .name = CXLFLASH_ADAPTER_NAME, | |
2408 | .info = cxlflash_driver_info, | |
2409 | .ioctl = cxlflash_ioctl, | |
2410 | .proc_name = CXLFLASH_NAME, | |
2411 | .queuecommand = cxlflash_queuecommand, | |
2412 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, | |
2413 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, | |
2414 | .change_queue_depth = cxlflash_change_queue_depth, | |
83430833 | 2415 | .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, |
15305514 | 2416 | .can_queue = CXLFLASH_MAX_CMDS, |
5fbb96c8 | 2417 | .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, |
15305514 | 2418 | .this_id = -1, |
68ab2d76 | 2419 | .sg_tablesize = 1, /* No scatter gather support */ |
15305514 MO |
2420 | .max_sectors = CXLFLASH_MAX_SECTORS, |
2421 | .use_clustering = ENABLE_CLUSTERING, | |
2422 | .shost_attrs = cxlflash_host_attrs, | |
2423 | .sdev_attrs = cxlflash_dev_attrs, | |
2424 | }; | |
2425 | ||
2426 | /* | |
2427 | * Device dependent values | |
2428 | */ | |
96e1b660 UK |
2429 | static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, |
2430 | 0ULL }; | |
2431 | static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, | |
704c4b0d | 2432 | CXLFLASH_NOTIFY_SHUTDOWN }; |
94344520 MO |
2433 | static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, |
2434 | CXLFLASH_NOTIFY_SHUTDOWN }; | |
15305514 MO |
2435 | |
2436 | /* | |
2437 | * PCI device binding table | |
2438 | */ | |
2439 | static struct pci_device_id cxlflash_pci_table[] = { | |
2440 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, | |
2441 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, | |
a2746fb1 MK |
2442 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, |
2443 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, | |
94344520 MO |
2444 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, |
2445 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, | |
15305514 MO |
2446 | {} |
2447 | }; | |
2448 | ||
2449 | MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); | |
2450 | ||
c21e0bbf MO |
2451 | /** |
2452 | * cxlflash_worker_thread() - work thread handler for the AFU | |
2453 | * @work: Work structure contained within cxlflash associated with host. | |
2454 | * | |
2455 | * Handles the following events: | |
2456 | * - Link reset which cannot be performed on interrupt context due to | |
2457 | * blocking up to a few seconds | |
ef51074a | 2458 | * - Rescan the host |
c21e0bbf MO |
2459 | */ |
2460 | static void cxlflash_worker_thread(struct work_struct *work) | |
2461 | { | |
5cdac81a MO |
2462 | struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, |
2463 | work_q); | |
c21e0bbf | 2464 | struct afu *afu = cfg->afu; |
4392ba49 | 2465 | struct device *dev = &cfg->dev->dev; |
c21e0bbf MO |
2466 | int port; |
2467 | ulong lock_flags; | |
2468 | ||
5cdac81a MO |
2469 | /* Avoid MMIO if the device has failed */ |
2470 | ||
2471 | if (cfg->state != STATE_NORMAL) | |
2472 | return; | |
2473 | ||
c21e0bbf MO |
2474 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
2475 | ||
2476 | if (cfg->lr_state == LINK_RESET_REQUIRED) { | |
2477 | port = cfg->lr_port; | |
2478 | if (port < 0) | |
4392ba49 MO |
2479 | dev_err(dev, "%s: invalid port index %d\n", |
2480 | __func__, port); | |
c21e0bbf MO |
2481 | else { |
2482 | spin_unlock_irqrestore(cfg->host->host_lock, | |
2483 | lock_flags); | |
2484 | ||
2485 | /* The reset can block... */ | |
2486 | afu_link_reset(afu, port, | |
f15fbf8d | 2487 | &afu->afu_map->global.fc_regs[port][0]); |
c21e0bbf MO |
2488 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
2489 | } | |
2490 | ||
2491 | cfg->lr_state = LINK_RESET_COMPLETE; | |
2492 | } | |
2493 | ||
c21e0bbf | 2494 | spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); |
ef51074a MO |
2495 | |
2496 | if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) | |
2497 | scsi_scan_host(cfg->host); | |
c21e0bbf MO |
2498 | } |
2499 | ||
2500 | /** | |
2501 | * cxlflash_probe() - PCI entry point to add host | |
2502 | * @pdev: PCI device associated with the host. | |
2503 | * @dev_id: PCI device id associated with device. | |
2504 | * | |
1284fb0c | 2505 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
2506 | */ |
2507 | static int cxlflash_probe(struct pci_dev *pdev, | |
2508 | const struct pci_device_id *dev_id) | |
2509 | { | |
2510 | struct Scsi_Host *host; | |
2511 | struct cxlflash_cfg *cfg = NULL; | |
fb67d44d | 2512 | struct device *dev = &pdev->dev; |
c21e0bbf MO |
2513 | struct dev_dependent_vals *ddv; |
2514 | int rc = 0; | |
78ae028e | 2515 | int k; |
c21e0bbf MO |
2516 | |
2517 | dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", | |
2518 | __func__, pdev->irq); | |
2519 | ||
2520 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; | |
2521 | driver_template.max_sectors = ddv->max_sectors; | |
2522 | ||
2523 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); | |
2524 | if (!host) { | |
fb67d44d | 2525 | dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); |
c21e0bbf MO |
2526 | rc = -ENOMEM; |
2527 | goto out; | |
2528 | } | |
2529 | ||
2530 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; | |
2531 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; | |
2532 | host->max_channel = NUM_FC_PORTS - 1; | |
2533 | host->unique_id = host->host_no; | |
2534 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; | |
2535 | ||
fb67d44d | 2536 | cfg = shost_priv(host); |
c21e0bbf MO |
2537 | cfg->host = host; |
2538 | rc = alloc_mem(cfg); | |
2539 | if (rc) { | |
fb67d44d | 2540 | dev_err(dev, "%s: alloc_mem failed\n", __func__); |
c21e0bbf | 2541 | rc = -ENOMEM; |
8b5b1e87 | 2542 | scsi_host_put(cfg->host); |
c21e0bbf MO |
2543 | goto out; |
2544 | } | |
2545 | ||
2546 | cfg->init_state = INIT_STATE_NONE; | |
2547 | cfg->dev = pdev; | |
78ae028e | 2548 | cfg->num_fc_ports = NUM_FC_PORTS; |
17ead26f | 2549 | cfg->cxl_fops = cxlflash_cxl_fops; |
2cb79266 MO |
2550 | |
2551 | /* | |
78ae028e MO |
2552 | * Promoted LUNs move to the top of the LUN table. The rest stay on |
2553 | * the bottom half. The bottom half grows from the end (index = 255), | |
2554 | * whereas the top half grows from the beginning (index = 0). | |
2555 | * | |
2556 | * Initialize the last LUN index for all possible ports. | |
2cb79266 | 2557 | */ |
78ae028e MO |
2558 | cfg->promote_lun_index = 0; |
2559 | ||
2560 | for (k = 0; k < MAX_FC_PORTS; k++) | |
2561 | cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; | |
2cb79266 | 2562 | |
c21e0bbf | 2563 | cfg->dev_id = (struct pci_device_id *)dev_id; |
c21e0bbf MO |
2564 | |
2565 | init_waitqueue_head(&cfg->tmf_waitq); | |
439e85c1 | 2566 | init_waitqueue_head(&cfg->reset_waitq); |
c21e0bbf MO |
2567 | |
2568 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); | |
2569 | cfg->lr_state = LINK_RESET_INVALID; | |
2570 | cfg->lr_port = -1; | |
0d73122c | 2571 | spin_lock_init(&cfg->tmf_slock); |
65be2c79 MO |
2572 | mutex_init(&cfg->ctx_tbl_list_mutex); |
2573 | mutex_init(&cfg->ctx_recovery_mutex); | |
0a27ae51 | 2574 | init_rwsem(&cfg->ioctl_rwsem); |
65be2c79 MO |
2575 | INIT_LIST_HEAD(&cfg->ctx_err_recovery); |
2576 | INIT_LIST_HEAD(&cfg->lluns); | |
c21e0bbf MO |
2577 | |
2578 | pci_set_drvdata(pdev, cfg); | |
2579 | ||
c21e0bbf MO |
2580 | cfg->cxl_afu = cxl_pci_to_afu(pdev); |
2581 | ||
2582 | rc = init_pci(cfg); | |
2583 | if (rc) { | |
fb67d44d | 2584 | dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); |
c21e0bbf MO |
2585 | goto out_remove; |
2586 | } | |
2587 | cfg->init_state = INIT_STATE_PCI; | |
2588 | ||
2589 | rc = init_afu(cfg); | |
2590 | if (rc) { | |
fb67d44d | 2591 | dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); |
c21e0bbf MO |
2592 | goto out_remove; |
2593 | } | |
2594 | cfg->init_state = INIT_STATE_AFU; | |
2595 | ||
c21e0bbf MO |
2596 | rc = init_scsi(cfg); |
2597 | if (rc) { | |
fb67d44d | 2598 | dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); |
c21e0bbf MO |
2599 | goto out_remove; |
2600 | } | |
2601 | cfg->init_state = INIT_STATE_SCSI; | |
2602 | ||
2603 | out: | |
fb67d44d | 2604 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); |
c21e0bbf MO |
2605 | return rc; |
2606 | ||
2607 | out_remove: | |
2608 | cxlflash_remove(pdev); | |
2609 | goto out; | |
2610 | } | |
2611 | ||
5cdac81a MO |
2612 | /** |
2613 | * cxlflash_pci_error_detected() - called when a PCI error is detected | |
2614 | * @pdev: PCI device struct. | |
2615 | * @state: PCI channel state. | |
2616 | * | |
1d3324c3 MO |
2617 | * When an EEH occurs during an active reset, wait until the reset is |
2618 | * complete and then take action based upon the device state. | |
2619 | * | |
5cdac81a MO |
2620 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT |
2621 | */ | |
2622 | static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, | |
2623 | pci_channel_state_t state) | |
2624 | { | |
65be2c79 | 2625 | int rc = 0; |
5cdac81a MO |
2626 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
2627 | struct device *dev = &cfg->dev->dev; | |
2628 | ||
2629 | dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); | |
2630 | ||
2631 | switch (state) { | |
2632 | case pci_channel_io_frozen: | |
1d3324c3 MO |
2633 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
2634 | if (cfg->state == STATE_FAILTERM) | |
2635 | return PCI_ERS_RESULT_DISCONNECT; | |
2636 | ||
439e85c1 | 2637 | cfg->state = STATE_RESET; |
5cdac81a | 2638 | scsi_block_requests(cfg->host); |
0a27ae51 | 2639 | drain_ioctls(cfg); |
65be2c79 MO |
2640 | rc = cxlflash_mark_contexts_error(cfg); |
2641 | if (unlikely(rc)) | |
fb67d44d | 2642 | dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", |
65be2c79 | 2643 | __func__, rc); |
9526f360 | 2644 | term_afu(cfg); |
5cdac81a MO |
2645 | return PCI_ERS_RESULT_NEED_RESET; |
2646 | case pci_channel_io_perm_failure: | |
2647 | cfg->state = STATE_FAILTERM; | |
439e85c1 | 2648 | wake_up_all(&cfg->reset_waitq); |
5cdac81a MO |
2649 | scsi_unblock_requests(cfg->host); |
2650 | return PCI_ERS_RESULT_DISCONNECT; | |
2651 | default: | |
2652 | break; | |
2653 | } | |
2654 | return PCI_ERS_RESULT_NEED_RESET; | |
2655 | } | |
2656 | ||
2657 | /** | |
2658 | * cxlflash_pci_slot_reset() - called when PCI slot has been reset | |
2659 | * @pdev: PCI device struct. | |
2660 | * | |
2661 | * This routine is called by the pci error recovery code after the PCI | |
2662 | * slot has been reset, just before we should resume normal operations. | |
2663 | * | |
2664 | * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT | |
2665 | */ | |
2666 | static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) | |
2667 | { | |
2668 | int rc = 0; | |
2669 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); | |
2670 | struct device *dev = &cfg->dev->dev; | |
2671 | ||
2672 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); | |
2673 | ||
2674 | rc = init_afu(cfg); | |
2675 | if (unlikely(rc)) { | |
fb67d44d | 2676 | dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); |
5cdac81a MO |
2677 | return PCI_ERS_RESULT_DISCONNECT; |
2678 | } | |
2679 | ||
2680 | return PCI_ERS_RESULT_RECOVERED; | |
2681 | } | |
2682 | ||
2683 | /** | |
2684 | * cxlflash_pci_resume() - called when normal operation can resume | |
2685 | * @pdev: PCI device struct | |
2686 | */ | |
2687 | static void cxlflash_pci_resume(struct pci_dev *pdev) | |
2688 | { | |
2689 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); | |
2690 | struct device *dev = &cfg->dev->dev; | |
2691 | ||
2692 | dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); | |
2693 | ||
2694 | cfg->state = STATE_NORMAL; | |
439e85c1 | 2695 | wake_up_all(&cfg->reset_waitq); |
5cdac81a MO |
2696 | scsi_unblock_requests(cfg->host); |
2697 | } | |
2698 | ||
2699 | static const struct pci_error_handlers cxlflash_err_handler = { | |
2700 | .error_detected = cxlflash_pci_error_detected, | |
2701 | .slot_reset = cxlflash_pci_slot_reset, | |
2702 | .resume = cxlflash_pci_resume, | |
2703 | }; | |
2704 | ||
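/*
 * Recovery sequence through the handlers above: error_detected()
 * with a frozen channel moves to STATE_RESET, blocks requests and
 * tears down the AFU; slot_reset() rebuilds it via init_afu(); and
 * resume() returns to STATE_NORMAL and unblocks requests.
 */
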
c21e0bbf MO |
2705 | /* |
2706 | * PCI device structure | |
2707 | */ | |
2708 | static struct pci_driver cxlflash_driver = { | |
2709 | .name = CXLFLASH_NAME, | |
2710 | .id_table = cxlflash_pci_table, | |
2711 | .probe = cxlflash_probe, | |
2712 | .remove = cxlflash_remove, | |
babf985d | 2713 | .shutdown = cxlflash_remove, |
5cdac81a | 2714 | .err_handler = &cxlflash_err_handler, |
c21e0bbf MO |
2715 | }; |
2716 | ||
2717 | /** | |
2718 | * init_cxlflash() - module entry point | |
2719 | * | |
1284fb0c | 2720 | * Return: 0 on success, -errno on failure |
c21e0bbf MO |
2721 | */ |
2722 | static int __init init_cxlflash(void) | |
2723 | { | |
65be2c79 MO |
2724 | cxlflash_list_init(); |
2725 | ||
c21e0bbf MO |
2726 | return pci_register_driver(&cxlflash_driver); |
2727 | } | |
2728 | ||
2729 | /** | |
2730 | * exit_cxlflash() - module exit point | |
2731 | */ | |
2732 | static void __exit exit_cxlflash(void) | |
2733 | { | |
65be2c79 MO |
2734 | cxlflash_term_global_luns(); |
2735 | cxlflash_free_errpage(); | |
2736 | ||
c21e0bbf MO |
2737 | pci_unregister_driver(&cxlflash_driver); |
2738 | } | |
2739 | ||
2740 | module_init(init_cxlflash); | |
2741 | module_exit(exit_cxlflash); |