drivers/scsi/qla2xxx/qla_isr.c
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
07e264b7 3 * Copyright (c) 2003-2011 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
2d70c103 8#include "qla_target.h"
1da177e4 9
05236a05 10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
df7baa50 12#include <scsi/scsi_tcq.h>
9a069e19 13#include <scsi/scsi_bsg_fc.h>
bad75002 14#include <scsi/scsi_eh.h>
df7baa50 15
1da177e4 16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
73208dfd
AC
17static void qla2x00_process_completed_request(struct scsi_qla_host *,
18 struct req_que *, uint32_t);
19static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
2afa19a9 20static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
73208dfd
AC
21static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
22 sts_entry_t *);
9a853f71 23
1da177e4
LT
24/**
25 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 26 * @irq: Interrupt number
27 * @dev_id: SCSI driver HA context
1da177e4
LT
28 *
29 * Called by system whenever the host adapter generates an interrupt.
30 *
31 * Returns handled flag.
32 */
33irqreturn_t
7d12e780 34qla2100_intr_handler(int irq, void *dev_id)
1da177e4 35{
e315cd28
AC
36 scsi_qla_host_t *vha;
37 struct qla_hw_data *ha;
3d71644c 38 struct device_reg_2xxx __iomem *reg;
1da177e4 39 int status;
1da177e4 40 unsigned long iter;
14e660e6 41 uint16_t hccr;
9a853f71 42 uint16_t mb[4];
e315cd28 43 struct rsp_que *rsp;
43fac4d9 44 unsigned long flags;
1da177e4 45
e315cd28
AC
46 rsp = (struct rsp_que *) dev_id;
47 if (!rsp) {
3256b435
CD
48 ql_log(ql_log_info, NULL, 0x505d,
49 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
50 return (IRQ_NONE);
51 }
52
e315cd28 53 ha = rsp->hw;
3d71644c 54 reg = &ha->iobase->isp;
1da177e4
LT
55 status = 0;
56
43fac4d9 57 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 58 vha = pci_get_drvdata(ha->pdev);
1da177e4 59 for (iter = 50; iter--; ) {
14e660e6
SJ
60 hccr = RD_REG_WORD(&reg->hccr);
61 if (hccr & HCCR_RISC_PAUSE) {
62 if (pci_channel_offline(ha->pdev))
63 break;
64
65 /*
66 * Issue a "HARD" reset in order for the RISC interrupt
a06a0f8e 67 * bit to be cleared. Schedule a big hammer to get
14e660e6
SJ
68 * out of the RISC PAUSED state.
69 */
70 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
71 RD_REG_WORD(&reg->hccr);
72
e315cd28
AC
73 ha->isp_ops->fw_dump(vha, 1);
74 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
75 break;
76 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
77 break;
78
79 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
80 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
81 RD_REG_WORD(&reg->hccr);
82
83 /* Get mailbox data. */
9a853f71
AV
84 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
85 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 86 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 87 status |= MBX_INTERRUPT;
9a853f71
AV
88 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
89 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
90 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
91 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 92 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
93 } else {
94 /*EMPTY*/
7c3df132
SK
95 ql_dbg(ql_dbg_async, vha, 0x5025,
96 "Unrecognized interrupt type (%d).\n",
97 mb[0]);
1da177e4
LT
98 }
99 /* Release mailbox registers. */
100 WRT_REG_WORD(&reg->semaphore, 0);
101 RD_REG_WORD(&reg->semaphore);
102 } else {
73208dfd 103 qla2x00_process_response_queue(rsp);
1da177e4
LT
104
105 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
106 RD_REG_WORD(&reg->hccr);
107 }
108 }
43fac4d9 109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 110
1da177e4
LT
111 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
112 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 113 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 114 complete(&ha->mbx_intr_comp);
1da177e4
LT
115 }
116
1da177e4
LT
117 return (IRQ_HANDLED);
118}
119
120/**
121 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
122 * @irq: Interrupt number
123 * @dev_id: SCSI driver HA context
1da177e4
LT
124 *
125 * Called by system whenever the host adapter generates an interrupt.
126 *
127 * Returns handled flag.
128 */
129irqreturn_t
7d12e780 130qla2300_intr_handler(int irq, void *dev_id)
1da177e4 131{
e315cd28 132 scsi_qla_host_t *vha;
3d71644c 133 struct device_reg_2xxx __iomem *reg;
1da177e4 134 int status;
1da177e4
LT
135 unsigned long iter;
136 uint32_t stat;
1da177e4 137 uint16_t hccr;
9a853f71 138 uint16_t mb[4];
e315cd28
AC
139 struct rsp_que *rsp;
140 struct qla_hw_data *ha;
43fac4d9 141 unsigned long flags;
1da177e4 142
e315cd28
AC
143 rsp = (struct rsp_que *) dev_id;
144 if (!rsp) {
3256b435
CD
145 ql_log(ql_log_info, NULL, 0x5058,
146 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
147 return (IRQ_NONE);
148 }
149
e315cd28 150 ha = rsp->hw;
3d71644c 151 reg = &ha->iobase->isp;
1da177e4
LT
152 status = 0;
153
43fac4d9 154 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 155 vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
156 for (iter = 50; iter--; ) {
157 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
158 if (stat & HSR_RISC_PAUSED) {
85880801 159 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
160 break;
161
1da177e4
LT
162 hccr = RD_REG_WORD(&reg->hccr);
163 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
7c3df132
SK
164 ql_log(ql_log_warn, vha, 0x5026,
165 "Parity error -- HCCR=%x, Dumping "
166 "firmware.\n", hccr);
1da177e4 167 else
7c3df132
SK
168 ql_log(ql_log_warn, vha, 0x5027,
169 "RISC paused -- HCCR=%x, Dumping "
170 "firmware.\n", hccr);
1da177e4
LT
171
172 /*
173 * Issue a "HARD" reset in order for the RISC
174 * interrupt bit to be cleared. Schedule a big
a06a0f8e 175 * hammer to get out of the RISC PAUSED state.
1da177e4
LT
176 */
177 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
178 RD_REG_WORD(&reg->hccr);
07f31805 179
e315cd28
AC
180 ha->isp_ops->fw_dump(vha, 1);
181 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
182 break;
183 } else if ((stat & HSR_RISC_INT) == 0)
184 break;
185
1da177e4 186 switch (stat & 0xff) {
1da177e4
LT
187 case 0x1:
188 case 0x2:
189 case 0x10:
190 case 0x11:
e315cd28 191 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
192 status |= MBX_INTERRUPT;
193
194 /* Release mailbox registers. */
195 WRT_REG_WORD(&reg->semaphore, 0);
196 break;
197 case 0x12:
9a853f71
AV
198 mb[0] = MSW(stat);
199 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
200 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
201 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 202 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
203 break;
204 case 0x13:
73208dfd 205 qla2x00_process_response_queue(rsp);
1da177e4
LT
206 break;
207 case 0x15:
9a853f71
AV
208 mb[0] = MBA_CMPLT_1_16BIT;
209 mb[1] = MSW(stat);
73208dfd 210 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
211 break;
212 case 0x16:
9a853f71
AV
213 mb[0] = MBA_SCSI_COMPLETION;
214 mb[1] = MSW(stat);
215 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 216 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
217 break;
218 default:
7c3df132
SK
219 ql_dbg(ql_dbg_async, vha, 0x5028,
220 "Unrecognized interrupt type (%d).\n", stat & 0xff);
1da177e4
LT
221 break;
222 }
223 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
224 RD_REG_WORD_RELAXED(&reg->hccr);
225 }
43fac4d9 226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 227
1da177e4
LT
228 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
229 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 230 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 231 complete(&ha->mbx_intr_comp);
1da177e4
LT
232 }
233
1da177e4
LT
234 return (IRQ_HANDLED);
235}
236
237/**
238 * qla2x00_mbx_completion() - Process mailbox command completions.
239 * @ha: SCSI driver HA context
240 * @mb0: Mailbox0 register
241 */
242static void
e315cd28 243qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
244{
245 uint16_t cnt;
4fa94f83 246 uint32_t mboxes;
1da177e4 247 uint16_t __iomem *wptr;
e315cd28 248 struct qla_hw_data *ha = vha->hw;
3d71644c 249 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 250
4fa94f83
AV
251 /* Read all mbox registers? */
252 mboxes = (1 << ha->mbx_count) - 1;
253 if (!ha->mcp)
 254 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
255 else
256 mboxes = ha->mcp->in_mb;
257
1da177e4
LT
258 /* Load return mailbox registers. */
259 ha->flags.mbox_int = 1;
260 ha->mailbox_out[0] = mb0;
4fa94f83 261 mboxes >>= 1;
1da177e4
LT
262 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
263
264 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 265 if (IS_QLA2200(ha) && cnt == 8)
1da177e4 266 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
4fa94f83 267 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
1da177e4 268 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
4fa94f83 269 else if (mboxes & BIT_0)
1da177e4 270 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 271
1da177e4 272 wptr++;
4fa94f83 273 mboxes >>= 1;
1da177e4 274 }
1da177e4
LT
275}
276
8a659571
AV
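/*
 * qla81xx_idc_event() -- handle an Inter-Driver Communication (IDC) async
 * event: snapshot mailboxes 1-7, log the event, and post an ACK work item
 * when the firmware requests notification with a non-zero timeout.
 */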
277static void
278qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
279{
280 static char *event[] =
281 { "Complete", "Request Notification", "Time Extension" };
282 int rval;
283 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
284 uint16_t __iomem *wptr;
285 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
286
287 /* Seed data -- mailbox1 -> mailbox7. */
288 wptr = (uint16_t __iomem *)&reg24->mailbox1;
289 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
290 mb[cnt] = RD_REG_WORD(wptr);
291
7c3df132 292 ql_dbg(ql_dbg_async, vha, 0x5021,
6246b8a1 293 "Inter-Driver Communication %s -- "
7c3df132
SK
294 "%04x %04x %04x %04x %04x %04x %04x.\n",
295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
296 mb[4], mb[5], mb[6]);
8a659571
AV
297
298 /* Acknowledgement needed? [Notify && non-zero timeout]. */
299 timeout = (descr >> 8) & 0xf;
300 if (aen != MBA_IDC_NOTIFY || !timeout)
301 return;
302
7c3df132 303 ql_dbg(ql_dbg_async, vha, 0x5022,
d8424f68 304 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
7c3df132 305 vha->host_no, event[aen & 0xff], timeout);
8a659571
AV
306
307 rval = qla2x00_post_idc_ack_work(vha, mb);
308 if (rval != QLA_SUCCESS)
7c3df132 309 ql_log(ql_log_warn, vha, 0x5023,
8a659571
AV
310 "IDC failed to post ACK.\n");
311}
312
daae62a3
CD
313#define LS_UNKNOWN 2
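/* Map the firmware-reported link rate to a printable speed string (Gbps). */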
314char *
315qla2x00_get_link_speed_str(struct qla_hw_data *ha)
316{
317 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
318 char *link_speed;
319 int fw_speed = ha->link_data_rate;
320
321 if (IS_QLA2100(ha) || IS_QLA2200(ha))
322 link_speed = link_speeds[0];
323 else if (fw_speed == 0x13)
324 link_speed = link_speeds[6];
325 else {
326 link_speed = link_speeds[LS_UNKNOWN];
327 if (fw_speed < 6)
328 link_speed =
329 link_speeds[fw_speed];
330 }
331
332 return link_speed;
333}
334
1da177e4
LT
335/**
 336 * qla2x00_async_event() - Process asynchronous events.
337 * @ha: SCSI driver HA context
9a853f71 338 * @mb: Mailbox registers (0 - 3)
1da177e4 339 */
2c3dfe3f 340void
73208dfd 341qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 342{
1da177e4 343 uint16_t handle_cnt;
bdab23da 344 uint16_t cnt, mbx;
1da177e4 345 uint32_t handles[5];
e315cd28 346 struct qla_hw_data *ha = vha->hw;
3d71644c 347 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
bdab23da 348 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
bc5c2aad 349 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1da177e4 350 uint32_t rscn_entry, host_pid;
4d4df193 351 unsigned long flags;
1da177e4
LT
352
353 /* Setup to process RIO completion. */
354 handle_cnt = 0;
6246b8a1 355 if (IS_CNA_CAPABLE(ha))
3a03eb79 356 goto skip_rio;
1da177e4
LT
357 switch (mb[0]) {
358 case MBA_SCSI_COMPLETION:
9a853f71 359 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
360 handle_cnt = 1;
361 break;
362 case MBA_CMPLT_1_16BIT:
9a853f71 363 handles[0] = mb[1];
1da177e4
LT
364 handle_cnt = 1;
365 mb[0] = MBA_SCSI_COMPLETION;
366 break;
367 case MBA_CMPLT_2_16BIT:
9a853f71
AV
368 handles[0] = mb[1];
369 handles[1] = mb[2];
1da177e4
LT
370 handle_cnt = 2;
371 mb[0] = MBA_SCSI_COMPLETION;
372 break;
373 case MBA_CMPLT_3_16BIT:
9a853f71
AV
374 handles[0] = mb[1];
375 handles[1] = mb[2];
376 handles[2] = mb[3];
1da177e4
LT
377 handle_cnt = 3;
378 mb[0] = MBA_SCSI_COMPLETION;
379 break;
380 case MBA_CMPLT_4_16BIT:
9a853f71
AV
381 handles[0] = mb[1];
382 handles[1] = mb[2];
383 handles[2] = mb[3];
1da177e4
LT
384 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
385 handle_cnt = 4;
386 mb[0] = MBA_SCSI_COMPLETION;
387 break;
388 case MBA_CMPLT_5_16BIT:
9a853f71
AV
389 handles[0] = mb[1];
390 handles[1] = mb[2];
391 handles[2] = mb[3];
1da177e4
LT
392 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
393 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
394 handle_cnt = 5;
395 mb[0] = MBA_SCSI_COMPLETION;
396 break;
397 case MBA_CMPLT_2_32BIT:
9a853f71 398 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
399 handles[1] = le32_to_cpu(
400 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
401 RD_MAILBOX_REG(ha, reg, 6));
402 handle_cnt = 2;
403 mb[0] = MBA_SCSI_COMPLETION;
404 break;
405 default:
406 break;
407 }
3a03eb79 408skip_rio:
1da177e4
LT
409 switch (mb[0]) {
410 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 411 if (!vha->flags.online)
1da177e4
LT
412 break;
413
414 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
415 qla2x00_process_completed_request(vha, rsp->req,
416 handles[cnt]);
1da177e4
LT
417 break;
418
419 case MBA_RESET: /* Reset */
7c3df132
SK
420 ql_dbg(ql_dbg_async, vha, 0x5002,
421 "Asynchronous RESET.\n");
1da177e4 422
e315cd28 423 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
424 break;
425
426 case MBA_SYSTEM_ERR: /* System Error */
6246b8a1
GM
427 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
428 RD_REG_WORD(&reg24->mailbox7) : 0;
7c3df132 429 ql_log(ql_log_warn, vha, 0x5003,
bdab23da
AV
430 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
431 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
1da177e4 432
e315cd28 433 ha->isp_ops->fw_dump(vha, 1);
1da177e4 434
e428924c 435 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 436 if (mb[1] == 0 && mb[2] == 0) {
7c3df132 437 ql_log(ql_log_fatal, vha, 0x5004,
9a853f71
AV
438 "Unrecoverable Hardware Error: adapter "
439 "marked OFFLINE!\n");
e315cd28 440 vha->flags.online = 0;
6246b8a1 441 vha->device_flags |= DFLG_DEV_FAILED;
b1d46989 442 } else {
25985edc 443 /* Check to see if MPI timeout occurred */
b1d46989
MI
444 if ((mbx & MBX_3) && (ha->flags.port0))
445 set_bit(MPI_RESET_NEEDED,
446 &vha->dpc_flags);
447
e315cd28 448 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
b1d46989 449 }
9a853f71 450 } else if (mb[1] == 0) {
7c3df132 451 ql_log(ql_log_fatal, vha, 0x5005,
1da177e4
LT
452 "Unrecoverable Hardware Error: adapter marked "
453 "OFFLINE!\n");
e315cd28 454 vha->flags.online = 0;
6246b8a1 455 vha->device_flags |= DFLG_DEV_FAILED;
1da177e4 456 } else
e315cd28 457 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
458 break;
459
460 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7c3df132
SK
461 ql_log(ql_log_warn, vha, 0x5006,
462 "ISP Request Transfer Error (%x).\n", mb[1]);
1da177e4 463
e315cd28 464 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
465 break;
466
467 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7c3df132
SK
468 ql_log(ql_log_warn, vha, 0x5007,
469 "ISP Response Transfer Error.\n");
1da177e4 470
e315cd28 471 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
472 break;
473
474 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
7c3df132
SK
475 ql_dbg(ql_dbg_async, vha, 0x5008,
476 "Asynchronous WAKEUP_THRES.\n");
1da177e4 477
2d70c103 478 break;
1da177e4 479 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
cfb0919c 480 ql_dbg(ql_dbg_async, vha, 0x5009,
7c3df132 481 "LIP occurred (%x).\n", mb[1]);
1da177e4 482
e315cd28
AC
483 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
484 atomic_set(&vha->loop_state, LOOP_DOWN);
485 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
486 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
487 }
488
e315cd28
AC
489 if (vha->vp_idx) {
490 atomic_set(&vha->vp_state, VP_FAILED);
491 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
492 }
493
e315cd28
AC
494 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
495 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 496
e315cd28
AC
497 vha->flags.management_server_logged_in = 0;
498 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
499 break;
500
501 case MBA_LOOP_UP: /* Loop Up Event */
daae62a3 502 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d8b45213 503 ha->link_data_rate = PORT_SPEED_1GB;
daae62a3 504 else
1da177e4 505 ha->link_data_rate = mb[1];
1da177e4 506
cfb0919c 507 ql_dbg(ql_dbg_async, vha, 0x500a,
daae62a3
CD
508 "LOOP UP detected (%s Gbps).\n",
509 qla2x00_get_link_speed_str(ha));
1da177e4 510
e315cd28
AC
511 vha->flags.management_server_logged_in = 0;
512 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
513 break;
514
515 case MBA_LOOP_DOWN: /* Loop Down Event */
6246b8a1
GM
516 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
517 ? RD_REG_WORD(&reg24->mailbox4) : 0;
bc5c2aad 518 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
cfb0919c 519 ql_dbg(ql_dbg_async, vha, 0x500b,
7c3df132
SK
520 "LOOP DOWN detected (%x %x %x %x).\n",
521 mb[1], mb[2], mb[3], mbx);
1da177e4 522
e315cd28
AC
523 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
524 atomic_set(&vha->loop_state, LOOP_DOWN);
525 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
526 vha->device_flags |= DFLG_NO_CABLE;
527 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
528 }
529
e315cd28
AC
530 if (vha->vp_idx) {
531 atomic_set(&vha->vp_state, VP_FAILED);
532 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
533 }
534
e315cd28 535 vha->flags.management_server_logged_in = 0;
d8b45213 536 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 537 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
538 break;
539
540 case MBA_LIP_RESET: /* LIP reset occurred */
cfb0919c 541 ql_dbg(ql_dbg_async, vha, 0x500c,
cc3ef7bc 542 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 543
e315cd28
AC
544 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
545 atomic_set(&vha->loop_state, LOOP_DOWN);
546 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
547 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
548 }
549
e315cd28
AC
550 if (vha->vp_idx) {
551 atomic_set(&vha->vp_state, VP_FAILED);
552 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
553 }
554
e315cd28 555 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
556
557 ha->operating_mode = LOOP;
e315cd28
AC
558 vha->flags.management_server_logged_in = 0;
559 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
560 break;
561
3a03eb79 562 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
563 case MBA_POINT_TO_POINT: /* Point-to-Point */
564 if (IS_QLA2100(ha))
565 break;
566
6246b8a1 567 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
7c3df132
SK
568 ql_dbg(ql_dbg_async, vha, 0x500d,
569 "DCBX Completed -- %04x %04x %04x.\n",
570 mb[1], mb[2], mb[3]);
23f2ebd1
SR
571 if (ha->notify_dcbx_comp)
572 complete(&ha->dcbx_comp);
573
574 } else
7c3df132
SK
575 ql_dbg(ql_dbg_async, vha, 0x500e,
576 "Asynchronous P2P MODE received.\n");
1da177e4
LT
577
578 /*
579 * Until there's a transition from loop down to loop up, treat
580 * this as loop down only.
581 */
e315cd28
AC
582 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
583 atomic_set(&vha->loop_state, LOOP_DOWN);
584 if (!atomic_read(&vha->loop_down_timer))
585 atomic_set(&vha->loop_down_timer,
1da177e4 586 LOOP_DOWN_TIME);
e315cd28 587 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
588 }
589
e315cd28
AC
590 if (vha->vp_idx) {
591 atomic_set(&vha->vp_state, VP_FAILED);
592 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
593 }
594
e315cd28
AC
595 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
596 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
597
598 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
599 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
600
601 ha->flags.gpsc_supported = 1;
e315cd28 602 vha->flags.management_server_logged_in = 0;
1da177e4
LT
603 break;
604
605 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
606 if (IS_QLA2100(ha))
607 break;
608
cfb0919c 609 ql_dbg(ql_dbg_async, vha, 0x500f,
1da177e4
LT
610 "Configuration change detected: value=%x.\n", mb[1]);
611
e315cd28
AC
612 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
613 atomic_set(&vha->loop_state, LOOP_DOWN);
614 if (!atomic_read(&vha->loop_down_timer))
615 atomic_set(&vha->loop_down_timer,
1da177e4 616 LOOP_DOWN_TIME);
e315cd28 617 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
618 }
619
e315cd28
AC
620 if (vha->vp_idx) {
621 atomic_set(&vha->vp_state, VP_FAILED);
622 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
623 }
624
e315cd28
AC
625 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
626 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
627 break;
628
629 case MBA_PORT_UPDATE: /* Port database update */
55903b9d
SV
630 /*
631 * Handle only global and vn-port update events
632 *
633 * Relevant inputs:
634 * mb[1] = N_Port handle of changed port
635 * OR 0xffff for global event
636 * mb[2] = New login state
637 * 7 = Port logged out
638 * mb[3] = LSB is vp_idx, 0xff = all vps
639 *
640 * Skip processing if:
641 * Event is global, vp_idx is NOT all vps,
642 * vp_idx does not match
643 * Event is not global, vp_idx does not match
644 */
12cec63e
AV
645 if (IS_QLA2XXX_MIDTYPE(ha) &&
646 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
647 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
648 break;
73208dfd 649
9764ff88
AV
650 /* Global event -- port logout or port unavailable. */
651 if (mb[1] == 0xffff && mb[2] == 0x7) {
7c3df132
SK
652 ql_dbg(ql_dbg_async, vha, 0x5010,
653 "Port unavailable %04x %04x %04x.\n",
654 mb[1], mb[2], mb[3]);
daae62a3
CD
655 ql_log(ql_log_warn, vha, 0x505e,
656 "Link is offline.\n");
9764ff88
AV
657
658 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
659 atomic_set(&vha->loop_state, LOOP_DOWN);
660 atomic_set(&vha->loop_down_timer,
661 LOOP_DOWN_TIME);
662 vha->device_flags |= DFLG_NO_CABLE;
663 qla2x00_mark_all_devices_lost(vha, 1);
664 }
665
666 if (vha->vp_idx) {
667 atomic_set(&vha->vp_state, VP_FAILED);
668 fc_vport_set_state(vha->fc_vport,
669 FC_VPORT_FAILED);
faadc5e7 670 qla2x00_mark_all_devices_lost(vha, 1);
9764ff88
AV
671 }
672
673 vha->flags.management_server_logged_in = 0;
674 ha->link_data_rate = PORT_SPEED_UNKNOWN;
675 break;
676 }
677
1da177e4 678 /*
cc3ef7bc 679 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1da177e4
LT
680 * event etc. earlier indicating loop is down) then process
 681 * it. Otherwise ignore it and wait for RSCN to come in.
682 */
e315cd28
AC
683 atomic_set(&vha->loop_down_timer, 0);
684 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
685 atomic_read(&vha->loop_state) != LOOP_DEAD) {
7c3df132
SK
686 ql_dbg(ql_dbg_async, vha, 0x5011,
687 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
688 mb[1], mb[2], mb[3]);
2d70c103
NB
689
690 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
691 break;
692 }
693
7c3df132
SK
694 ql_dbg(ql_dbg_async, vha, 0x5012,
695 "Port database changed %04x %04x %04x.\n",
696 mb[1], mb[2], mb[3]);
daae62a3
CD
697 ql_log(ql_log_warn, vha, 0x505f,
698 "Link is operational (%s Gbps).\n",
699 qla2x00_get_link_speed_str(ha));
1da177e4
LT
700
701 /*
702 * Mark all devices as missing so we will login again.
703 */
e315cd28 704 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 705
e315cd28 706 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 707
2d70c103
NB
708 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
709 set_bit(SCR_PENDING, &vha->dpc_flags);
710
e315cd28
AC
711 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
712 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2d70c103
NB
713
714 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
715 break;
716
717 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 718 /* Check if the Vport has issued a SCR */
e315cd28 719 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
720 break;
721 /* Only handle SCNs for our Vport index. */
0d6e61bc 722 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
3c397400 723 break;
0d6e61bc 724
7c3df132
SK
725 ql_dbg(ql_dbg_async, vha, 0x5013,
726 "RSCN database changed -- %04x %04x %04x.\n",
727 mb[1], mb[2], mb[3]);
1da177e4 728
59d72d87 729 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
730 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
731 | vha->d_id.b.al_pa;
1da177e4 732 if (rscn_entry == host_pid) {
7c3df132
SK
733 ql_dbg(ql_dbg_async, vha, 0x5014,
734 "Ignoring RSCN update to local host "
735 "port ID (%06x).\n", host_pid);
1da177e4
LT
736 break;
737 }
738
59d72d87
RA
739 /* Ignore reserved bits from RSCN-payload. */
740 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1da177e4 741
e315cd28
AC
742 atomic_set(&vha->loop_down_timer, 0);
743 vha->flags.management_server_logged_in = 0;
1da177e4 744
e315cd28
AC
745 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
746 set_bit(RSCN_UPDATE, &vha->dpc_flags);
747 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
748 break;
749
750 /* case MBA_RIO_RESPONSE: */
751 case MBA_ZIO_RESPONSE:
7c3df132
SK
752 ql_dbg(ql_dbg_async, vha, 0x5015,
753 "[R|Z]IO update completion.\n");
1da177e4 754
e428924c 755 if (IS_FWI2_CAPABLE(ha))
2afa19a9 756 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 757 else
73208dfd 758 qla2x00_process_response_queue(rsp);
1da177e4 759 break;
9a853f71
AV
760
761 case MBA_DISCARD_RND_FRAME:
7c3df132
SK
762 ql_dbg(ql_dbg_async, vha, 0x5016,
763 "Discard RND Frame -- %04x %04x %04x.\n",
764 mb[1], mb[2], mb[3]);
9a853f71 765 break;
45ebeb56
AV
766
767 case MBA_TRACE_NOTIFICATION:
7c3df132
SK
768 ql_dbg(ql_dbg_async, vha, 0x5017,
769 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
45ebeb56 770 break;
4d4df193
HK
771
772 case MBA_ISP84XX_ALERT:
7c3df132
SK
773 ql_dbg(ql_dbg_async, vha, 0x5018,
774 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
775 mb[1], mb[2], mb[3]);
4d4df193
HK
776
777 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
778 switch (mb[1]) {
779 case A84_PANIC_RECOVERY:
7c3df132
SK
780 ql_log(ql_log_info, vha, 0x5019,
781 "Alert 84XX: panic recovery %04x %04x.\n",
782 mb[2], mb[3]);
4d4df193
HK
783 break;
784 case A84_OP_LOGIN_COMPLETE:
785 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
786 ql_log(ql_log_info, vha, 0x501a,
787 "Alert 84XX: firmware version %x.\n",
788 ha->cs84xx->op_fw_version);
4d4df193
HK
789 break;
790 case A84_DIAG_LOGIN_COMPLETE:
791 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
792 ql_log(ql_log_info, vha, 0x501b,
793 "Alert 84XX: diagnostic firmware version %x.\n",
794 ha->cs84xx->diag_fw_version);
4d4df193
HK
795 break;
796 case A84_GOLD_LOGIN_COMPLETE:
797 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
798 ha->cs84xx->fw_update = 1;
7c3df132
SK
799 ql_log(ql_log_info, vha, 0x501c,
800 "Alert 84XX: gold firmware version %x.\n",
801 ha->cs84xx->gold_fw_version);
4d4df193
HK
802 break;
803 default:
7c3df132
SK
804 ql_log(ql_log_warn, vha, 0x501d,
805 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
4d4df193
HK
806 mb[1], mb[2], mb[3]);
807 }
808 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
809 break;
3a03eb79 810 case MBA_DCBX_START:
7c3df132
SK
811 ql_dbg(ql_dbg_async, vha, 0x501e,
812 "DCBX Started -- %04x %04x %04x.\n",
813 mb[1], mb[2], mb[3]);
3a03eb79
AV
814 break;
815 case MBA_DCBX_PARAM_UPDATE:
7c3df132
SK
816 ql_dbg(ql_dbg_async, vha, 0x501f,
817 "DCBX Parameters Updated -- %04x %04x %04x.\n",
818 mb[1], mb[2], mb[3]);
3a03eb79
AV
819 break;
820 case MBA_FCF_CONF_ERR:
7c3df132
SK
821 ql_dbg(ql_dbg_async, vha, 0x5020,
822 "FCF Configuration Error -- %04x %04x %04x.\n",
823 mb[1], mb[2], mb[3]);
3a03eb79
AV
824 break;
825 case MBA_IDC_COMPLETE:
3a03eb79 826 case MBA_IDC_NOTIFY:
3a03eb79 827 case MBA_IDC_TIME_EXT:
8a659571 828 qla81xx_idc_event(vha, mb[0], mb[1]);
3a03eb79 829 break;
6246b8a1
GM
830 default:
831 ql_dbg(ql_dbg_async, vha, 0x5057,
832 "Unknown AEN:%04x %04x %04x %04x\n",
833 mb[0], mb[1], mb[2], mb[3]);
1da177e4 834 }
2c3dfe3f 835
2d70c103
NB
836 qlt_async_event(mb[0], vha, mb);
837
e315cd28 838 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 839 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
840}
841
842/**
843 * qla2x00_process_completed_request() - Process a Fast Post response.
844 * @ha: SCSI driver HA context
845 * @index: SRB index
846 */
847static void
73208dfd
AC
848qla2x00_process_completed_request(struct scsi_qla_host *vha,
849 struct req_que *req, uint32_t index)
1da177e4
LT
850{
851 srb_t *sp;
e315cd28 852 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
853
854 /* Validate handle. */
855 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
856 ql_log(ql_log_warn, vha, 0x3014,
857 "Invalid SCSI command index (%x).\n", index);
1da177e4 858
8f7daead
GM
859 if (IS_QLA82XX(ha))
860 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
861 else
862 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
863 return;
864 }
865
e315cd28 866 sp = req->outstanding_cmds[index];
1da177e4
LT
867 if (sp) {
868 /* Free outstanding command slot. */
e315cd28 869 req->outstanding_cmds[index] = NULL;
1da177e4 870
1da177e4 871 /* Save ISP completion status */
9ba56b95 872 sp->done(ha, sp, DID_OK << 16);
1da177e4 873 } else {
7c3df132 874 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1da177e4 875
8f7daead
GM
876 if (IS_QLA82XX(ha))
877 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
878 else
879 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
880 }
881}
882
ac280b67
AV
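/*
 * Validate an IOCB completion handle and return the matching SRB, clearing
 * its outstanding-command slot.  Returns NULL (scheduling an ISP or FCoE
 * context reset for an out-of-range index) when the handle is invalid.
 */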
883static srb_t *
884qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
885 struct req_que *req, void *iocb)
886{
887 struct qla_hw_data *ha = vha->hw;
888 sts_entry_t *pkt = iocb;
889 srb_t *sp = NULL;
890 uint16_t index;
891
892 index = LSW(pkt->handle);
893 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
894 ql_log(ql_log_warn, vha, 0x5031,
895 "Invalid command index (%x).\n", index);
8f7daead
GM
896 if (IS_QLA82XX(ha))
897 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
898 else
899 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ac280b67
AV
900 goto done;
901 }
902 sp = req->outstanding_cmds[index];
903 if (!sp) {
7c3df132
SK
904 ql_log(ql_log_warn, vha, 0x5032,
905 "Invalid completion handle (%x) -- timed-out.\n", index);
ac280b67
AV
906 return sp;
907 }
908 if (sp->handle != index) {
7c3df132
SK
909 ql_log(ql_log_warn, vha, 0x5033,
910 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
ac280b67
AV
911 return NULL;
912 }
9a069e19 913
ac280b67 914 req->outstanding_cmds[index] = NULL;
9a069e19 915
ac280b67
AV
916done:
917 return sp;
918}
919
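/*
 * Complete a mailbox-IOCB based login/logout request: translate the mailbox
 * status words into MBS_* result data for the waiting SRB.
 */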
920static void
921qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
922 struct mbx_entry *mbx)
923{
924 const char func[] = "MBX-IOCB";
925 const char *type;
ac280b67
AV
926 fc_port_t *fcport;
927 srb_t *sp;
4916392b 928 struct srb_iocb *lio;
99b0bec7 929 uint16_t *data;
5ff1d584 930 uint16_t status;
ac280b67
AV
931
932 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
933 if (!sp)
934 return;
935
9ba56b95
GM
936 lio = &sp->u.iocb_cmd;
937 type = sp->name;
ac280b67 938 fcport = sp->fcport;
4916392b 939 data = lio->u.logio.data;
ac280b67 940
5ff1d584 941 data[0] = MBS_COMMAND_ERROR;
4916392b 942 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 943 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 944 if (mbx->entry_status) {
7c3df132 945 ql_dbg(ql_dbg_async, vha, 0x5043,
cfb0919c 946 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
d3fa9e7d 947 "entry-status=%x status=%x state-flag=%x "
cfb0919c
CD
948 "status-flags=%x.\n", type, sp->handle,
949 fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d
AV
950 fcport->d_id.b.al_pa, mbx->entry_status,
951 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
7c3df132 952 le16_to_cpu(mbx->status_flags));
d3fa9e7d 953
cfb0919c 954 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
7c3df132 955 (uint8_t *)mbx, sizeof(*mbx));
ac280b67 956
99b0bec7 957 goto logio_done;
ac280b67
AV
958 }
959
5ff1d584 960 status = le16_to_cpu(mbx->status);
9ba56b95 961 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
5ff1d584
AV
962 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
963 status = 0;
964 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
7c3df132 965 ql_dbg(ql_dbg_async, vha, 0x5045,
cfb0919c
CD
966 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
967 type, sp->handle, fcport->d_id.b.domain,
968 fcport->d_id.b.area, fcport->d_id.b.al_pa,
969 le16_to_cpu(mbx->mb1));
ac280b67
AV
970
971 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 972 if (sp->type == SRB_LOGIN_CMD) {
99b0bec7
AV
973 fcport->port_type = FCT_TARGET;
974 if (le16_to_cpu(mbx->mb1) & BIT_0)
975 fcport->port_type = FCT_INITIATOR;
6ac52608 976 else if (le16_to_cpu(mbx->mb1) & BIT_1)
99b0bec7 977 fcport->flags |= FCF_FCP2_DEVICE;
5ff1d584 978 }
99b0bec7 979 goto logio_done;
ac280b67
AV
980 }
981
982 data[0] = le16_to_cpu(mbx->mb0);
983 switch (data[0]) {
984 case MBS_PORT_ID_USED:
985 data[1] = le16_to_cpu(mbx->mb1);
986 break;
987 case MBS_LOOP_ID_USED:
988 break;
989 default:
990 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
991 break;
992 }
993
7c3df132 994 ql_log(ql_log_warn, vha, 0x5046,
cfb0919c
CD
995 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
996 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
997 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
998 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
ac280b67 999 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
7c3df132 1000 le16_to_cpu(mbx->mb7));
ac280b67 1001
99b0bec7 1002logio_done:
9ba56b95 1003 sp->done(vha, sp, 0);
ac280b67
AV
1004}
1005
9bc4f4fb
HZ
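/* Complete a CT pass-through IOCB issued through the bsg (FC transport) interface. */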
1006static void
1007qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1008 sts_entry_t *pkt, int iocb_type)
1009{
1010 const char func[] = "CT_IOCB";
1011 const char *type;
9bc4f4fb 1012 srb_t *sp;
9bc4f4fb
HZ
1013 struct fc_bsg_job *bsg_job;
1014 uint16_t comp_status;
9ba56b95 1015 int res;
9bc4f4fb
HZ
1016
1017 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1018 if (!sp)
1019 return;
1020
9ba56b95 1021 bsg_job = sp->u.bsg_job;
9bc4f4fb 1022
9ba56b95 1023 type = "ct pass-through";
9bc4f4fb
HZ
1024
1025 comp_status = le16_to_cpu(pkt->comp_status);
1026
1027 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1028 * fc payload to the caller
1029 */
1030 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1031 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1032
1033 if (comp_status != CS_COMPLETE) {
1034 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1035 res = DID_OK << 16;
9bc4f4fb
HZ
1036 bsg_job->reply->reply_payload_rcv_len =
1037 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1038
7c3df132
SK
1039 ql_log(ql_log_warn, vha, 0x5048,
1040 "CT pass-through-%s error "
9bc4f4fb 1041 "comp_status-status=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1042 type, comp_status,
1043 bsg_job->reply->reply_payload_rcv_len);
9bc4f4fb 1044 } else {
7c3df132
SK
1045 ql_log(ql_log_warn, vha, 0x5049,
1046 "CT pass-through-%s error "
1047 "comp_status-status=0x%x.\n", type, comp_status);
9ba56b95 1048 res = DID_ERROR << 16;
9bc4f4fb
HZ
1049 bsg_job->reply->reply_payload_rcv_len = 0;
1050 }
cfb0919c 1051 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
7c3df132 1052 (uint8_t *)pkt, sizeof(*pkt));
9bc4f4fb 1053 } else {
9ba56b95 1054 res = DID_OK << 16;
9bc4f4fb
HZ
1055 bsg_job->reply->reply_payload_rcv_len =
1056 bsg_job->reply_payload.payload_len;
1057 bsg_job->reply_len = 0;
1058 }
1059
9ba56b95 1060 sp->done(vha, sp, res);
9bc4f4fb
HZ
1061}
1062
9a069e19
GM
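/*
 * Complete an ELS/CT pass-through IOCB on FWI2-capable ISPs, copying the
 * firmware status words into the bsg reply area for the caller to decode.
 */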
1063static void
1064qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1065 struct sts_entry_24xx *pkt, int iocb_type)
1066{
1067 const char func[] = "ELS_CT_IOCB";
1068 const char *type;
9a069e19 1069 srb_t *sp;
9a069e19
GM
1070 struct fc_bsg_job *bsg_job;
1071 uint16_t comp_status;
1072 uint32_t fw_status[3];
1073 uint8_t* fw_sts_ptr;
9ba56b95 1074 int res;
9a069e19
GM
1075
1076 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1077 if (!sp)
1078 return;
9ba56b95 1079 bsg_job = sp->u.bsg_job;
9a069e19
GM
1080
1081 type = NULL;
9ba56b95 1082 switch (sp->type) {
9a069e19
GM
1083 case SRB_ELS_CMD_RPT:
1084 case SRB_ELS_CMD_HST:
1085 type = "els";
1086 break;
1087 case SRB_CT_CMD:
1088 type = "ct pass-through";
1089 break;
1090 default:
37fed3ee 1091 ql_dbg(ql_dbg_user, vha, 0x503e,
9ba56b95 1092 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
9a069e19
GM
1093 return;
1094 }
1095
1096 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1097 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1098 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1099
1100 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1101 * fc payload to the caller
1102 */
1103 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1104 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1105
1106 if (comp_status != CS_COMPLETE) {
1107 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1108 res = DID_OK << 16;
9a069e19 1109 bsg_job->reply->reply_payload_rcv_len =
9ba56b95 1110 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
9a069e19 1111
37fed3ee 1112 ql_dbg(ql_dbg_user, vha, 0x503f,
cfb0919c 1113 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1114 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
cfb0919c 1115 type, sp->handle, comp_status, fw_status[1], fw_status[2],
7c3df132
SK
1116 le16_to_cpu(((struct els_sts_entry_24xx *)
1117 pkt)->total_byte_count));
9a069e19
GM
1118 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1119 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1120 }
1121 else {
37fed3ee 1122 ql_dbg(ql_dbg_user, vha, 0x5040,
cfb0919c 1123 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1124 "error subcode 1=0x%x error subcode 2=0x%x.\n",
cfb0919c 1125 type, sp->handle, comp_status,
7c3df132
SK
1126 le16_to_cpu(((struct els_sts_entry_24xx *)
1127 pkt)->error_subcode_1),
1128 le16_to_cpu(((struct els_sts_entry_24xx *)
1129 pkt)->error_subcode_2));
9ba56b95 1130 res = DID_ERROR << 16;
9a069e19
GM
1131 bsg_job->reply->reply_payload_rcv_len = 0;
1132 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1133 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1134 }
37fed3ee 1135 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
7c3df132 1136 (uint8_t *)pkt, sizeof(*pkt));
9a069e19
GM
1137 }
1138 else {
9ba56b95 1139 res = DID_OK << 16;
9a069e19
GM
1140 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1141 bsg_job->reply_len = 0;
1142 }
1143
9ba56b95 1144 sp->done(vha, sp, res);
9a069e19
GM
1145}
1146
ac280b67
AV
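/*
 * Complete an asynchronous login/logout IOCB: translate the completion
 * status and I/O parameters into MBS_* result data for the waiting SRB.
 */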
1147static void
1148qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1149 struct logio_entry_24xx *logio)
1150{
1151 const char func[] = "LOGIO-IOCB";
1152 const char *type;
ac280b67
AV
1153 fc_port_t *fcport;
1154 srb_t *sp;
4916392b 1155 struct srb_iocb *lio;
99b0bec7 1156 uint16_t *data;
ac280b67
AV
1157 uint32_t iop[2];
1158
1159 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1160 if (!sp)
1161 return;
1162
9ba56b95
GM
1163 lio = &sp->u.iocb_cmd;
1164 type = sp->name;
ac280b67 1165 fcport = sp->fcport;
4916392b 1166 data = lio->u.logio.data;
ac280b67 1167
5ff1d584 1168 data[0] = MBS_COMMAND_ERROR;
4916392b 1169 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1170 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1171 if (logio->entry_status) {
5e19ed90 1172 ql_log(ql_log_warn, fcport->vha, 0x5034,
cfb0919c 1173 "Async-%s error entry - hdl=%x"
d3fa9e7d 1174 "portid=%02x%02x%02x entry-status=%x.\n",
cfb0919c
CD
1175 type, sp->handle, fcport->d_id.b.domain,
1176 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1177 logio->entry_status);
1178 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
7c3df132 1179 (uint8_t *)logio, sizeof(*logio));
ac280b67 1180
99b0bec7 1181 goto logio_done;
ac280b67
AV
1182 }
1183
1184 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
5e19ed90 1185 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
cfb0919c
CD
1186 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1187 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1188 fcport->d_id.b.area, fcport->d_id.b.al_pa,
7c3df132 1189 le32_to_cpu(logio->io_parameter[0]));
ac280b67
AV
1190
1191 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1192 if (sp->type != SRB_LOGIN_CMD)
99b0bec7 1193 goto logio_done;
ac280b67
AV
1194
1195 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1196 if (iop[0] & BIT_4) {
1197 fcport->port_type = FCT_TARGET;
1198 if (iop[0] & BIT_8)
8474f3a0 1199 fcport->flags |= FCF_FCP2_DEVICE;
b0cd579c 1200 } else if (iop[0] & BIT_5)
ac280b67 1201 fcport->port_type = FCT_INITIATOR;
b0cd579c 1202
2d70c103
NB
1203 if (iop[0] & BIT_7)
1204 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1205
ac280b67
AV
1206 if (logio->io_parameter[7] || logio->io_parameter[8])
1207 fcport->supported_classes |= FC_COS_CLASS2;
1208 if (logio->io_parameter[9] || logio->io_parameter[10])
1209 fcport->supported_classes |= FC_COS_CLASS3;
1210
99b0bec7 1211 goto logio_done;
ac280b67
AV
1212 }
1213
1214 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1215 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1216 switch (iop[0]) {
1217 case LSC_SCODE_PORTID_USED:
1218 data[0] = MBS_PORT_ID_USED;
1219 data[1] = LSW(iop[1]);
1220 break;
1221 case LSC_SCODE_NPORT_USED:
1222 data[0] = MBS_LOOP_ID_USED;
1223 break;
ac280b67
AV
1224 default:
1225 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1226 break;
1227 }
1228
5e19ed90 1229 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
cfb0919c
CD
1230 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1231 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
d3fa9e7d 1232 fcport->d_id.b.area, fcport->d_id.b.al_pa,
ac280b67
AV
1233 le16_to_cpu(logio->comp_status),
1234 le32_to_cpu(logio->io_parameter[0]),
7c3df132 1235 le32_to_cpu(logio->io_parameter[1]));
ac280b67 1236
99b0bec7 1237logio_done:
9ba56b95 1238 sp->done(vha, sp, 0);
ac280b67
AV
1239}
1240
3822263e
MI
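/*
 * Complete a task-management IOCB, recording any entry, completion, or
 * response-info error in the SRB's TMF data.
 */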
1241static void
1242qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1243 struct tsk_mgmt_entry *tsk)
1244{
1245 const char func[] = "TMF-IOCB";
1246 const char *type;
1247 fc_port_t *fcport;
1248 srb_t *sp;
1249 struct srb_iocb *iocb;
3822263e
MI
1250 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1251 int error = 1;
1252
1253 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1254 if (!sp)
1255 return;
1256
9ba56b95
GM
1257 iocb = &sp->u.iocb_cmd;
1258 type = sp->name;
3822263e
MI
1259 fcport = sp->fcport;
1260
1261 if (sts->entry_status) {
5e19ed90 1262 ql_log(ql_log_warn, fcport->vha, 0x5038,
cfb0919c
CD
1263 "Async-%s error - hdl=%x entry-status(%x).\n",
1264 type, sp->handle, sts->entry_status);
3822263e 1265 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
5e19ed90 1266 ql_log(ql_log_warn, fcport->vha, 0x5039,
cfb0919c
CD
1267 "Async-%s error - hdl=%x completion status(%x).\n",
1268 type, sp->handle, sts->comp_status);
3822263e
MI
1269 } else if (!(le16_to_cpu(sts->scsi_status) &
1270 SS_RESPONSE_INFO_LEN_VALID)) {
5e19ed90 1271 ql_log(ql_log_warn, fcport->vha, 0x503a,
cfb0919c
CD
1272 "Async-%s error - hdl=%x no response info(%x).\n",
1273 type, sp->handle, sts->scsi_status);
3822263e 1274 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
5e19ed90 1275 ql_log(ql_log_warn, fcport->vha, 0x503b,
cfb0919c
CD
1276 "Async-%s error - hdl=%x not enough response(%d).\n",
1277 type, sp->handle, sts->rsp_data_len);
3822263e 1278 } else if (sts->data[3]) {
5e19ed90 1279 ql_log(ql_log_warn, fcport->vha, 0x503c,
cfb0919c
CD
1280 "Async-%s error - hdl=%x response(%x).\n",
1281 type, sp->handle, sts->data[3]);
3822263e
MI
1282 } else {
1283 error = 0;
1284 }
1285
1286 if (error) {
1287 iocb->u.tmf.data = error;
7c3df132
SK
1288 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1289 (uint8_t *)sts, sizeof(*sts));
3822263e
MI
1290 }
1291
9ba56b95 1292 sp->done(vha, sp, 0);
3822263e
MI
1293}
1294
1da177e4
LT
1295/**
1296 * qla2x00_process_response_queue() - Process response queue entries.
1297 * @ha: SCSI driver HA context
1298 */
1299void
73208dfd 1300qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 1301{
73208dfd
AC
1302 struct scsi_qla_host *vha;
1303 struct qla_hw_data *ha = rsp->hw;
3d71644c 1304 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1305 sts_entry_t *pkt;
1306 uint16_t handle_cnt;
1307 uint16_t cnt;
73208dfd 1308
2afa19a9 1309 vha = pci_get_drvdata(ha->pdev);
1da177e4 1310
e315cd28 1311 if (!vha->flags.online)
1da177e4
LT
1312 return;
1313
e315cd28
AC
1314 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1315 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 1316
e315cd28
AC
1317 rsp->ring_index++;
1318 if (rsp->ring_index == rsp->length) {
1319 rsp->ring_index = 0;
1320 rsp->ring_ptr = rsp->ring;
1da177e4 1321 } else {
e315cd28 1322 rsp->ring_ptr++;
1da177e4
LT
1323 }
1324
1325 if (pkt->entry_status != 0) {
73208dfd 1326 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
1327 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1328 wmb();
1329 continue;
1330 }
1331
1332 switch (pkt->entry_type) {
1333 case STATUS_TYPE:
73208dfd 1334 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
1335 break;
1336 case STATUS_TYPE_21:
1337 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1338 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1339 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1340 ((sts21_entry_t *)pkt)->handle[cnt]);
1341 }
1342 break;
1343 case STATUS_TYPE_22:
1344 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1345 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1346 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1347 ((sts22_entry_t *)pkt)->handle[cnt]);
1348 }
1349 break;
1350 case STATUS_CONT_TYPE:
2afa19a9 1351 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1da177e4 1352 break;
ac280b67
AV
1353 case MBX_IOCB_TYPE:
1354 qla2x00_mbx_iocb_entry(vha, rsp->req,
1355 (struct mbx_entry *)pkt);
3822263e 1356 break;
9bc4f4fb
HZ
1357 case CT_IOCB_TYPE:
1358 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1359 break;
1da177e4
LT
1360 default:
1361 /* Type Not Supported. */
7c3df132
SK
1362 ql_log(ql_log_warn, vha, 0x504a,
1363 "Received unknown response pkt type %x "
1da177e4 1364 "entry status=%x.\n",
7c3df132 1365 pkt->entry_type, pkt->entry_status);
1da177e4
LT
1366 break;
1367 }
1368 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1369 wmb();
1370 }
1371
1372 /* Adjust ring index */
e315cd28 1373 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
1374}
1375
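/*
 * Copy firmware sense data into the midlayer sense buffer; when more sense
 * data remains than fits in this status entry, park the SRB on
 * rsp->status_srb so status-continuation entries can supply the rest.
 */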
4733fcb1 1376static inline void
5544213b 1377qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
9ba56b95 1378 uint32_t sense_len, struct rsp_que *rsp, int res)
4733fcb1 1379{
7c3df132 1380 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95
GM
1381 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1382 uint32_t track_sense_len;
4733fcb1
AV
1383
1384 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1385 sense_len = SCSI_SENSE_BUFFERSIZE;
1386
9ba56b95
GM
1387 SET_CMD_SENSE_LEN(sp, sense_len);
1388 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1389 track_sense_len = sense_len;
1390
1391 if (sense_len > par_sense_len)
5544213b 1392 sense_len = par_sense_len;
4733fcb1
AV
1393
1394 memcpy(cp->sense_buffer, sense_data, sense_len);
1395
9ba56b95
GM
1396 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1397 track_sense_len -= sense_len;
1398 SET_CMD_SENSE_LEN(sp, track_sense_len);
1399
1400 if (track_sense_len != 0) {
2afa19a9 1401 rsp->status_srb = sp;
9ba56b95
GM
1402 cp->result = res;
1403 }
4733fcb1 1404
cfb0919c
CD
1405 if (sense_len) {
1406 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1407 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1408 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1409 cp);
7c3df132
SK
1410 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1411 cp->sense_buffer, sense_len);
cfb0919c 1412 }
4733fcb1
AV
1413}
1414
bad75002
AE
1415struct scsi_dif_tuple {
1416 __be16 guard; /* Checksum */
d6a03581 1417 __be16 app_tag; /* APPL identifier */
bad75002
AE
1418 __be32 ref_tag; /* Target LBA or indirect LBA */
1419};
1420
1421/*
1422 * Checks the guard or meta-data for the type of error
1423 * detected by the HBA. In case of errors, we set the
1424 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 1425 * to indicate to the kernel that the HBA detected an error.
1426 */
8cb2049c 1427static inline int
bad75002
AE
1428qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1429{
7c3df132 1430 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95 1431 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
8cb2049c
AE
1432 uint8_t *ap = &sts24->data[12];
1433 uint8_t *ep = &sts24->data[20];
bad75002
AE
1434 uint32_t e_ref_tag, a_ref_tag;
1435 uint16_t e_app_tag, a_app_tag;
1436 uint16_t e_guard, a_guard;
1437
8cb2049c
AE
1438 /*
1439 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1440 * would make guard field appear at offset 2
1441 */
1442 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1443 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1444 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1445 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1446 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1447 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
bad75002 1448
7c3df132
SK
1449 ql_dbg(ql_dbg_io, vha, 0x3023,
1450 "iocb(s) %p Returned STATUS.\n", sts24);
bad75002 1451
7c3df132
SK
1452 ql_dbg(ql_dbg_io, vha, 0x3024,
1453 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
bad75002 1454 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
7c3df132 1455 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
bad75002 1456 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
7c3df132 1457 a_app_tag, e_app_tag, a_guard, e_guard);
bad75002 1458
8cb2049c
AE
1459 /*
1460 * Ignore sector if:
1461 * For type 3: ref & app tag is all 'f's
1462 * For type 0,1,2: app tag is all 'f's
1463 */
1464 if ((a_app_tag == 0xffff) &&
1465 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1466 (a_ref_tag == 0xffffffff))) {
1467 uint32_t blocks_done, resid;
1468 sector_t lba_s = scsi_get_lba(cmd);
1469
1470 /* 2TB boundary case covered automatically with this */
1471 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1472
1473 resid = scsi_bufflen(cmd) - (blocks_done *
1474 cmd->device->sector_size);
1475
1476 scsi_set_resid(cmd, resid);
1477 cmd->result = DID_OK << 16;
1478
1479 /* Update protection tag */
1480 if (scsi_prot_sg_count(cmd)) {
1481 uint32_t i, j = 0, k = 0, num_ent;
1482 struct scatterlist *sg;
1483 struct sd_dif_tuple *spt;
1484
1485 /* Patch the corresponding protection tags */
1486 scsi_for_each_prot_sg(cmd, sg,
1487 scsi_prot_sg_count(cmd), i) {
1488 num_ent = sg_dma_len(sg) / 8;
1489 if (k + num_ent < blocks_done) {
1490 k += num_ent;
1491 continue;
1492 }
1493 j = blocks_done - k - 1;
1494 k = blocks_done;
1495 break;
1496 }
1497
1498 if (k != blocks_done) {
cfb0919c 1499 ql_log(ql_log_warn, vha, 0x302f,
8ec9c7fb
RD
1500 "unexpected tag values tag:lba=%x:%llx)\n",
1501 e_ref_tag, (unsigned long long)lba_s);
8cb2049c
AE
1502 return 1;
1503 }
1504
1505 spt = page_address(sg_page(sg)) + sg->offset;
1506 spt += j;
1507
1508 spt->app_tag = 0xffff;
1509 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1510 spt->ref_tag = 0xffffffff;
1511 }
1512
1513 return 0;
1514 }
1515
bad75002
AE
1516 /* check guard */
1517 if (e_guard != a_guard) {
1518 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1519 0x10, 0x1);
1520 set_driver_byte(cmd, DRIVER_SENSE);
1521 set_host_byte(cmd, DID_ABORT);
1522 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1523 return 1;
bad75002
AE
1524 }
1525
e02587d7
AE
1526 /* check ref tag */
1527 if (e_ref_tag != a_ref_tag) {
bad75002 1528 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1529 0x10, 0x3);
bad75002
AE
1530 set_driver_byte(cmd, DRIVER_SENSE);
1531 set_host_byte(cmd, DID_ABORT);
1532 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1533 return 1;
bad75002
AE
1534 }
1535
e02587d7
AE
1536 /* check appl tag */
1537 if (e_app_tag != a_app_tag) {
bad75002 1538 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1539 0x10, 0x2);
bad75002
AE
1540 set_driver_byte(cmd, DRIVER_SENSE);
1541 set_host_byte(cmd, DID_ABORT);
1542 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1543 return 1;
bad75002 1544 }
e02587d7 1545
8cb2049c 1546 return 1;
bad75002
AE
1547}
1548
a9b6f722
SK
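/*
 * Complete a bidirectional (BIDI) pass-through command: map the firmware
 * completion status onto an EXT_STATUS_* code returned in the bsg reply.
 */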
1549static void
1550qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1551 struct req_que *req, uint32_t index)
1552{
1553 struct qla_hw_data *ha = vha->hw;
1554 srb_t *sp;
1555 uint16_t comp_status;
1556 uint16_t scsi_status;
1557 uint16_t thread_id;
1558 uint32_t rval = EXT_STATUS_OK;
1559 struct fc_bsg_job *bsg_job = NULL;
1560 sts_entry_t *sts;
1561 struct sts_entry_24xx *sts24;
1562 sts = (sts_entry_t *) pkt;
1563 sts24 = (struct sts_entry_24xx *) pkt;
1564
1565 /* Validate handle. */
1566 if (index >= MAX_OUTSTANDING_COMMANDS) {
1567 ql_log(ql_log_warn, vha, 0x70af,
1568 "Invalid SCSI completion handle 0x%x.\n", index);
1569 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1570 return;
1571 }
1572
1573 sp = req->outstanding_cmds[index];
1574 if (sp) {
1575 /* Free outstanding command slot. */
1576 req->outstanding_cmds[index] = NULL;
1577 bsg_job = sp->u.bsg_job;
1578 } else {
1579 ql_log(ql_log_warn, vha, 0x70b0,
1580 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1581 req->id, index);
1582
1583 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1584 return;
1585 }
1586
1587 if (IS_FWI2_CAPABLE(ha)) {
1588 comp_status = le16_to_cpu(sts24->comp_status);
1589 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1590 } else {
1591 comp_status = le16_to_cpu(sts->comp_status);
1592 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1593 }
1594
1595 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1596 switch (comp_status) {
1597 case CS_COMPLETE:
1598 if (scsi_status == 0) {
1599 bsg_job->reply->reply_payload_rcv_len =
1600 bsg_job->reply_payload.payload_len;
1601 rval = EXT_STATUS_OK;
1602 }
1603 goto done;
1604
1605 case CS_DATA_OVERRUN:
1606 ql_dbg(ql_dbg_user, vha, 0x70b1,
1607 "Command completed with date overrun thread_id=%d\n",
1608 thread_id);
1609 rval = EXT_STATUS_DATA_OVERRUN;
1610 break;
1611
1612 case CS_DATA_UNDERRUN:
1613 ql_dbg(ql_dbg_user, vha, 0x70b2,
1614 "Command completed with date underrun thread_id=%d\n",
1615 thread_id);
1616 rval = EXT_STATUS_DATA_UNDERRUN;
1617 break;
1618 case CS_BIDIR_RD_OVERRUN:
1619 ql_dbg(ql_dbg_user, vha, 0x70b3,
1620 "Command completed with read data overrun thread_id=%d\n",
1621 thread_id);
1622 rval = EXT_STATUS_DATA_OVERRUN;
1623 break;
1624
1625 case CS_BIDIR_RD_WR_OVERRUN:
1626 ql_dbg(ql_dbg_user, vha, 0x70b4,
1627 "Command completed with read and write data overrun "
1628 "thread_id=%d\n", thread_id);
1629 rval = EXT_STATUS_DATA_OVERRUN;
1630 break;
1631
1632 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1633 ql_dbg(ql_dbg_user, vha, 0x70b5,
1634 "Command completed with read data over and write data "
1635 "underrun thread_id=%d\n", thread_id);
1636 rval = EXT_STATUS_DATA_OVERRUN;
1637 break;
1638
1639 case CS_BIDIR_RD_UNDERRUN:
1640 ql_dbg(ql_dbg_user, vha, 0x70b6,
1641 "Command completed with read data data underrun "
1642 "thread_id=%d\n", thread_id);
1643 rval = EXT_STATUS_DATA_UNDERRUN;
1644 break;
1645
1646 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1647 ql_dbg(ql_dbg_user, vha, 0x70b7,
1648 "Command completed with read data under and write data "
1649 "overrun thread_id=%d\n", thread_id);
1650 rval = EXT_STATUS_DATA_UNDERRUN;
1651 break;
1652
1653 case CS_BIDIR_RD_WR_UNDERRUN:
1654 ql_dbg(ql_dbg_user, vha, 0x70b8,
1655 "Command completed with read and write data underrun "
1656 "thread_id=%d\n", thread_id);
1657 rval = EXT_STATUS_DATA_UNDERRUN;
1658 break;
1659
1660 case CS_BIDIR_DMA:
1661 ql_dbg(ql_dbg_user, vha, 0x70b9,
1662 "Command completed with data DMA error thread_id=%d\n",
1663 thread_id);
1664 rval = EXT_STATUS_DMA_ERR;
1665 break;
1666
1667 case CS_TIMEOUT:
1668 ql_dbg(ql_dbg_user, vha, 0x70ba,
1669 "Command completed with timeout thread_id=%d\n",
1670 thread_id);
1671 rval = EXT_STATUS_TIMEOUT;
1672 break;
1673 default:
1674 ql_dbg(ql_dbg_user, vha, 0x70bb,
1675 "Command completed with completion status=0x%x "
1676 "thread_id=%d\n", comp_status, thread_id);
1677 rval = EXT_STATUS_ERR;
1678 break;
1679 }
1680 bsg_job->reply->reply_payload_rcv_len = 0;
1681
1682done:
1683 /* Return the vendor specific reply to API */
1684 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1685 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1686 /* Always return DID_OK; bsg will send the vendor-specific response
1687 * in this case only. */
1688 sp->done(vha, sp, (DID_OK << 16));
1689
1690}
1691
1da177e4
LT
1692/**
1693 * qla2x00_status_entry() - Process a Status IOCB entry.
1694 * @vha: SCSI driver HA context
1695 * @pkt: Entry pointer
1696 */
1697static void
73208dfd 1698qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1699{
1da177e4 1700 srb_t *sp;
1da177e4
LT
1701 fc_port_t *fcport;
1702 struct scsi_cmnd *cp;
9a853f71
AV
1703 sts_entry_t *sts;
1704 struct sts_entry_24xx *sts24;
1da177e4
LT
1705 uint16_t comp_status;
1706 uint16_t scsi_status;
b7d2280c 1707 uint16_t ox_id;
1da177e4
LT
1708 uint8_t lscsi_status;
1709 int32_t resid;
5544213b
AV
1710 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1711 fw_resid_len;
9a853f71 1712 uint8_t *rsp_info, *sense_data;
e315cd28 1713 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1714 uint32_t handle;
1715 uint16_t que;
1716 struct req_que *req;
b7d2280c 1717 int logit = 1;
9ba56b95 1718 int res = 0;
a9b6f722 1719 uint16_t state_flags = 0;
9a853f71
AV
1720
1721 sts = (sts_entry_t *) pkt;
1722 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1723 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1724 comp_status = le16_to_cpu(sts24->comp_status);
1725 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
a9b6f722 1726 state_flags = le16_to_cpu(sts24->state_flags);
9a853f71
AV
1727 } else {
1728 comp_status = le16_to_cpu(sts->comp_status);
1729 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1730 }
2afa19a9
AC
1731 handle = (uint32_t) LSW(sts->handle);
1732 que = MSW(sts->handle);
1733 req = ha->req_q_map[que];
a9083016 1734
1da177e4 1735 /* Validate handle. */
2afa19a9
AC
1736 if (handle < MAX_OUTSTANDING_COMMANDS) {
1737 sp = req->outstanding_cmds[handle];
1da177e4
LT
1738 } else
1739 sp = NULL;
1740
1741 if (sp == NULL) {
cfb0919c 1742 ql_dbg(ql_dbg_io, vha, 0x3017,
7c3df132 1743 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1744
8f7daead
GM
1745 if (IS_QLA82XX(ha))
1746 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1747 else
1748 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1749 qla2xxx_wake_dpc(vha);
1da177e4
LT
1750 return;
1751 }
a9b6f722
SK
1752
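	/* Bidirectional pass-through commands complete through the BSG path. */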
1753 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1754 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1755 return;
1756 }
1757
1758 /* Fast path completion. */
1759 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1760 qla2x00_process_completed_request(vha, req, handle);
1761
1762 return;
1763 }
1764
1765 req->outstanding_cmds[handle] = NULL;
9ba56b95 1766 cp = GET_CMD_SP(sp);
1da177e4 1767 if (cp == NULL) {
cfb0919c 1768 ql_dbg(ql_dbg_io, vha, 0x3018,
7c3df132
SK
1769 "Command already returned (0x%x/%p).\n",
1770 sts->handle, sp);
1da177e4
LT
1771
1772 return;
1773 }
1774
9a853f71 1775 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1776
bdf79621 1777 fcport = sp->fcport;
1da177e4 1778
b7d2280c 1779 ox_id = 0;
5544213b
AV
1780 sense_len = par_sense_len = rsp_info_len = resid_len =
1781 fw_resid_len = 0;
e428924c 1782 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
1783 if (scsi_status & SS_SENSE_LEN_VALID)
1784 sense_len = le32_to_cpu(sts24->sense_len);
1785 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1786 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1787 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1788 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1789 if (comp_status == CS_DATA_UNDERRUN)
1790 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1791 rsp_info = sts24->data;
1792 sense_data = sts24->data;
1793 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 1794 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 1795 par_sense_len = sizeof(sts24->data);
9a853f71 1796 } else {
0f00a206
LC
1797 if (scsi_status & SS_SENSE_LEN_VALID)
1798 sense_len = le16_to_cpu(sts->req_sense_length);
1799 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1800 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
1801 resid_len = le32_to_cpu(sts->residual_length);
1802 rsp_info = sts->rsp_info;
1803 sense_data = sts->req_sense_data;
5544213b 1804 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
1805 }
1806
1da177e4
LT
1807 /* Check for any FCP transport errors. */
1808 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1809 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 1810 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 1811 sense_data += rsp_info_len;
5544213b
AV
1812 par_sense_len -= rsp_info_len;
1813 }
9a853f71 1814 if (rsp_info_len > 3 && rsp_info[3]) {
5e19ed90 1815 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
7c3df132
SK
1816 "FCP I/O protocol failure (0x%x/0x%x).\n",
1817 rsp_info_len, rsp_info[3]);
1da177e4 1818
9ba56b95 1819 res = DID_BUS_BUSY << 16;
b7d2280c 1820 goto out;
1da177e4
LT
1821 }
1822 }
1823
3e8ce320
AV
1824 /* Check for overrun. */
1825 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1826 scsi_status & SS_RESIDUAL_OVER)
1827 comp_status = CS_DATA_OVERRUN;
1828
1da177e4
LT
1829 /*
1830 * Based on the host and SCSI status, generate the status code for Linux.
1831 */
1832 switch (comp_status) {
1833 case CS_COMPLETE:
df7baa50 1834 case CS_QUEUE_FULL:
1da177e4 1835 if (scsi_status == 0) {
9ba56b95 1836 res = DID_OK << 16;
1da177e4
LT
1837 break;
1838 }
1839 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 1840 resid = resid_len;
385d70b4 1841 scsi_set_resid(cp, resid);
0da69df1
AV
1842
1843 if (!lscsi_status &&
385d70b4 1844 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 1845 cp->underflow)) {
5e19ed90 1846 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
7c3df132 1847 "Mid-layer underflow "
b7d2280c 1848 "detected (0x%x of 0x%x bytes).\n",
7c3df132 1849 resid, scsi_bufflen(cp));
0da69df1 1850
9ba56b95 1851 res = DID_ERROR << 16;
0da69df1
AV
1852 break;
1853 }
1da177e4 1854 }
9ba56b95 1855 res = DID_OK << 16 | lscsi_status;
1da177e4 1856
df7baa50 1857 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 1858 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
7c3df132 1859 "QUEUE FULL detected.\n");
df7baa50
AV
1860 break;
1861 }
b7d2280c 1862 logit = 0;
1da177e4
LT
1863 if (lscsi_status != SS_CHECK_CONDITION)
1864 break;
1865
b80ca4f7 1866 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1867 if (!(scsi_status & SS_SENSE_LEN_VALID))
1868 break;
1869
5544213b 1870 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
9ba56b95 1871 rsp, res);
1da177e4
LT
1872 break;
1873
1874 case CS_DATA_UNDERRUN:
ed17c71b 1875 /* Use F/W calculated residual length. */
0f00a206
LC
1876 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1877 scsi_set_resid(cp, resid);
1878 if (scsi_status & SS_RESIDUAL_UNDER) {
1879 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
5e19ed90 1880 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
7c3df132
SK
1881 "Dropped frame(s) detected "
1882 "(0x%x of 0x%x bytes).\n",
1883 resid, scsi_bufflen(cp));
0f00a206 1884
9ba56b95 1885 res = DID_ERROR << 16 | lscsi_status;
4e85e3d9 1886 goto check_scsi_status;
6acf8190 1887 }
ed17c71b 1888
0f00a206
LC
1889 if (!lscsi_status &&
1890 ((unsigned)(scsi_bufflen(cp) - resid) <
1891 cp->underflow)) {
5e19ed90 1892 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
7c3df132 1893 "Mid-layer underflow "
b7d2280c 1894 "detected (0x%x of 0x%x bytes).\n",
7c3df132 1895 resid, scsi_bufflen(cp));
e038a1be 1896
9ba56b95 1897 res = DID_ERROR << 16;
0f00a206
LC
1898 break;
1899 }
4aee5766
GM
1900 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
1901 lscsi_status != SAM_STAT_BUSY) {
1902 /*
1903 * A SCSI status of TASK SET FULL or BUSY is treated as a
1904 * task that did not complete.
1905 */
1906
5e19ed90 1907 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
7c3df132 1908 "Dropped frame(s) detected (0x%x "
4aee5766
GM
1909 "of 0x%x bytes).\n", resid,
1910 scsi_bufflen(cp));
0f00a206 1911
9ba56b95 1912 res = DID_ERROR << 16 | lscsi_status;
0374f55e 1913 goto check_scsi_status;
4aee5766
GM
1914 } else {
1915 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
1916 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
1917 scsi_status, lscsi_status);
1da177e4
LT
1918 }
1919
9ba56b95 1920 res = DID_OK << 16 | lscsi_status;
b7d2280c 1921 logit = 0;
0f00a206 1922
0374f55e 1923check_scsi_status:
1da177e4 1924 /*
fa2a1ce5 1925 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
1926 * Status.
1927 */
1928 if (lscsi_status != 0) {
ffec28a3 1929 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 1930 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
7c3df132 1931 "QUEUE FULL detected.\n");
b7d2280c 1932 logit = 1;
ffec28a3
AV
1933 break;
1934 }
1da177e4
LT
1935 if (lscsi_status != SS_CHECK_CONDITION)
1936 break;
1937
b80ca4f7 1938 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1939 if (!(scsi_status & SS_SENSE_LEN_VALID))
1940 break;
1941
5544213b 1942 qla2x00_handle_sense(sp, sense_data, par_sense_len,
9ba56b95 1943 sense_len, rsp, res);
1da177e4
LT
1944 }
1945 break;
1946
1da177e4
LT
1947 case CS_PORT_LOGGED_OUT:
1948 case CS_PORT_CONFIG_CHG:
1949 case CS_PORT_BUSY:
1950 case CS_INCOMPLETE:
1951 case CS_PORT_UNAVAILABLE:
b7d2280c 1952 case CS_TIMEOUT:
ff454b01
CD
1953 case CS_RESET:
1954
056a4483
MC
1955 /*
1956 * We are going to have the fc class block the rport
1957 * while we try to recover so instruct the mid layer
1958 * to requeue until the class decides how to handle this.
1959 */
9ba56b95 1960 res = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
1961
1962 if (comp_status == CS_TIMEOUT) {
1963 if (IS_FWI2_CAPABLE(ha))
1964 break;
1965 else if ((le16_to_cpu(sts->status_flags) &
1966 SF_LOGOUT_SENT) == 0)
1967 break;
1968 }
1969
5e19ed90 1970 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
7c3df132
SK
1971 "Port down status: port-state=0x%x.\n",
1972 atomic_read(&fcport->state));
b7d2280c 1973
a7a28504 1974 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 1975 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1976 break;
1977
1da177e4 1978 case CS_ABORTED:
9ba56b95 1979 res = DID_RESET << 16;
1da177e4 1980 break;
bad75002
AE
1981
1982 case CS_DIF_ERROR:
8cb2049c 1983 logit = qla2x00_handle_dif_error(sp, sts24);
bad75002 1984 break;
1da177e4 1985 default:
9ba56b95 1986 res = DID_ERROR << 16;
1da177e4
LT
1987 break;
1988 }
1989
b7d2280c
AV
1990out:
1991 if (logit)
5e19ed90 1992 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
7c3df132 1993 "FCP command status: 0x%x-0x%x (0x%x) "
cfb0919c
CD
1994 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
1995 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
7c3df132 1996 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
9ba56b95 1997 comp_status, scsi_status, res, vha->host_no,
cfb0919c
CD
1998 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
1999 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2000 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2001 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2002 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
7c3df132 2003 resid_len, fw_resid_len);
b7d2280c 2004
2afa19a9 2005 if (rsp->status_srb == NULL)
9ba56b95 2006 sp->done(ha, sp, res);
1da177e4
LT
2007}
2008
2009/**
2010 * qla2x00_status_cont_entry() - Process a Status Continuation entry.
2011 * @rsp: response queue
2012 * @pkt: Entry pointer
2013 *
2014 * Extended sense data.
2015 */
2016static void
2afa19a9 2017qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4 2018{
9ba56b95 2019 uint8_t sense_sz = 0;
2afa19a9 2020 struct qla_hw_data *ha = rsp->hw;
7c3df132 2021 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
9ba56b95 2022 srb_t *sp = rsp->status_srb;
1da177e4 2023 struct scsi_cmnd *cp;
9ba56b95
GM
2024 uint32_t sense_len;
2025 uint8_t *sense_ptr;
1da177e4 2026
9ba56b95
GM
2027 if (!sp || !GET_CMD_SENSE_LEN(sp))
2028 return;
1da177e4 2029
9ba56b95
GM
2030 sense_len = GET_CMD_SENSE_LEN(sp);
2031 sense_ptr = GET_CMD_SENSE_PTR(sp);
1da177e4 2032
9ba56b95
GM
2033 cp = GET_CMD_SP(sp);
2034 if (cp == NULL) {
2035 ql_log(ql_log_warn, vha, 0x3025,
2036 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1da177e4 2037
9ba56b95
GM
2038 rsp->status_srb = NULL;
2039 return;
1da177e4 2040 }
1da177e4 2041
9ba56b95
GM
2042 if (sense_len > sizeof(pkt->data))
2043 sense_sz = sizeof(pkt->data);
2044 else
2045 sense_sz = sense_len;
c4631191 2046
9ba56b95
GM
2047 /* Move sense data. */
2048 if (IS_FWI2_CAPABLE(ha))
2049 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2050 memcpy(sense_ptr, pkt->data, sense_sz);
2051 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2052 sense_ptr, sense_sz);
c4631191 2053
9ba56b95
GM
2054 sense_len -= sense_sz;
2055 sense_ptr += sense_sz;
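	/* Any remaining sense bytes arrive in further continuation entries. */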
c4631191 2056
9ba56b95
GM
2057 SET_CMD_SENSE_PTR(sp, sense_ptr);
2058 SET_CMD_SENSE_LEN(sp, sense_len);
2059
2060 /* Place command on done queue. */
2061 if (sense_len == 0) {
2062 rsp->status_srb = NULL;
2063 sp->done(ha, sp, cp->result);
c4631191 2064 }
c4631191
GM
2065}
2066
1da177e4
LT
2067/**
2068 * qla2x00_error_entry() - Process an error entry.
2069 * @vha: SCSI driver HA context
2070 * @pkt: Entry pointer
2071 */
2072static void
73208dfd 2073qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
2074{
2075 srb_t *sp;
e315cd28 2076 struct qla_hw_data *ha = vha->hw;
c4631191 2077 const char func[] = "ERROR-IOCB";
2afa19a9 2078 uint16_t que = MSW(pkt->handle);
a6fe35c0 2079 struct req_que *req = NULL;
9ba56b95 2080 int res = DID_ERROR << 16;
7c3df132 2081
9ba56b95
GM
2082 ql_dbg(ql_dbg_async, vha, 0x502a,
2083 "type of error status in response: 0x%x\n", pkt->entry_status);
2084
a6fe35c0
AE
2085 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2086 goto fatal;
2087
2088 req = ha->req_q_map[que];
2089
9ba56b95
GM
2090 if (pkt->entry_status & RF_BUSY)
2091 res = DID_BUS_BUSY << 16;
1da177e4 2092
c4631191 2093 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
a6fe35c0 2094 if (sp) {
9ba56b95 2095 sp->done(ha, sp, res);
a6fe35c0 2096 return;
1da177e4 2097 }
a6fe35c0
AE
2098fatal:
2099 ql_log(ql_log_warn, vha, 0x5030,
2100 "Error entry - invalid handle/queue.\n");
2101
2102 if (IS_QLA82XX(ha))
2103 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2104 else
2105 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2106 qla2xxx_wake_dpc(vha);
1da177e4
LT
2107}
2108
9a853f71
AV
2109/**
2110 * qla24xx_mbx_completion() - Process mailbox command completions.
2111 * @vha: SCSI driver HA context
2112 * @mb0: Mailbox0 register
2113 */
2114static void
e315cd28 2115qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
2116{
2117 uint16_t cnt;
4fa94f83 2118 uint32_t mboxes;
9a853f71 2119 uint16_t __iomem *wptr;
e315cd28 2120 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
2121 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2122
4fa94f83
AV
2123 /* Default to reading all mailbox registers. */
2124 mboxes = (1 << ha->mbx_count) - 1;
2125 if (!ha->mcp)
2126 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2127 else
2128 mboxes = ha->mcp->in_mb;
2129
9a853f71
AV
2130 /* Load return mailbox registers. */
2131 ha->flags.mbox_int = 1;
2132 ha->mailbox_out[0] = mb0;
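	/* Mailbox 0 was passed in via mb0; shift the mask past it. */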
4fa94f83 2133 mboxes >>= 1;
9a853f71
AV
2134 wptr = (uint16_t __iomem *)&reg->mailbox1;
2135
2136 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
4fa94f83
AV
2137 if (mboxes & BIT_0)
2138 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2139
2140 mboxes >>= 1;
9a853f71
AV
2141 wptr++;
2142 }
9a853f71
AV
2143}
2144
2145/**
2146 * qla24xx_process_response_queue() - Process response queue entries.
2147 * @vha: SCSI driver HA context
2148 */
2afa19a9
AC
2149void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2150 struct rsp_que *rsp)
9a853f71 2151{
9a853f71 2152 struct sts_entry_24xx *pkt;
a9083016 2153 struct qla_hw_data *ha = vha->hw;
9a853f71 2154
e315cd28 2155 if (!vha->flags.online)
9a853f71
AV
2156 return;
2157
e315cd28
AC
2158 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2159 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2160
e315cd28
AC
2161 rsp->ring_index++;
2162 if (rsp->ring_index == rsp->length) {
2163 rsp->ring_index = 0;
2164 rsp->ring_ptr = rsp->ring;
9a853f71 2165 } else {
e315cd28 2166 rsp->ring_ptr++;
9a853f71
AV
2167 }
2168
2169 if (pkt->entry_status != 0) {
73208dfd 2170 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2d70c103
NB
2171
2172 (void)qlt_24xx_process_response_error(vha, pkt);
2173
9a853f71
AV
2174 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2175 wmb();
2176 continue;
2177 }
2178
2179 switch (pkt->entry_type) {
2180 case STATUS_TYPE:
73208dfd 2181 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2182 break;
2183 case STATUS_CONT_TYPE:
2afa19a9 2184 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2185 break;
2c3dfe3f 2186 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2187 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2188 (struct vp_rpt_id_entry_24xx *)pkt);
2189 break;
ac280b67
AV
2190 case LOGINOUT_PORT_IOCB_TYPE:
2191 qla24xx_logio_entry(vha, rsp->req,
2192 (struct logio_entry_24xx *)pkt);
2193 break;
3822263e
MI
2194 case TSK_MGMT_IOCB_TYPE:
2195 qla24xx_tm_iocb_entry(vha, rsp->req,
2196 (struct tsk_mgmt_entry *)pkt);
2197 break;
9a069e19
GM
2198 case CT_IOCB_TYPE:
2199 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
9a069e19
GM
2200 break;
2201 case ELS_IOCB_TYPE:
2202 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2203 break;
2d70c103
NB
2204 case ABTS_RECV_24XX:
2205 /* ensure that the ATIO queue is empty */
2206 qlt_24xx_process_atio_queue(vha);
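		/* Fall through: the packet is also handed to the target-mode code below. */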
2207 case ABTS_RESP_24XX:
2208 case CTIO_TYPE7:
2209 case NOTIFY_ACK_TYPE:
2210 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2211 break;
54883291
SK
2212 case MARKER_TYPE:
2213 /* Do nothing in this case; this check prevents the marker
2214 * from falling through to the default case.
2215 */
2216 break;
9a853f71
AV
2217 default:
2218 /* Type Not Supported. */
7c3df132
SK
2219 ql_dbg(ql_dbg_async, vha, 0x5042,
2220 "Received unknown response pkt type %x "
9a853f71 2221 "entry status=%x.\n",
7c3df132 2222 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2223 break;
2224 }
2225 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2226 wmb();
2227 }
2228
2229 /* Adjust ring index */
a9083016
GM
2230 if (IS_QLA82XX(ha)) {
2231 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2232 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2233 } else
2234 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2235}
2236
05236a05 2237static void
e315cd28 2238qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2239{
2240 int rval;
2241 uint32_t cnt;
e315cd28 2242 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2243 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2244
6246b8a1 2245 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
05236a05
AV
2246 return;
2247
2248 rval = QLA_SUCCESS;
2249 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2250 RD_REG_DWORD(&reg->iobase_addr);
2251 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2252 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2253 rval == QLA_SUCCESS; cnt--) {
2254 if (cnt) {
2255 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2256 udelay(10);
2257 } else
2258 rval = QLA_FUNCTION_TIMEOUT;
2259 }
2260 if (rval == QLA_SUCCESS)
2261 goto next_test;
2262
2263 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2264 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2265 rval == QLA_SUCCESS; cnt--) {
2266 if (cnt) {
2267 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2268 udelay(10);
2269 } else
2270 rval = QLA_FUNCTION_TIMEOUT;
2271 }
2272 if (rval != QLA_SUCCESS)
2273 goto done;
2274
2275next_test:
2276 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2277 ql_log(ql_log_info, vha, 0x504c,
2278 "Additional code -- 0x55AA.\n");
05236a05
AV
2279
2280done:
2281 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2282 RD_REG_DWORD(&reg->iobase_window);
2283}
2284
9a853f71 2285/**
6246b8a1 2286 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
9a853f71
AV
2287 * @irq: interrupt number
2288 * @dev_id: SCSI driver HA context
9a853f71
AV
2289 *
2290 * Called by system whenever the host adapter generates an interrupt.
2291 *
2292 * Returns handled flag.
2293 */
2294irqreturn_t
7d12e780 2295qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2296{
e315cd28
AC
2297 scsi_qla_host_t *vha;
2298 struct qla_hw_data *ha;
9a853f71
AV
2299 struct device_reg_24xx __iomem *reg;
2300 int status;
9a853f71
AV
2301 unsigned long iter;
2302 uint32_t stat;
2303 uint32_t hccr;
2304 uint16_t mb[4];
e315cd28 2305 struct rsp_que *rsp;
43fac4d9 2306 unsigned long flags;
9a853f71 2307
e315cd28
AC
2308 rsp = (struct rsp_que *) dev_id;
2309 if (!rsp) {
3256b435
CD
2310 ql_log(ql_log_info, NULL, 0x5059,
2311 "%s: NULL response queue pointer.\n", __func__);
9a853f71
AV
2312 return IRQ_NONE;
2313 }
2314
e315cd28 2315 ha = rsp->hw;
9a853f71
AV
2316 reg = &ha->iobase->isp24;
2317 status = 0;
2318
85880801
AV
2319 if (unlikely(pci_channel_offline(ha->pdev)))
2320 return IRQ_HANDLED;
2321
43fac4d9 2322 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2323 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2324 for (iter = 50; iter--; ) {
2325 stat = RD_REG_DWORD(&reg->host_status);
2326 if (stat & HSRX_RISC_PAUSED) {
85880801 2327 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2328 break;
2329
9a853f71
AV
2330 hccr = RD_REG_DWORD(&reg->hccr);
2331
7c3df132
SK
2332 ql_log(ql_log_warn, vha, 0x504b,
2333 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2334 hccr);
05236a05 2335
e315cd28 2336 qla2xxx_check_risc_status(vha);
05236a05 2337
e315cd28
AC
2338 ha->isp_ops->fw_dump(vha, 1);
2339 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2340 break;
2341 } else if ((stat & HSRX_RISC_INT) == 0)
2342 break;
2343
2344 switch (stat & 0xff) {
2345 case 0x1:
2346 case 0x2:
2347 case 0x10:
2348 case 0x11:
e315cd28 2349 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2350 status |= MBX_INTERRUPT;
2351
2352 break;
2353 case 0x12:
2354 mb[0] = MSW(stat);
2355 mb[1] = RD_REG_WORD(&reg->mailbox1);
2356 mb[2] = RD_REG_WORD(&reg->mailbox2);
2357 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2358 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
2359 break;
2360 case 0x13:
73208dfd 2361 case 0x14:
2afa19a9 2362 qla24xx_process_response_queue(vha, rsp);
9a853f71 2363 break;
2d70c103
NB
2364 case 0x1C: /* ATIO queue updated */
2365 qlt_24xx_process_atio_queue(vha);
2366 break;
2367 case 0x1D: /* ATIO and response queues updated */
2368 qlt_24xx_process_atio_queue(vha);
2369 qla24xx_process_response_queue(vha, rsp);
2370 break;
9a853f71 2371 default:
7c3df132
SK
2372 ql_dbg(ql_dbg_async, vha, 0x504f,
2373 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2374 break;
2375 }
2376 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2377 RD_REG_DWORD_RELAXED(&reg->hccr);
2378 }
43fac4d9 2379 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71
AV
2380
2381 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2382 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 2383 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2384 complete(&ha->mbx_intr_comp);
9a853f71
AV
2385 }
2386
2387 return IRQ_HANDLED;
2388}
2389
a8488abe
AV
2390static irqreturn_t
2391qla24xx_msix_rsp_q(int irq, void *dev_id)
2392{
e315cd28
AC
2393 struct qla_hw_data *ha;
2394 struct rsp_que *rsp;
a8488abe 2395 struct device_reg_24xx __iomem *reg;
2afa19a9 2396 struct scsi_qla_host *vha;
0f19bc68 2397 unsigned long flags;
a8488abe 2398
e315cd28
AC
2399 rsp = (struct rsp_que *) dev_id;
2400 if (!rsp) {
3256b435
CD
2401 ql_log(ql_log_info, NULL, 0x505a,
2402 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2403 return IRQ_NONE;
2404 }
2405 ha = rsp->hw;
a8488abe
AV
2406 reg = &ha->iobase->isp24;
2407
0f19bc68 2408 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2409
a67093d4 2410 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2411 qla24xx_process_response_queue(vha, rsp);
3155754a 2412 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2413 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2414 RD_REG_DWORD_RELAXED(&reg->hccr);
2415 }
0f19bc68 2416 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2417
2418 return IRQ_HANDLED;
2419}
2420
68ca949c
AC
2421static irqreturn_t
2422qla25xx_msix_rsp_q(int irq, void *dev_id)
2423{
2424 struct qla_hw_data *ha;
2425 struct rsp_que *rsp;
3155754a 2426 struct device_reg_24xx __iomem *reg;
0f19bc68 2427 unsigned long flags;
68ca949c
AC
2428
2429 rsp = (struct rsp_que *) dev_id;
2430 if (!rsp) {
3256b435
CD
2431 ql_log(ql_log_info, NULL, 0x505b,
2432 "%s: NULL response queue pointer.\n", __func__);
68ca949c
AC
2433 return IRQ_NONE;
2434 }
2435 ha = rsp->hw;
2436
3155754a 2437 /* Clear the interrupt, if enabled, for this response queue */
d424754c 2438 if (!ha->flags.disable_msix_handshake) {
3155754a 2439 reg = &ha->iobase->isp24;
0f19bc68 2440 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2441 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2442 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2443 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2444 }
68ca949c
AC
2445 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2446
2447 return IRQ_HANDLED;
2448}
2449
a8488abe
AV
2450static irqreturn_t
2451qla24xx_msix_default(int irq, void *dev_id)
2452{
e315cd28
AC
2453 scsi_qla_host_t *vha;
2454 struct qla_hw_data *ha;
2455 struct rsp_que *rsp;
a8488abe
AV
2456 struct device_reg_24xx __iomem *reg;
2457 int status;
a8488abe
AV
2458 uint32_t stat;
2459 uint32_t hccr;
2460 uint16_t mb[4];
0f19bc68 2461 unsigned long flags;
a8488abe 2462
e315cd28
AC
2463 rsp = (struct rsp_que *) dev_id;
2464 if (!rsp) {
3256b435
CD
2465 ql_log(ql_log_info, NULL, 0x505c,
2466 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2467 return IRQ_NONE;
2468 }
2469 ha = rsp->hw;
a8488abe
AV
2470 reg = &ha->iobase->isp24;
2471 status = 0;
2472
0f19bc68 2473 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2474 vha = pci_get_drvdata(ha->pdev);
87f27015 2475 do {
a8488abe
AV
2476 stat = RD_REG_DWORD(&reg->host_status);
2477 if (stat & HSRX_RISC_PAUSED) {
85880801 2478 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2479 break;
2480
a8488abe
AV
2481 hccr = RD_REG_DWORD(&reg->hccr);
2482
7c3df132
SK
2483 ql_log(ql_log_info, vha, 0x5050,
2484 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2485 hccr);
05236a05 2486
e315cd28 2487 qla2xxx_check_risc_status(vha);
05236a05 2488
e315cd28
AC
2489 ha->isp_ops->fw_dump(vha, 1);
2490 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2491 break;
2492 } else if ((stat & HSRX_RISC_INT) == 0)
2493 break;
2494
2495 switch (stat & 0xff) {
2496 case 0x1:
2497 case 0x2:
2498 case 0x10:
2499 case 0x11:
e315cd28 2500 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2501 status |= MBX_INTERRUPT;
2502
2503 break;
2504 case 0x12:
2505 mb[0] = MSW(stat);
2506 mb[1] = RD_REG_WORD(&reg->mailbox1);
2507 mb[2] = RD_REG_WORD(&reg->mailbox2);
2508 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2509 qla2x00_async_event(vha, rsp, mb);
a8488abe
AV
2510 break;
2511 case 0x13:
73208dfd 2512 case 0x14:
2afa19a9 2513 qla24xx_process_response_queue(vha, rsp);
a8488abe 2514 break;
2d70c103
NB
2515 case 0x1C: /* ATIO queue updated */
2516 qlt_24xx_process_atio_queue(vha);
2517 break;
2518 case 0x1D: /* ATIO and response queues updated */
2519 qlt_24xx_process_atio_queue(vha);
2520 qla24xx_process_response_queue(vha, rsp);
2521 break;
a8488abe 2522 default:
7c3df132
SK
2523 ql_dbg(ql_dbg_async, vha, 0x5051,
2524 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2525 break;
2526 }
2527 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2528 } while (0);
0f19bc68 2529 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2530
2531 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2532 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 2533 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2534 complete(&ha->mbx_intr_comp);
a8488abe 2535 }
a8488abe
AV
2536 return IRQ_HANDLED;
2537}
2538
2539/* Interrupt handling helpers. */
2540
2541struct qla_init_msix_entry {
a8488abe 2542 const char *name;
476834c2 2543 irq_handler_t handler;
a8488abe
AV
2544};
2545
68ca949c 2546static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2547 { "qla2xxx (default)", qla24xx_msix_default },
2548 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2549 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2550};
2551
a9083016
GM
2552static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2553 { "qla2xxx (default)", qla82xx_msix_default },
2554 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2555};
2556
a8488abe 2557static void
e315cd28 2558qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2559{
2560 int i;
2561 struct qla_msix_entry *qentry;
7c3df132 2562 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2563
73208dfd
AC
2564 for (i = 0; i < ha->msix_count; i++) {
2565 qentry = &ha->msix_entries[i];
a8488abe 2566 if (qentry->have_irq)
73208dfd 2567 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2568 }
2569 pci_disable_msix(ha->pdev);
73208dfd
AC
2570 kfree(ha->msix_entries);
2571 ha->msix_entries = NULL;
2572 ha->flags.msix_enabled = 0;
7c3df132
SK
2573 ql_dbg(ql_dbg_init, vha, 0x0042,
2574 "Disabled the MSI.\n");
a8488abe
AV
2575}
2576
2577static int
73208dfd 2578qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2579{
ad038fa8 2580#define MIN_MSIX_COUNT 2
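/* At least one vector for the default queue and one for the base response queue. */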
a8488abe 2581 int i, ret;
73208dfd 2582 struct msix_entry *entries;
a8488abe 2583 struct qla_msix_entry *qentry;
7c3df132 2584 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2585
2586 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2587 GFP_KERNEL);
7c3df132
SK
2588 if (!entries) {
2589 ql_log(ql_log_warn, vha, 0x00bc,
2590 "Failed to allocate memory for msix_entry.\n");
73208dfd 2591 return -ENOMEM;
7c3df132 2592 }
a8488abe 2593
73208dfd
AC
2594 for (i = 0; i < ha->msix_count; i++)
2595 entries[i].entry = i;
a8488abe 2596
73208dfd 2597 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2598 if (ret) {
ad038fa8
LC
2599 if (ret < MIN_MSIX_COUNT)
2600 goto msix_failed;
2601
7c3df132
SK
2602 ql_log(ql_log_warn, vha, 0x00c6,
2603 "MSI-X: Failed to enable support "
2604 "-- %d/%d\n Retry with %d vectors.\n",
2605 ha->msix_count, ret, ret);
73208dfd
AC
2606 ha->msix_count = ret;
2607 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2608 if (ret) {
ad038fa8 2609msix_failed:
7c3df132
SK
2610 ql_log(ql_log_fatal, vha, 0x00c7,
2611 "MSI-X: Failed to enable support, "
2612 "giving up -- %d/%d.\n",
2613 ha->msix_count, ret);
73208dfd
AC
2614 goto msix_out;
2615 }
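	/* One vector is reserved for the default queue; the remainder can back response queues. */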
2afa19a9 2616 ha->max_rsp_queues = ha->msix_count - 1;
73208dfd
AC
2617 }
2618 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2619 ha->msix_count, GFP_KERNEL);
2620 if (!ha->msix_entries) {
7c3df132
SK
2621 ql_log(ql_log_fatal, vha, 0x00c8,
2622 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2623 ret = -ENOMEM;
a8488abe
AV
2624 goto msix_out;
2625 }
2626 ha->flags.msix_enabled = 1;
2627
73208dfd
AC
2628 for (i = 0; i < ha->msix_count; i++) {
2629 qentry = &ha->msix_entries[i];
2630 qentry->vector = entries[i].vector;
2631 qentry->entry = entries[i].entry;
a8488abe 2632 qentry->have_irq = 0;
73208dfd 2633 qentry->rsp = NULL;
a8488abe
AV
2634 }
2635
2afa19a9
AC
2636 /* Enable MSI-X vectors for the base queue */
2637 for (i = 0; i < 2; i++) {
2638 qentry = &ha->msix_entries[i];
a9083016
GM
2639 if (IS_QLA82XX(ha)) {
2640 ret = request_irq(qentry->vector,
2641 qla82xx_msix_entries[i].handler,
2642 0, qla82xx_msix_entries[i].name, rsp);
2643 } else {
2644 ret = request_irq(qentry->vector,
2645 msix_entries[i].handler,
2646 0, msix_entries[i].name, rsp);
2647 }
2afa19a9 2648 if (ret) {
7c3df132
SK
2649 ql_log(ql_log_fatal, vha, 0x00cb,
2650 "MSI-X: unable to register handler -- %x/%d.\n",
2651 qentry->vector, ret);
2afa19a9
AC
2652 qla24xx_disable_msix(ha);
2653 ha->mqenable = 0;
2654 goto msix_out;
2655 }
2656 qentry->have_irq = 1;
2657 qentry->rsp = rsp;
2658 rsp->msix = qentry;
73208dfd 2659 }
73208dfd
AC
2660
2661 /* Enable MSI-X vector for response queue update for queue 0 */
6246b8a1
GM
2662 if (IS_QLA83XX(ha)) {
2663 if (ha->msixbase && ha->mqiobase &&
2664 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2665 ha->mqenable = 1;
2666 } else
2667 if (ha->mqiobase
2668 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2669 ha->mqenable = 1;
7c3df132
SK
2670 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2671 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2672 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2673 ql_dbg(ql_dbg_init, vha, 0x0055,
2674 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2675 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2676
a8488abe 2677msix_out:
73208dfd 2678 kfree(entries);
a8488abe
AV
2679 return ret;
2680}
2681
2682int
73208dfd 2683qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2684{
2685 int ret;
963b0fdd 2686 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2687 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2688
2689 /* If possible, enable MSI-X. */
6246b8a1
GM
2690 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2691 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
6377a7ae
BH
2692 goto skip_msi;
2693
2694 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2695 (ha->pdev->subsystem_device == 0x7040 ||
2696 ha->pdev->subsystem_device == 0x7041 ||
2697 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2698 ql_log(ql_log_warn, vha, 0x0034,
2699 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2700 ha->pdev->subsystem_vendor,
7c3df132 2701 ha->pdev->subsystem_device);
6377a7ae
BH
2702 goto skip_msi;
2703 }
a8488abe 2704
42cd4f5d 2705 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
7c3df132
SK
2706 ql_log(ql_log_warn, vha, 0x0035,
2707 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
42cd4f5d 2708 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
a8488abe
AV
2709 goto skip_msix;
2710 }
2711
73208dfd 2712 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2713 if (!ret) {
7c3df132
SK
2714 ql_dbg(ql_dbg_init, vha, 0x0036,
2715 "MSI-X: Enabled (0x%X, 0x%X).\n",
2716 ha->chip_revision, ha->fw_attributes);
963b0fdd 2717 goto clear_risc_ints;
a8488abe 2718 }
7c3df132
SK
2719 ql_log(ql_log_info, vha, 0x0037,
2720 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2721skip_msix:
cbedb601 2722
3a03eb79
AV
2723 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2724 !IS_QLA8001(ha))
cbedb601
AV
2725 goto skip_msi;
2726
2727 ret = pci_enable_msi(ha->pdev);
2728 if (!ret) {
7c3df132
SK
2729 ql_dbg(ql_dbg_init, vha, 0x0038,
2730 "MSI: Enabled.\n");
cbedb601 2731 ha->flags.msi_enabled = 1;
a9083016 2732 } else
7c3df132
SK
2733 ql_log(ql_log_warn, vha, 0x0039,
2734 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
cbedb601
AV
2735skip_msi:
2736
fd34f556 2737 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2738 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2739 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2740 if (ret) {
7c3df132 2741 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2742 "Failed to reserve interrupt %d already in use.\n",
2743 ha->pdev->irq);
963b0fdd
AV
2744 goto fail;
2745 }
7992abfc 2746
963b0fdd
AV
2747clear_risc_ints:
2748
3a03eb79
AV
2749 /*
2750 * FIXME: Noted that 8014s were being dropped during NK testing.
2751 * Timing deltas during MSI-X/INTa transitions?
2752 */
6246b8a1 2753 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
3a03eb79 2754 goto fail;
c6952483 2755 spin_lock_irq(&ha->hardware_lock);
963b0fdd
AV
2756 if (IS_FWI2_CAPABLE(ha)) {
2757 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2758 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2759 } else {
2760 WRT_REG_WORD(&reg->isp.semaphore, 0);
2761 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2762 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
a8488abe 2763 }
c6952483 2764 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2765
963b0fdd 2766fail:
a8488abe
AV
2767 return ret;
2768}
2769
2770void
e315cd28 2771qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2772{
e315cd28 2773 struct qla_hw_data *ha = vha->hw;
9a347ff4
CD
2774 struct rsp_que *rsp;
2775
2776 /*
2777 * We need to check that ha->rsp_q_map is valid in case we are called
2778 * from a probe failure context.
2779 */
2780 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2781 return;
2782 rsp = ha->rsp_q_map[0];
a8488abe
AV
2783
2784 if (ha->flags.msix_enabled)
2785 qla24xx_disable_msix(ha);
90a86fc0 2786 else if (ha->flags.msi_enabled) {
e315cd28 2787 free_irq(ha->pdev->irq, rsp);
cbedb601 2788 pci_disable_msi(ha->pdev);
90a86fc0
JC
2789 } else
2790 free_irq(ha->pdev->irq, rsp);
a8488abe 2791}
e315cd28 2792
73208dfd
AC
2793
2794int qla25xx_request_irq(struct rsp_que *rsp)
2795{
2796 struct qla_hw_data *ha = rsp->hw;
2afa19a9 2797 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 2798 struct qla_msix_entry *msix = rsp->msix;
7c3df132 2799 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2800 int ret;
2801
2802 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2803 if (ret) {
7c3df132
SK
2804 ql_log(ql_log_fatal, vha, 0x00e6,
2805 "MSI-X: Unable to register handler -- %x/%d.\n",
2806 msix->vector, ret);
73208dfd
AC
2807 return ret;
2808 }
2809 msix->have_irq = 1;
2810 msix->rsp = rsp;
2811 return ret;
2812}