/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 6)
				link_speed = link_speeds[mb[1]];
			else if (mb[1] == 0x13)
				link_speed = link_speeds[6];
			ha->link_data_rate = mb[1];
		}

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n", link_speed);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 * Event is global, vp_idx is NOT all vps,
		 * vp_idx does not match
		 * Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		vha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
		rscn_queue_index = vha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != vha->rscn_out_ptr) {
			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
			vha->rscn_in_ptr = rscn_queue_index;
		} else {
			vha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;
		qla2x00_sp_compl(ha, sp);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	lio->done(sp);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x5047,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
		kfree(sp->fcport);

	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_log(ql_log_info, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_log(ql_log_info, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
	    (sp_bsg->type == SRB_CT_CMD))
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	lio->done(sp);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct srb_ctx *ctx;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	iocb->done(sp);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
	uint32_t sense_len, struct rsp_que *rsp)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = sp->cmd;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		rsp->status_srb = sp;

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;		/* Checksum */
	__be16 app_tag;		/* APPL identifier */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = sp->cmd;
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
1564
1da177e4
LT
1565/**
1566 * qla2x00_status_entry() - Process a Status IOCB entry.
1567 * @ha: SCSI driver HA context
1568 * @pkt: Entry pointer
1569 */
1570static void
73208dfd 1571qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1572{
1da177e4 1573 srb_t *sp;
1da177e4
LT
1574 fc_port_t *fcport;
1575 struct scsi_cmnd *cp;
9a853f71
AV
1576 sts_entry_t *sts;
1577 struct sts_entry_24xx *sts24;
1da177e4
LT
1578 uint16_t comp_status;
1579 uint16_t scsi_status;
b7d2280c 1580 uint16_t ox_id;
1da177e4
LT
1581 uint8_t lscsi_status;
1582 int32_t resid;
5544213b
AV
1583 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1584 fw_resid_len;
9a853f71 1585 uint8_t *rsp_info, *sense_data;
e315cd28 1586 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1587 uint32_t handle;
1588 uint16_t que;
1589 struct req_que *req;
b7d2280c 1590 int logit = 1;
9a853f71
AV
1591
1592 sts = (sts_entry_t *) pkt;
1593 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1594 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1595 comp_status = le16_to_cpu(sts24->comp_status);
1596 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1597 } else {
1598 comp_status = le16_to_cpu(sts->comp_status);
1599 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1600 }
2afa19a9
AC
1601 handle = (uint32_t) LSW(sts->handle);
1602 que = MSW(sts->handle);
1603 req = ha->req_q_map[que];
a9083016 1604
1da177e4 1605 /* Fast path completion. */
9a853f71 1606 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2afa19a9 1607 qla2x00_process_completed_request(vha, req, handle);
1da177e4
LT
1608
1609 return;
1610 }
1611
1612 /* Validate handle. */
2afa19a9
AC
1613 if (handle < MAX_OUTSTANDING_COMMANDS) {
1614 sp = req->outstanding_cmds[handle];
1615 req->outstanding_cmds[handle] = NULL;
1da177e4
LT
1616 } else
1617 sp = NULL;
1618
1619 if (sp == NULL) {
cfb0919c 1620 ql_dbg(ql_dbg_io, vha, 0x3017,
7c3df132 1621 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1622
8f7daead
GM
1623 if (IS_QLA82XX(ha))
1624 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1625 else
1626 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1627 qla2xxx_wake_dpc(vha);
1da177e4
LT
1628 return;
1629 }
1630 cp = sp->cmd;
1631 if (cp == NULL) {
cfb0919c 1632 ql_dbg(ql_dbg_io, vha, 0x3018,
7c3df132
SK
1633 "Command already returned (0x%x/%p).\n",
1634 sts->handle, sp);
1da177e4
LT
1635
1636 return;
1637 }
1638
9a853f71 1639 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1640
bdf79621 1641 fcport = sp->fcport;
1da177e4 1642
b7d2280c 1643 ox_id = 0;
5544213b
AV
1644 sense_len = par_sense_len = rsp_info_len = resid_len =
1645 fw_resid_len = 0;
e428924c 1646 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
1647 if (scsi_status & SS_SENSE_LEN_VALID)
1648 sense_len = le32_to_cpu(sts24->sense_len);
1649 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1650 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1651 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1652 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1653 if (comp_status == CS_DATA_UNDERRUN)
1654 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1655 rsp_info = sts24->data;
1656 sense_data = sts24->data;
1657 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 1658 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 1659 par_sense_len = sizeof(sts24->data);
9a853f71 1660 } else {
0f00a206
LC
1661 if (scsi_status & SS_SENSE_LEN_VALID)
1662 sense_len = le16_to_cpu(sts->req_sense_length);
1663 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1664 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
1665 resid_len = le32_to_cpu(sts->residual_length);
1666 rsp_info = sts->rsp_info;
1667 sense_data = sts->req_sense_data;
5544213b 1668 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
1669 }
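 /*
  * FWI-2 capable adapters (ISP24xx and later) use the 24xx status
  * IOCB layout: response info and sense bytes share one inline data
  * area, and par_sense_len tracks how much of that area remains
  * available for sense data once any response info has been skipped.
  */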
1670
1da177e4
LT
1671 /* Check for any FCP transport errors. */
1672 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1673 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 1674 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 1675 sense_data += rsp_info_len;
5544213b
AV
1676 par_sense_len -= rsp_info_len;
1677 }
9a853f71 1678 if (rsp_info_len > 3 && rsp_info[3]) {
cfb0919c 1679 ql_dbg(ql_dbg_io, vha, 0x3019,
7c3df132
SK
1680 "FCP I/O protocol failure (0x%x/0x%x).\n",
1681 rsp_info_len, rsp_info[3]);
1da177e4
LT
1682
1683 cp->result = DID_BUS_BUSY << 16;
b7d2280c 1684 goto out;
1da177e4
LT
1685 }
1686 }
1687
3e8ce320
AV
1688 /* Check for overrun. */
1689 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1690 scsi_status & SS_RESIDUAL_OVER)
1691 comp_status = CS_DATA_OVERRUN;
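 /*
  * Firmware may report CS_COMPLETE with the residual-over bit set;
  * promoting it to CS_DATA_OVERRUN makes it fall through to the
  * default DID_ERROR handling in the switch below.
  */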
1692
1da177e4
LT
1693 /*
1694 * Based on Host and scsi status generate status code for Linux
1695 */
1696 switch (comp_status) {
1697 case CS_COMPLETE:
df7baa50 1698 case CS_QUEUE_FULL:
1da177e4
LT
1699 if (scsi_status == 0) {
1700 cp->result = DID_OK << 16;
1701 break;
1702 }
1703 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 1704 resid = resid_len;
385d70b4 1705 scsi_set_resid(cp, resid);
0da69df1
AV
1706
1707 if (!lscsi_status &&
385d70b4 1708 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 1709 cp->underflow)) {
cfb0919c 1710 ql_dbg(ql_dbg_io, vha, 0x301a,
7c3df132 1711 "Mid-layer underflow "
b7d2280c 1712 "detected (0x%x of 0x%x bytes).\n",
7c3df132 1713 resid, scsi_bufflen(cp));
0da69df1
AV
1714
1715 cp->result = DID_ERROR << 16;
1716 break;
1717 }
1da177e4 1718 }
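 /*
  * A clean SCSI status with fewer bytes transferred than the
  * midlayer's underflow limit is still a failed command, so DID_ERROR
  * is reported rather than silently returning short data.
  */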
1da177e4
LT
1719 cp->result = DID_OK << 16 | lscsi_status;
1720
df7baa50 1721 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
cfb0919c 1722 ql_dbg(ql_dbg_io, vha, 0x301b,
7c3df132 1723 "QUEUE FULL detected.\n");
df7baa50
AV
1724 break;
1725 }
b7d2280c 1726 logit = 0;
1da177e4
LT
1727 if (lscsi_status != SS_CHECK_CONDITION)
1728 break;
1729
b80ca4f7 1730 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1731 if (!(scsi_status & SS_SENSE_LEN_VALID))
1732 break;
1733
5544213b
AV
1734 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1735 rsp);
1da177e4
LT
1736 break;
1737
1738 case CS_DATA_UNDERRUN:
ed17c71b 1739 /* Use F/W calculated residual length. */
0f00a206
LC
1740 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1741 scsi_set_resid(cp, resid);
1742 if (scsi_status & SS_RESIDUAL_UNDER) {
1743 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
cfb0919c 1744 ql_dbg(ql_dbg_io, vha, 0x301d,
7c3df132
SK
1745 "Dropped frame(s) detected "
1746 "(0x%x of 0x%x bytes).\n",
1747 resid, scsi_bufflen(cp));
0f00a206
LC
1748
1749 cp->result = DID_ERROR << 16 | lscsi_status;
4e85e3d9 1750 goto check_scsi_status;
6acf8190 1751 }
ed17c71b 1752
0f00a206
LC
1753 if (!lscsi_status &&
1754 ((unsigned)(scsi_bufflen(cp) - resid) <
1755 cp->underflow)) {
cfb0919c 1756 ql_dbg(ql_dbg_io, vha, 0x301e,
7c3df132 1757 "Mid-layer underflow "
b7d2280c 1758 "detected (0x%x of 0x%x bytes).\n",
7c3df132 1759 resid, scsi_bufflen(cp));
e038a1be 1760
0f00a206
LC
1761 cp->result = DID_ERROR << 16;
1762 break;
1763 }
0374f55e 1764 } else {
cfb0919c 1765 ql_dbg(ql_dbg_io, vha, 0x301f,
7c3df132
SK
1766 "Dropped frame(s) detected (0x%x "
1767 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
0f00a206 1768
0374f55e
LC
1769 cp->result = DID_ERROR << 16 | lscsi_status;
1770 goto check_scsi_status;
1da177e4
LT
1771 }
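 /*
  * Both dropped-frame cases above (firmware and target residuals that
  * disagree while under-run is flagged, or an under-run completion
  * with no SS_RESIDUAL_UNDER at all) imply lost frames, so the command
  * is failed rather than trusting the short transfer.
  */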
1772
0f00a206 1773 cp->result = DID_OK << 16 | lscsi_status;
b7d2280c 1774 logit = 0;
0f00a206 1775
0374f55e 1776check_scsi_status:
1da177e4 1777 /*
fa2a1ce5 1778 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
1779 * Status.
1780 */
1781 if (lscsi_status != 0) {
ffec28a3 1782 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
cfb0919c 1783 ql_dbg(ql_dbg_io, vha, 0x3020,
7c3df132 1784 "QUEUE FULL detected.\n");
b7d2280c 1785 logit = 1;
ffec28a3
AV
1786 break;
1787 }
1da177e4
LT
1788 if (lscsi_status != SS_CHECK_CONDITION)
1789 break;
1790
b80ca4f7 1791 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1792 if (!(scsi_status & SS_SENSE_LEN_VALID))
1793 break;
1794
5544213b
AV
1795 qla2x00_handle_sense(sp, sense_data, par_sense_len,
1796 sense_len, rsp);
1da177e4
LT
1797 }
1798 break;
1799
1da177e4
LT
1800 case CS_PORT_LOGGED_OUT:
1801 case CS_PORT_CONFIG_CHG:
1802 case CS_PORT_BUSY:
1803 case CS_INCOMPLETE:
1804 case CS_PORT_UNAVAILABLE:
b7d2280c 1805 case CS_TIMEOUT:
ff454b01
CD
1806 case CS_RESET:
1807
056a4483
MC
1808 /*
1809 * We are going to have the fc class block the rport
1810 * while we try to recover so instruct the mid layer
1811 * to requeue until the class decides how to handle this.
1812 */
1813 cp->result = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
1814
1815 if (comp_status == CS_TIMEOUT) {
1816 if (IS_FWI2_CAPABLE(ha))
1817 break;
1818 else if ((le16_to_cpu(sts->status_flags) &
1819 SF_LOGOUT_SENT) == 0)
1820 break;
1821 }
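 /*
  * For CS_TIMEOUT the port is only marked lost further below on legacy
  * ISPs that have already sent a logout (SF_LOGOUT_SENT); otherwise
  * the command is simply returned with DID_TRANSPORT_DISRUPTED and the
  * rport state machine decides how to recover.
  */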
1822
7c3df132
SK
1823 ql_dbg(ql_dbg_io, vha, 0x3021,
1824 "Port down status: port-state=0x%x.\n",
1825 atomic_read(&fcport->state));
b7d2280c 1826
a7a28504 1827 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 1828 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1829 break;
1830
1da177e4 1831 case CS_ABORTED:
1da177e4
LT
1832 cp->result = DID_RESET << 16;
1833 break;
bad75002
AE
1834
1835 case CS_DIF_ERROR:
8cb2049c 1836 logit = qla2x00_handle_dif_error(sp, sts24);
bad75002 1837 break;
1da177e4 1838 default:
1da177e4
LT
1839 cp->result = DID_ERROR << 16;
1840 break;
1841 }
1842
b7d2280c
AV
1843out:
1844 if (logit)
7c3df132
SK
1845 ql_dbg(ql_dbg_io, vha, 0x3022,
1846 "FCP command status: 0x%x-0x%x (0x%x) "
cfb0919c
CD
1847 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
1848 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
7c3df132 1849 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
cfb0919c
CD
1850 comp_status, scsi_status, cp->result, vha->host_no,
1851 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
1852 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
1853 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1854 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
1855 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
7c3df132 1856 resid_len, fw_resid_len);
b7d2280c 1857
2afa19a9 1858 if (rsp->status_srb == NULL)
73208dfd 1859 qla2x00_sp_compl(ha, sp);
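 /*
  * rsp->status_srb is left non-NULL by qla2x00_handle_sense() when the
  * sense data does not fit in the status IOCB; completion is then
  * deferred until the continuation entries have been consumed by
  * qla2x00_status_cont_entry().
  */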
1da177e4
LT
1860}
1861
1862/**
1863 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1864 * @rsp: response queue
1865 * @pkt: Entry pointer
1866 *
1867 * Extended sense data.
1868 */
1869static void
2afa19a9 1870qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4
LT
1871{
1872 uint8_t sense_sz = 0;
2afa19a9 1873 struct qla_hw_data *ha = rsp->hw;
7c3df132 1874 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2afa19a9 1875 srb_t *sp = rsp->status_srb;
1da177e4
LT
1876 struct scsi_cmnd *cp;
1877
1878 if (sp != NULL && sp->request_sense_length != 0) {
1879 cp = sp->cmd;
1880 if (cp == NULL) {
7c3df132
SK
1881 ql_log(ql_log_warn, vha, 0x3025,
1882 "cmd is NULL: already returned to OS (sp=%p).\n",
fa2a1ce5 1883 sp);
1da177e4 1884
2afa19a9 1885 rsp->status_srb = NULL;
1da177e4
LT
1886 return;
1887 }
1888
1889 if (sp->request_sense_length > sizeof(pkt->data)) {
1890 sense_sz = sizeof(pkt->data);
1891 } else {
1892 sense_sz = sp->request_sense_length;
1893 }
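 /*
  * Each continuation IOCB carries at most sizeof(pkt->data) bytes of
  * sense; the data is copied out in chunks until request_sense_length
  * reaches zero, at which point the command is completed below.
  */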
1894
1895 /* Move sense data. */
e428924c 1896 if (IS_FWI2_CAPABLE(ha))
9a853f71 1897 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1da177e4 1898 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
7c3df132
SK
1899 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1900 sp->request_sense_ptr, sense_sz);
1da177e4
LT
1901
1902 sp->request_sense_ptr += sense_sz;
1903 sp->request_sense_length -= sense_sz;
1904
1905 /* Place command on done queue. */
1906 if (sp->request_sense_length == 0) {
2afa19a9 1907 rsp->status_srb = NULL;
73208dfd 1908 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1909 }
1910 }
1911}
1912
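/*
 * qla2x00_free_sp_ctx() returns 0 when the SRB carried a driver-internal
 * context (login, logout, task management, ADISC or a BSG ELS/CT
 * pass-through) and was completed here, and 1 when the caller still owns a
 * plain SCSI command and must complete it itself.
 */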
c4631191
GM
1913static int
1914qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
1915{
1916 struct qla_hw_data *ha = vha->hw;
1917 struct srb_ctx *ctx;
1918
1919 if (!sp->ctx)
1920 return 1;
1921
1922 ctx = sp->ctx;
1923
1924 if (ctx->type == SRB_LOGIN_CMD ||
1925 ctx->type == SRB_LOGOUT_CMD ||
1926 ctx->type == SRB_TM_CMD) {
1927 ctx->u.iocb_cmd->done(sp);
1928 return 0;
1929 } else if (ctx->type == SRB_ADISC_CMD) {
1930 ctx->u.iocb_cmd->free(sp);
1931 return 0;
1932 } else {
1933 struct fc_bsg_job *bsg_job;
1934
1935 bsg_job = ctx->u.bsg_job;
1936 if (ctx->type == SRB_ELS_CMD_HST ||
1937 ctx->type == SRB_CT_CMD)
1938 kfree(sp->fcport);
1939
1940 bsg_job->reply->reply_data.ctels_reply.status =
1941 FC_CTELS_STATUS_OK;
1942 bsg_job->reply->result = DID_ERROR << 16;
1943 bsg_job->reply->reply_payload_rcv_len = 0;
1944 kfree(sp->ctx);
1945 mempool_free(sp, ha->srb_mempool);
1946 bsg_job->job_done(bsg_job);
1947 return 0;
1948 }
1949 return 1;
1950}
1951
1da177e4
LT
1952/**
1953 * qla2x00_error_entry() - Process an error entry.
1954 * @vha: SCSI driver HA context
 * @rsp: response queue
1955 * @pkt: Entry pointer
1956 */
1957static void
73208dfd 1958qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
1959{
1960 srb_t *sp;
e315cd28 1961 struct qla_hw_data *ha = vha->hw;
c4631191 1962 const char func[] = "ERROR-IOCB";
2afa19a9
AC
1963 uint16_t que = MSW(pkt->handle);
1964 struct req_que *req = ha->req_q_map[que];
7c3df132 1965
1da177e4 1966 if (pkt->entry_status & RF_INV_E_ORDER)
7c3df132
SK
1967 ql_dbg(ql_dbg_async, vha, 0x502a,
1968 "Invalid Entry Order.\n");
1da177e4 1969 else if (pkt->entry_status & RF_INV_E_COUNT)
7c3df132
SK
1970 ql_dbg(ql_dbg_async, vha, 0x502b,
1971 "Invalid Entry Count.\n");
1da177e4 1972 else if (pkt->entry_status & RF_INV_E_PARAM)
7c3df132
SK
1973 ql_dbg(ql_dbg_async, vha, 0x502c,
1974 "Invalid Entry Parameter.\n");
1da177e4 1975 else if (pkt->entry_status & RF_INV_E_TYPE)
7c3df132
SK
1976 ql_dbg(ql_dbg_async, vha, 0x502d,
1977 "Invalid Entry Type.\n");
1da177e4 1978 else if (pkt->entry_status & RF_BUSY)
7c3df132
SK
1979 ql_dbg(ql_dbg_async, vha, 0x502e,
1980 "Busy.\n");
1da177e4 1981 else
7c3df132
SK
1982 ql_dbg(ql_dbg_async, vha, 0x502f,
1983 "UNKNOWN flag error.\n");
1da177e4 1984
c4631191 1985 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1da177e4 1986 if (sp) {
c4631191
GM
1987 if (qla2x00_free_sp_ctx(vha, sp)) {
1988 if (pkt->entry_status &
1989 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1990 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1991 sp->cmd->result = DID_ERROR << 16;
1992 } else if (pkt->entry_status & RF_BUSY) {
1993 sp->cmd->result = DID_BUS_BUSY << 16;
1994 } else {
1995 sp->cmd->result = DID_ERROR << 16;
1996 }
1997 qla2x00_sp_compl(ha, sp);
1da177e4 1998 }
9a853f71 1999 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
8f7daead
GM
2000 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
2001 || pkt->entry_type == COMMAND_TYPE_6) {
7c3df132
SK
2002 ql_log(ql_log_warn, vha, 0x5030,
2003 "Error entry - invalid handle.\n");
1da177e4 2004
8f7daead
GM
2005 if (IS_QLA82XX(ha))
2006 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2007 else
2008 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 2009 qla2xxx_wake_dpc(vha);
1da177e4
LT
2010 }
2011}
2012
9a853f71
AV
2013/**
2014 * qla24xx_mbx_completion() - Process mailbox command completions.
2015 * @vha: SCSI driver HA context
2016 * @mb0: Mailbox0 register
2017 */
2018static void
e315cd28 2019qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
2020{
2021 uint16_t cnt;
4fa94f83 2022 uint32_t mboxes;
9a853f71 2023 uint16_t __iomem *wptr;
e315cd28 2024 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
2025 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2026
4fa94f83
AV
2027 /* Read all mbox registers? */
2028 mboxes = (1 << ha->mbx_count) - 1;
2029 if (!ha->mcp)
2030 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2031 else
2032 mboxes = ha->mcp->in_mb;
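 /*
  * Only the registers named in the caller's in_mb bitmap are read
  * back; the bitmap is consumed one bit per register as the loop below
  * walks mailbox1 through mailboxN.
  */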
2033
9a853f71
AV
2034 /* Load return mailbox registers. */
2035 ha->flags.mbox_int = 1;
2036 ha->mailbox_out[0] = mb0;
4fa94f83 2037 mboxes >>= 1;
9a853f71
AV
2038 wptr = (uint16_t __iomem *)&reg->mailbox1;
2039
2040 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
4fa94f83
AV
2041 if (mboxes & BIT_0)
2042 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2043
2044 mboxes >>= 1;
9a853f71
AV
2045 wptr++;
2046 }
9a853f71
AV
2047}
2048
2049/**
2050 * qla24xx_process_response_queue() - Process response queue entries.
2051 * @vha: SCSI driver HA context
 * @rsp: response queue
2052 */
2afa19a9
AC
2053void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2054 struct rsp_que *rsp)
9a853f71 2055{
9a853f71 2056 struct sts_entry_24xx *pkt;
a9083016 2057 struct qla_hw_data *ha = vha->hw;
9a853f71 2058
e315cd28 2059 if (!vha->flags.online)
9a853f71
AV
2060 return;
2061
e315cd28
AC
2062 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2063 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2064
e315cd28
AC
2065 rsp->ring_index++;
2066 if (rsp->ring_index == rsp->length) {
2067 rsp->ring_index = 0;
2068 rsp->ring_ptr = rsp->ring;
9a853f71 2069 } else {
e315cd28 2070 rsp->ring_ptr++;
9a853f71
AV
2071 }
2072
2073 if (pkt->entry_status != 0) {
73208dfd 2074 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
9a853f71
AV
2075 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2076 wmb();
2077 continue;
2078 }
2079
2080 switch (pkt->entry_type) {
2081 case STATUS_TYPE:
73208dfd 2082 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2083 break;
2084 case STATUS_CONT_TYPE:
2afa19a9 2085 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2086 break;
2c3dfe3f 2087 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2088 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2089 (struct vp_rpt_id_entry_24xx *)pkt);
2090 break;
ac280b67
AV
2091 case LOGINOUT_PORT_IOCB_TYPE:
2092 qla24xx_logio_entry(vha, rsp->req,
2093 (struct logio_entry_24xx *)pkt);
2094 break;
3822263e
MI
2095 case TSK_MGMT_IOCB_TYPE:
2096 qla24xx_tm_iocb_entry(vha, rsp->req,
2097 (struct tsk_mgmt_entry *)pkt);
2098 break;
9a069e19
GM
2099 case CT_IOCB_TYPE:
2100 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
9a069e19
GM
2101 break;
2102 case ELS_IOCB_TYPE:
2103 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2104 break;
54883291
SK
2105 case MARKER_TYPE:
2106 /* Do nothing in this case, this check is to prevent it
2107 * from falling into default case
2108 */
2109 break;
9a853f71
AV
2110 default:
2111 /* Type Not Supported. */
7c3df132
SK
2112 ql_dbg(ql_dbg_async, vha, 0x5042,
2113 "Received unknown response pkt type %x "
9a853f71 2114 "entry status=%x.\n",
7c3df132 2115 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2116 break;
2117 }
2118 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2119 wmb();
2120 }
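 /*
  * Processed entries are stamped RESPONSE_PROCESSED (with a write
  * barrier) so the loop above can tell freshly written firmware
  * entries from ones the driver has already consumed.
  */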
2121
2122 /* Adjust ring index */
a9083016
GM
2123 if (IS_QLA82XX(ha)) {
2124 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2125 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2126 } else
2127 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2128}
2129
05236a05 2130static void
e315cd28 2131qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2132{
2133 int rval;
2134 uint32_t cnt;
e315cd28 2135 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2136 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2137
6246b8a1 2138 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
05236a05
AV
2139 return;
2140
2141 rval = QLA_SUCCESS;
2142 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2143 RD_REG_DWORD(&reg->iobase_addr);
2144 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2145 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2146 rval == QLA_SUCCESS; cnt--) {
2147 if (cnt) {
2148 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2149 udelay(10);
2150 } else
2151 rval = QLA_FUNCTION_TIMEOUT;
2152 }
2153 if (rval == QLA_SUCCESS)
2154 goto next_test;
2155
2156 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2157 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2158 rval == QLA_SUCCESS; cnt--) {
2159 if (cnt) {
2160 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2161 udelay(10);
2162 } else
2163 rval = QLA_FUNCTION_TIMEOUT;
2164 }
2165 if (rval != QLA_SUCCESS)
2166 goto done;
2167
2168next_test:
2169 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2170 ql_log(ql_log_info, vha, 0x504c,
2171 "Additional code -- 0x55AA.\n");
05236a05
AV
2172
2173done:
2174 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2175 RD_REG_DWORD(&reg->iobase_window);
2176}
2177
9a853f71 2178/**
6246b8a1 2179 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
9a853f71
AV
2180 * @irq:
2181 * @dev_id: SCSI driver HA context
9a853f71
AV
2182 *
2183 * Called by system whenever the host adapter generates an interrupt.
2184 *
2185 * Returns handled flag.
2186 */
2187irqreturn_t
7d12e780 2188qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2189{
e315cd28
AC
2190 scsi_qla_host_t *vha;
2191 struct qla_hw_data *ha;
9a853f71
AV
2192 struct device_reg_24xx __iomem *reg;
2193 int status;
9a853f71
AV
2194 unsigned long iter;
2195 uint32_t stat;
2196 uint32_t hccr;
2197 uint16_t mb[4];
e315cd28 2198 struct rsp_que *rsp;
43fac4d9 2199 unsigned long flags;
9a853f71 2200
e315cd28
AC
2201 rsp = (struct rsp_que *) dev_id;
2202 if (!rsp) {
9a853f71 2203 printk(KERN_INFO
7c3df132 2204 "%s(): NULL response queue pointer.\n", __func__);
9a853f71
AV
2205 return IRQ_NONE;
2206 }
2207
e315cd28 2208 ha = rsp->hw;
9a853f71
AV
2209 reg = &ha->iobase->isp24;
2210 status = 0;
2211
85880801
AV
2212 if (unlikely(pci_channel_offline(ha->pdev)))
2213 return IRQ_HANDLED;
2214
43fac4d9 2215 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2216 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2217 for (iter = 50; iter--; ) {
2218 stat = RD_REG_DWORD(&reg->host_status);
2219 if (stat & HSRX_RISC_PAUSED) {
85880801 2220 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2221 break;
2222
9a853f71
AV
2223 hccr = RD_REG_DWORD(&reg->hccr);
2224
7c3df132
SK
2225 ql_log(ql_log_warn, vha, 0x504b,
2226 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2227 hccr);
05236a05 2228
e315cd28 2229 qla2xxx_check_risc_status(vha);
05236a05 2230
e315cd28
AC
2231 ha->isp_ops->fw_dump(vha, 1);
2232 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2233 break;
2234 } else if ((stat & HSRX_RISC_INT) == 0)
2235 break;
2236
2237 switch (stat & 0xff) {
2238 case 0x1:
2239 case 0x2:
2240 case 0x10:
2241 case 0x11:
e315cd28 2242 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2243 status |= MBX_INTERRUPT;
2244
2245 break;
2246 case 0x12:
2247 mb[0] = MSW(stat);
2248 mb[1] = RD_REG_WORD(&reg->mailbox1);
2249 mb[2] = RD_REG_WORD(&reg->mailbox2);
2250 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2251 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
2252 break;
2253 case 0x13:
73208dfd 2254 case 0x14:
2afa19a9 2255 qla24xx_process_response_queue(vha, rsp);
9a853f71
AV
2256 break;
2257 default:
7c3df132
SK
2258 ql_dbg(ql_dbg_async, vha, 0x504f,
2259 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2260 break;
2261 }
2262 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2263 RD_REG_DWORD_RELAXED(&reg->hccr);
2264 }
43fac4d9 2265 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71
AV
2266
2267 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2268 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 2269 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2270 complete(&ha->mbx_intr_comp);
9a853f71
AV
2271 }
2272
2273 return IRQ_HANDLED;
2274}
2275
a8488abe
AV
2276static irqreturn_t
2277qla24xx_msix_rsp_q(int irq, void *dev_id)
2278{
e315cd28
AC
2279 struct qla_hw_data *ha;
2280 struct rsp_que *rsp;
a8488abe 2281 struct device_reg_24xx __iomem *reg;
2afa19a9 2282 struct scsi_qla_host *vha;
0f19bc68 2283 unsigned long flags;
a8488abe 2284
e315cd28
AC
2285 rsp = (struct rsp_que *) dev_id;
2286 if (!rsp) {
2287 printk(KERN_INFO
7c3df132 2288 "%s(): NULL response queue pointer.\n", __func__);
e315cd28
AC
2289 return IRQ_NONE;
2290 }
2291 ha = rsp->hw;
a8488abe
AV
2292 reg = &ha->iobase->isp24;
2293
0f19bc68 2294 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2295
a67093d4 2296 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2297 qla24xx_process_response_queue(vha, rsp);
3155754a 2298 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2299 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2300 RD_REG_DWORD_RELAXED(&reg->hccr);
2301 }
0f19bc68 2302 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2303
2304 return IRQ_HANDLED;
2305}
2306
68ca949c
AC
2307static irqreturn_t
2308qla25xx_msix_rsp_q(int irq, void *dev_id)
2309{
2310 struct qla_hw_data *ha;
2311 struct rsp_que *rsp;
3155754a 2312 struct device_reg_24xx __iomem *reg;
0f19bc68 2313 unsigned long flags;
68ca949c
AC
2314
2315 rsp = (struct rsp_que *) dev_id;
2316 if (!rsp) {
2317 printk(KERN_INFO
7c3df132 2318 "%s(): NULL response queue pointer.\n", __func__);
68ca949c
AC
2319 return IRQ_NONE;
2320 }
2321 ha = rsp->hw;
2322
3155754a 2323 /* Clear the interrupt, if enabled, for this response queue */
d424754c 2324 if (!ha->flags.disable_msix_handshake) {
3155754a 2325 reg = &ha->iobase->isp24;
0f19bc68 2326 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2327 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2328 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2329 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2330 }
68ca949c
AC
2331 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2332
2333 return IRQ_HANDLED;
2334}
2335
a8488abe
AV
2336static irqreturn_t
2337qla24xx_msix_default(int irq, void *dev_id)
2338{
e315cd28
AC
2339 scsi_qla_host_t *vha;
2340 struct qla_hw_data *ha;
2341 struct rsp_que *rsp;
a8488abe
AV
2342 struct device_reg_24xx __iomem *reg;
2343 int status;
a8488abe
AV
2344 uint32_t stat;
2345 uint32_t hccr;
2346 uint16_t mb[4];
0f19bc68 2347 unsigned long flags;
a8488abe 2348
e315cd28
AC
2349 rsp = (struct rsp_que *) dev_id;
2350 if (!rsp) {
7c3df132
SK
2351 printk(KERN_INFO
2352 "%s(): NULL response queue pointer.\n", __func__);
e315cd28
AC
2353 return IRQ_NONE;
2354 }
2355 ha = rsp->hw;
a8488abe
AV
2356 reg = &ha->iobase->isp24;
2357 status = 0;
2358
0f19bc68 2359 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2360 vha = pci_get_drvdata(ha->pdev);
87f27015 2361 do {
a8488abe
AV
2362 stat = RD_REG_DWORD(&reg->host_status);
2363 if (stat & HSRX_RISC_PAUSED) {
85880801 2364 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2365 break;
2366
a8488abe
AV
2367 hccr = RD_REG_DWORD(&reg->hccr);
2368
7c3df132
SK
2369 ql_log(ql_log_info, vha, 0x5050,
2370 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2371 hccr);
05236a05 2372
e315cd28 2373 qla2xxx_check_risc_status(vha);
05236a05 2374
e315cd28
AC
2375 ha->isp_ops->fw_dump(vha, 1);
2376 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2377 break;
2378 } else if ((stat & HSRX_RISC_INT) == 0)
2379 break;
2380
2381 switch (stat & 0xff) {
2382 case 0x1:
2383 case 0x2:
2384 case 0x10:
2385 case 0x11:
e315cd28 2386 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2387 status |= MBX_INTERRUPT;
2388
2389 break;
2390 case 0x12:
2391 mb[0] = MSW(stat);
2392 mb[1] = RD_REG_WORD(&reg->mailbox1);
2393 mb[2] = RD_REG_WORD(&reg->mailbox2);
2394 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2395 qla2x00_async_event(vha, rsp, mb);
a8488abe
AV
2396 break;
2397 case 0x13:
73208dfd 2398 case 0x14:
2afa19a9 2399 qla24xx_process_response_queue(vha, rsp);
a8488abe
AV
2400 break;
2401 default:
7c3df132
SK
2402 ql_dbg(ql_dbg_async, vha, 0x5051,
2403 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2404 break;
2405 }
2406 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2407 } while (0);
0f19bc68 2408 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2409
2410 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2411 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 2412 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2413 complete(&ha->mbx_intr_comp);
a8488abe 2414 }
a8488abe
AV
2415 return IRQ_HANDLED;
2416}
2417
2418/* Interrupt handling helpers. */
2419
2420struct qla_init_msix_entry {
a8488abe 2421 const char *name;
476834c2 2422 irq_handler_t handler;
a8488abe
AV
2423};
2424
68ca949c 2425static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2426 { "qla2xxx (default)", qla24xx_msix_default },
2427 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2428 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2429};
2430
a9083016
GM
2431static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2432 { "qla2xxx (default)", qla82xx_msix_default },
2433 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2434};
2435
a8488abe 2436static void
e315cd28 2437qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2438{
2439 int i;
2440 struct qla_msix_entry *qentry;
7c3df132 2441 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2442
73208dfd
AC
2443 for (i = 0; i < ha->msix_count; i++) {
2444 qentry = &ha->msix_entries[i];
a8488abe 2445 if (qentry->have_irq)
73208dfd 2446 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2447 }
2448 pci_disable_msix(ha->pdev);
73208dfd
AC
2449 kfree(ha->msix_entries);
2450 ha->msix_entries = NULL;
2451 ha->flags.msix_enabled = 0;
7c3df132
SK
2452 ql_dbg(ql_dbg_init, vha, 0x0042,
2453 "Disabled the MSI.\n");
a8488abe
AV
2454}
2455
2456static int
73208dfd 2457qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2458{
ad038fa8 2459#define MIN_MSIX_COUNT 2
a8488abe 2460 int i, ret;
73208dfd 2461 struct msix_entry *entries;
a8488abe 2462 struct qla_msix_entry *qentry;
7c3df132 2463 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2464
2465 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2466 GFP_KERNEL);
7c3df132
SK
2467 if (!entries) {
2468 ql_log(ql_log_warn, vha, 0x00bc,
2469 "Failed to allocate memory for msix_entry.\n");
73208dfd 2470 return -ENOMEM;
7c3df132 2471 }
a8488abe 2472
73208dfd
AC
2473 for (i = 0; i < ha->msix_count; i++)
2474 entries[i].entry = i;
a8488abe 2475
73208dfd 2476 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2477 if (ret) {
ad038fa8
LC
2478 if (ret < MIN_MSIX_COUNT)
2479 goto msix_failed;
2480
7c3df132
SK
2481 ql_log(ql_log_warn, vha, 0x00c6,
2482 "MSI-X: Failed to enable support "
2483 "-- %d/%d\n Retry with %d vectors.\n",
2484 ha->msix_count, ret, ret);
73208dfd
AC
2485 ha->msix_count = ret;
2486 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2487 if (ret) {
ad038fa8 2488msix_failed:
7c3df132
SK
2489 ql_log(ql_log_fatal, vha, 0x00c7,
2490 "MSI-X: Failed to enable support, "
2491 "giving up -- %d/%d.\n",
2492 ha->msix_count, ret);
73208dfd
AC
2493 goto msix_out;
2494 }
2afa19a9 2495 ha->max_rsp_queues = ha->msix_count - 1;
73208dfd
AC
2496 }
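 /*
  * If the PCI core grants fewer vectors than requested, the allocation
  * is retried above with the granted count; one vector always serves
  * the default queue, so the remainder bounds max_rsp_queues.
  */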
2497 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2498 ha->msix_count, GFP_KERNEL);
2499 if (!ha->msix_entries) {
7c3df132
SK
2500 ql_log(ql_log_fatal, vha, 0x00c8,
2501 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2502 ret = -ENOMEM;
a8488abe
AV
2503 goto msix_out;
2504 }
2505 ha->flags.msix_enabled = 1;
2506
73208dfd
AC
2507 for (i = 0; i < ha->msix_count; i++) {
2508 qentry = &ha->msix_entries[i];
2509 qentry->vector = entries[i].vector;
2510 qentry->entry = entries[i].entry;
a8488abe 2511 qentry->have_irq = 0;
73208dfd 2512 qentry->rsp = NULL;
a8488abe
AV
2513 }
2514
2afa19a9
AC
2515 /* Enable MSI-X vectors for the base queue */
2516 for (i = 0; i < 2; i++) {
2517 qentry = &ha->msix_entries[i];
a9083016
GM
2518 if (IS_QLA82XX(ha)) {
2519 ret = request_irq(qentry->vector,
2520 qla82xx_msix_entries[i].handler,
2521 0, qla82xx_msix_entries[i].name, rsp);
2522 } else {
2523 ret = request_irq(qentry->vector,
2524 msix_entries[i].handler,
2525 0, msix_entries[i].name, rsp);
2526 }
2afa19a9 2527 if (ret) {
7c3df132
SK
2528 ql_log(ql_log_fatal, vha, 0x00cb,
2529 "MSI-X: unable to register handler -- %x/%d.\n",
2530 qentry->vector, ret);
2afa19a9
AC
2531 qla24xx_disable_msix(ha);
2532 ha->mqenable = 0;
2533 goto msix_out;
2534 }
2535 qentry->have_irq = 1;
2536 qentry->rsp = rsp;
2537 rsp->msix = qentry;
73208dfd 2538 }
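 /*
  * Vector 0 (default) handles mailbox completions, async events and
  * the base response queue; vector 1 handles response-queue updates.
  * Additional per-queue vectors are wired up later through
  * qla25xx_request_irq().
  */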
73208dfd
AC
2539
2540 /* Enable MSI-X vector for response queue update for queue 0 */
6246b8a1
GM
2541 if (IS_QLA83XX(ha)) {
2542 if (ha->msixbase && ha->mqiobase &&
2543 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2544 ha->mqenable = 1;
2545 } else
2546 if (ha->mqiobase
2547 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2548 ha->mqenable = 1;
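 /*
  * Multiqueue is only enabled when the chip exposes the MQ register
  * window (mqiobase, plus msixbase on ISP83xx) and MSI-X left room for
  * more than one request or response queue.
  */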
7c3df132
SK
2549 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2550 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2551 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2552 ql_dbg(ql_dbg_init, vha, 0x0055,
2553 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2554 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2555
a8488abe 2556msix_out:
73208dfd 2557 kfree(entries);
a8488abe
AV
2558 return ret;
2559}
2560
2561int
73208dfd 2562qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2563{
2564 int ret;
963b0fdd 2565 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2566 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2567
2568 /* If possible, enable MSI-X. */
6246b8a1
GM
2569 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2570 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
6377a7ae
BH
2571 goto skip_msi;
2572
2573 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2574 (ha->pdev->subsystem_device == 0x7040 ||
2575 ha->pdev->subsystem_device == 0x7041 ||
2576 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2577 ql_log(ql_log_warn, vha, 0x0034,
2578 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2579 ha->pdev->subsystem_vendor,
7c3df132 2580 ha->pdev->subsystem_device);
6377a7ae
BH
2581 goto skip_msi;
2582 }
a8488abe 2583
42cd4f5d 2584 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
7c3df132
SK
2585 ql_log(ql_log_warn, vha, 0x0035,
2586 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
42cd4f5d 2587 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
a8488abe
AV
2588 goto skip_msix;
2589 }
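 /*
  * The checks above bypass interrupt modes the driver does not use on
  * these parts: the listed HP OEM ISP2432 subsystem IDs drop straight
  * to INTa, while ISP2432 silicon older than QLA_MSIX_CHIP_REV_24XX
  * falls back from MSI-X to MSI.
  */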
2590
73208dfd 2591 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2592 if (!ret) {
7c3df132
SK
2593 ql_dbg(ql_dbg_init, vha, 0x0036,
2594 "MSI-X: Enabled (0x%X, 0x%X).\n",
2595 ha->chip_revision, ha->fw_attributes);
963b0fdd 2596 goto clear_risc_ints;
a8488abe 2597 }
7c3df132
SK
2598 ql_log(ql_log_info, vha, 0x0037,
2599 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2600skip_msix:
cbedb601 2601
3a03eb79
AV
2602 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2603 !IS_QLA8001(ha))
cbedb601
AV
2604 goto skip_msi;
2605
2606 ret = pci_enable_msi(ha->pdev);
2607 if (!ret) {
7c3df132
SK
2608 ql_dbg(ql_dbg_init, vha, 0x0038,
2609 "MSI: Enabled.\n");
cbedb601 2610 ha->flags.msi_enabled = 1;
a9083016 2611 } else
7c3df132
SK
2612 ql_log(ql_log_warn, vha, 0x0039,
2613 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
cbedb601
AV
2614skip_msi:
2615
fd34f556 2616 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2617 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2618 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2619 if (ret) {
7c3df132 2620 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2621 "Failed to reserve interrupt %d already in use.\n",
2622 ha->pdev->irq);
963b0fdd
AV
2623 goto fail;
2624 }
7992abfc 2625
963b0fdd
AV
2626clear_risc_ints:
2627
3a03eb79
AV
2628 /*
2629 * FIXME: Noted that 8014s were being dropped during NK testing.
2630 * Timing deltas during MSI-X/INTa transitions?
2631 */
6246b8a1 2632 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
3a03eb79 2633 goto fail;
c6952483 2634 spin_lock_irq(&ha->hardware_lock);
963b0fdd
AV
2635 if (IS_FWI2_CAPABLE(ha)) {
2636 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2637 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2638 } else {
2639 WRT_REG_WORD(&reg->isp.semaphore, 0);
2640 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2641 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
a8488abe 2642 }
c6952483 2643 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2644
963b0fdd 2645fail:
a8488abe
AV
2646 return ret;
2647}
2648
2649void
e315cd28 2650qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2651{
e315cd28 2652 struct qla_hw_data *ha = vha->hw;
73208dfd 2653 struct rsp_que *rsp = ha->rsp_q_map[0];
a8488abe
AV
2654
2655 if (ha->flags.msix_enabled)
2656 qla24xx_disable_msix(ha);
90a86fc0 2657 else if (ha->flags.msi_enabled) {
e315cd28 2658 free_irq(ha->pdev->irq, rsp);
cbedb601 2659 pci_disable_msi(ha->pdev);
90a86fc0
JC
2660 } else
2661 free_irq(ha->pdev->irq, rsp);
a8488abe 2662}
e315cd28 2663
73208dfd
AC
2664
2665int qla25xx_request_irq(struct rsp_que *rsp)
2666{
2667 struct qla_hw_data *ha = rsp->hw;
2afa19a9 2668 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 2669 struct qla_msix_entry *msix = rsp->msix;
7c3df132 2670 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2671 int ret;
2672
2673 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2674 if (ret) {
7c3df132
SK
2675 ql_log(ql_log_fatal, vha, 0x00e6,
2676 "MSI-X: Unable to register handler -- %x/%d.\n",
2677 msix->vector, ret);
73208dfd
AC
2678 return ret;
2679 }
2680 msix->have_irq = 1;
2681 msix->rsp = rsp;
2682 return ret;
2683}