drivers/scsi/lpfc/lpfc_sli.c
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
9413afff 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
c44ce173 5 * EMULEX and SLI are trademarks of Emulex. *
dea3101e 6 * www.emulex.com *
c44ce173 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/delay.h>
26
91886523 27#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
f888ba3c 31#include <scsi/scsi_transport_fc.h>
32
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_disc.h"
36#include "lpfc_scsi.h"
37#include "lpfc.h"
38#include "lpfc_crtn.h"
39#include "lpfc_logmsg.h"
40#include "lpfc_compat.h"
41
42/*
43 * Define macro to log: Mailbox command x%x cannot issue Data
44 * This allows multiple uses of lpfc_msgBlk0311
45 * w/o perturbing log msg utility.
46 */
47#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
48 lpfc_printf_log(phba, \
49 KERN_INFO, \
50 LOG_MBOX | LOG_SLI, \
51 "%d:0311 Mailbox command x%x cannot issue " \
52 "Data: x%x x%x x%x\n", \
53 phba->brd_no, \
54 mb->mbxCommand, \
2e0fef85 55 phba->pport->port_state, \
dea3101e 56 psli->sli_flag, \
2e0fef85 57 flag)
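/*
 * Illustrative usage sketch only (no call site appears in this excerpt);
 * the macro is intended for the mailbox issue path, where mb, psli and
 * flag are locals of the caller:
 *
 *	LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
 *	return MBX_NOT_FINISHED;
 */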
58
59
60/* There are only four IOCB completion types. */
61typedef enum _lpfc_iocb_type {
62 LPFC_UNKNOWN_IOCB,
63 LPFC_UNSOL_IOCB,
64 LPFC_SOL_IOCB,
65 LPFC_ABORT_IOCB
66} lpfc_iocb_type;
67
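/*
 * __lpfc_sli_get_iocbq() pops a free iocbq off phba->lpfc_iocb_list; it is
 * the lock-free variant and expects the caller to hold phba->hbalock.
 * lpfc_sli_get_iocbq() below is the locking wrapper.  Typical caller
 * pattern (sketch only):
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return IOCB_ERROR;
 *	...fill in iocbq->iocb, then issue it...
 *	lpfc_sli_release_iocbq(phba, iocbq);	(error/cleanup paths)
 */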
68static struct lpfc_iocbq *
69__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
70{
71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 struct lpfc_iocbq * iocbq = NULL;
73
74 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
75 return iocbq;
76}
77
78struct lpfc_iocbq *
79lpfc_sli_get_iocbq(struct lpfc_hba *phba)
80{
81 struct lpfc_iocbq * iocbq = NULL;
82 unsigned long iflags;
83
84 spin_lock_irqsave(&phba->hbalock, iflags);
85 iocbq = __lpfc_sli_get_iocbq(phba);
86 spin_unlock_irqrestore(&phba->hbalock, iflags);
87 return iocbq;
88}
89
604a3e30 90void
2e0fef85 91__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
604a3e30 92{
2e0fef85 93 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
94
95 /*
96 * Clean all volatile data fields, preserve iotag and node struct.
97 */
98 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
99 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
100}
101
102void
103lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
104{
105 unsigned long iflags;
106
107 /*
108 * Clean all volatile data fields, preserve iotag and node struct.
109 */
110 spin_lock_irqsave(&phba->hbalock, iflags);
111 __lpfc_sli_release_iocbq(phba, iocbq);
112 spin_unlock_irqrestore(&phba->hbalock, iflags);
113}
114
115/*
116 * Translate the iocb command to an iocb command type used to decide the final
117 * disposition of each completed IOCB.
118 */
119static lpfc_iocb_type
120lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
121{
122 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
123
124 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
125 return 0;
126
127 switch (iocb_cmnd) {
128 case CMD_XMIT_SEQUENCE_CR:
129 case CMD_XMIT_SEQUENCE_CX:
130 case CMD_XMIT_BCAST_CN:
131 case CMD_XMIT_BCAST_CX:
132 case CMD_ELS_REQUEST_CR:
133 case CMD_ELS_REQUEST_CX:
134 case CMD_CREATE_XRI_CR:
135 case CMD_CREATE_XRI_CX:
136 case CMD_GET_RPI_CN:
137 case CMD_XMIT_ELS_RSP_CX:
138 case CMD_GET_RPI_CR:
139 case CMD_FCP_IWRITE_CR:
140 case CMD_FCP_IWRITE_CX:
141 case CMD_FCP_IREAD_CR:
142 case CMD_FCP_IREAD_CX:
143 case CMD_FCP_ICMND_CR:
144 case CMD_FCP_ICMND_CX:
145 case CMD_FCP_TSEND_CX:
146 case CMD_FCP_TRSP_CX:
147 case CMD_FCP_TRECEIVE_CX:
148 case CMD_FCP_AUTO_TRSP_CX:
149 case CMD_ADAPTER_MSG:
150 case CMD_ADAPTER_DUMP:
151 case CMD_XMIT_SEQUENCE64_CR:
152 case CMD_XMIT_SEQUENCE64_CX:
153 case CMD_XMIT_BCAST64_CN:
154 case CMD_XMIT_BCAST64_CX:
155 case CMD_ELS_REQUEST64_CR:
156 case CMD_ELS_REQUEST64_CX:
157 case CMD_FCP_IWRITE64_CR:
158 case CMD_FCP_IWRITE64_CX:
159 case CMD_FCP_IREAD64_CR:
160 case CMD_FCP_IREAD64_CX:
161 case CMD_FCP_ICMND64_CR:
162 case CMD_FCP_ICMND64_CX:
163 case CMD_FCP_TSEND64_CX:
164 case CMD_FCP_TRSP64_CX:
165 case CMD_FCP_TRECEIVE64_CX:
166 case CMD_GEN_REQUEST64_CR:
167 case CMD_GEN_REQUEST64_CX:
168 case CMD_XMIT_ELS_RSP64_CX:
169 type = LPFC_SOL_IOCB;
170 break;
171 case CMD_ABORT_XRI_CN:
172 case CMD_ABORT_XRI_CX:
173 case CMD_CLOSE_XRI_CN:
174 case CMD_CLOSE_XRI_CX:
175 case CMD_XRI_ABORTED_CX:
176 case CMD_ABORT_MXRI64_CN:
177 type = LPFC_ABORT_IOCB;
178 break;
179 case CMD_RCV_SEQUENCE_CX:
180 case CMD_RCV_ELS_REQ_CX:
181 case CMD_RCV_SEQUENCE64_CX:
182 case CMD_RCV_ELS_REQ64_CX:
183 type = LPFC_UNSOL_IOCB;
184 break;
185 default:
186 type = LPFC_UNKNOWN_IOCB;
187 break;
188 }
189
190 return type;
191}
192
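/*
 * lpfc_sli_ring_map() issues a CONFIG_RING mailbox command, polled, for
 * every SLI ring.  A failing command logs message 0446, puts the HBA in
 * LPFC_HBA_ERROR state and returns -ENXIO; 0 means all rings configured.
 */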
193static int
2e0fef85 194lpfc_sli_ring_map(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
195{
196 struct lpfc_sli *psli = &phba->sli;
197 MAILBOX_t *pmbox = &pmb->mb;
198 int i, rc;
199
200 for (i = 0; i < psli->num_rings; i++) {
2e0fef85 201 phba->link_state = LPFC_INIT_MBX_CMDS;
202 lpfc_config_ring(phba, i, pmb);
203 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
204 if (rc != MBX_SUCCESS) {
205 lpfc_printf_log(phba,
206 KERN_ERR,
207 LOG_INIT,
208 "%d:0446 Adapter failed to init, "
209 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
210 "ring %d\n",
211 phba->brd_no,
212 pmbox->mbxCommand,
213 pmbox->mbxStatus,
214 i);
2e0fef85 215 phba->link_state = LPFC_HBA_ERROR;
216 return -ENXIO;
217 }
218 }
219 return 0;
220}
221
222static int
223lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
224 struct lpfc_iocbq *piocb)
dea3101e 225{
226 list_add_tail(&piocb->list, &pring->txcmplq);
227 pring->txcmplq_cnt++;
228 if (unlikely(pring->ringno == LPFC_ELS_RING))
229 mod_timer(&piocb->vport->els_tmofunc,
230 jiffies + HZ * (phba->fc_ratov << 1));
dea3101e 231
2e0fef85 232 return 0;
233}
234
235static struct lpfc_iocbq *
2e0fef85 236lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
237{
238 struct list_head *dlp;
239 struct lpfc_iocbq *cmd_iocb;
240
241 dlp = &pring->txq;
242 cmd_iocb = NULL;
243 list_remove_head((&pring->txq), cmd_iocb,
244 struct lpfc_iocbq,
245 list);
246 if (cmd_iocb) {
247 /* If the first ptr is not equal to the list header,
248	 * dequeue the IOCBQ_t and return it.
249 */
250 pring->txq_cnt--;
251 }
2e0fef85 252 return cmd_iocb;
253}
254
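/*
 * lpfc_sli_next_iocb_slot() returns the next free command IOCB slot for
 * the ring, or NULL when the ring is full.  A port cmdGetInx beyond the
 * ring size is treated as an adapter error: the HBA is marked errored and
 * the error attention is queued to the worker thread.
 */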
255static IOCB_t *
256lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
257{
4cc2da1d 258 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
259 uint32_t max_cmd_idx = pring->numCiocb;
260 IOCB_t *iocb = NULL;
261
262 if ((pring->next_cmdidx == pring->cmdidx) &&
263 (++pring->next_cmdidx >= max_cmd_idx))
264 pring->next_cmdidx = 0;
265
266 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
267
268 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
269
270 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
271 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
272 "%d:0315 Ring %d issue: portCmdGet %d "
273 "is bigger then cmd ring %d\n",
274 phba->brd_no, pring->ringno,
275 pring->local_getidx, max_cmd_idx);
276
2e0fef85 277 phba->link_state = LPFC_HBA_ERROR;
278 /*
279 * All error attention handlers are posted to
280 * worker thread
281 */
282 phba->work_ha |= HA_ERATT;
283 phba->work_hs = HS_FFER3;
284 if (phba->work_wait)
285 wake_up(phba->work_wait);
286
287 return NULL;
288 }
289
290 if (pring->local_getidx == pring->next_cmdidx)
291 return NULL;
292 }
293
294 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
295
296 return iocb;
297}
298
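/*
 * lpfc_sli_next_iotag() allocates a unique iotag for @iocbq and records
 * the pointer in psli->iocbq_lookup so a response iotag can be mapped back
 * to its command IOCB.  When the lookup array is exhausted it is grown by
 * LPFC_IOCBQ_LOOKUP_INCREMENT entries; the allocation happens with the
 * hbalock dropped and the state is re-checked afterwards.  Returns the new
 * iotag, or 0 if none could be assigned.
 */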
604a3e30 299uint16_t
2e0fef85 300lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 301{
302 struct lpfc_iocbq **new_arr;
303 struct lpfc_iocbq **old_arr;
304 size_t new_len;
305 struct lpfc_sli *psli = &phba->sli;
306 uint16_t iotag;
dea3101e 307
2e0fef85 308 spin_lock_irq(&phba->hbalock);
309 iotag = psli->last_iotag;
310 if(++iotag < psli->iocbq_lookup_len) {
311 psli->last_iotag = iotag;
312 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 313 spin_unlock_irq(&phba->hbalock);
314 iocbq->iotag = iotag;
315 return iotag;
2e0fef85 316 } else if (psli->iocbq_lookup_len < (0xffff
317 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
318 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
319 spin_unlock_irq(&phba->hbalock);
320 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
321 GFP_KERNEL);
322 if (new_arr) {
2e0fef85 323 spin_lock_irq(&phba->hbalock);
324 old_arr = psli->iocbq_lookup;
325 if (new_len <= psli->iocbq_lookup_len) {
326				/* highly improbable case */
327 kfree(new_arr);
328 iotag = psli->last_iotag;
329 if(++iotag < psli->iocbq_lookup_len) {
330 psli->last_iotag = iotag;
331 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 332 spin_unlock_irq(&phba->hbalock);
333 iocbq->iotag = iotag;
334 return iotag;
335 }
2e0fef85 336 spin_unlock_irq(&phba->hbalock);
337 return 0;
338 }
339 if (psli->iocbq_lookup)
340 memcpy(new_arr, old_arr,
341 ((psli->last_iotag + 1) *
342 sizeof (struct lpfc_iocbq *)));
343 psli->iocbq_lookup = new_arr;
344 psli->iocbq_lookup_len = new_len;
345 psli->last_iotag = iotag;
346 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 347 spin_unlock_irq(&phba->hbalock);
348 iocbq->iotag = iotag;
349 kfree(old_arr);
350 return iotag;
351 }
8f6d98d2 352 } else
2e0fef85 353 spin_unlock_irq(&phba->hbalock);
dea3101e 354
355 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
356 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
357 phba->brd_no, psli->last_iotag);
dea3101e 358
604a3e30 359 return 0;
360}
361
362static void
363lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
364 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
365{
366 /*
604a3e30 367 * Set up an iotag
dea3101e 368 */
604a3e30 369 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
370
371 /*
372 * Issue iocb command to adapter
373 */
374 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
375 wmb();
376 pring->stats.iocb_cmd++;
377
378 /*
379 * If there is no completion routine to call, we can release the
380 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
381 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
382 */
383 if (nextiocb->iocb_cmpl)
384 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 385 else
2e0fef85 386 __lpfc_sli_release_iocbq(phba, nextiocb);
387
388 /*
389 * Let the HBA know what IOCB slot will be the next one the
390 * driver will put a command into.
391 */
392 pring->cmdidx = pring->next_cmdidx;
f91b392c 393 writel(pring->cmdidx, phba->MBslimaddr
394 + (SLIMOFF + (pring->ringno * 2)) * 4);
395}
396
397static void
2e0fef85 398lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
399{
400 int ringno = pring->ringno;
401
402 pring->flag |= LPFC_CALL_RING_AVAILABLE;
403
404 wmb();
405
406 /*
407 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
408 * The HBA will tell us when an IOCB entry is available.
409 */
410 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
411 readl(phba->CAregaddr); /* flush */
412
413 pring->stats.iocb_cmd_full++;
414}
415
416static void
2e0fef85 417lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
418{
419 int ringno = pring->ringno;
420
421 /*
422 * Tell the HBA that there is work to do in this ring.
423 */
424 wmb();
425 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
426 readl(phba->CAregaddr); /* flush */
427}
428
429static void
2e0fef85 430lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
431{
432 IOCB_t *iocb;
433 struct lpfc_iocbq *nextiocb;
434
435 /*
436 * Check to see if:
437 * (a) there is anything on the txq to send
438 * (b) link is up
439 * (c) link attention events can be processed (fcp ring only)
440 * (d) IOCB processing is not blocked by the outstanding mbox command.
441 */
442 if (pring->txq_cnt &&
2e0fef85 443 lpfc_is_link_up(phba) &&
444 (pring->ringno != phba->sli.fcp_ring ||
445 phba->sli.sli_flag & LPFC_PROCESS_LA) &&
446 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
447
448 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
449 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
450 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
451
452 if (iocb)
453 lpfc_sli_update_ring(phba, pring);
454 else
455 lpfc_sli_update_full_ring(phba, pring);
456 }
457
458 return;
459}
460
461/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
462static void
2e0fef85 463lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
dea3101e 464{
465 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];
466 unsigned long iflags;
467
468 /* If the ring is active, flag it */
2e0fef85 469 spin_lock_irqsave(&phba->hbalock, iflags);
470 if (phba->sli.ring[ringno].cmdringaddr) {
471 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
472 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
473 /*
474 * Force update of the local copy of cmdGetInx
475 */
476 phba->sli.ring[ringno].local_getidx
477 = le32_to_cpu(pgp->cmdGetInx);
dea3101e 478 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
479 }
480 }
2e0fef85 481 spin_unlock_irqrestore(&phba->hbalock, iflags);
482}
483
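/*
 * lpfc_sli_chk_mbx_command() validates a completed mailbox command code:
 * known commands are returned unchanged, anything unrecognized maps to
 * MBX_SHUTDOWN so the completion is treated as fatal.
 */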
484static int
485lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
486{
487 uint8_t ret;
488
489 switch (mbxCommand) {
490 case MBX_LOAD_SM:
491 case MBX_READ_NV:
492 case MBX_WRITE_NV:
493 case MBX_RUN_BIU_DIAG:
494 case MBX_INIT_LINK:
495 case MBX_DOWN_LINK:
496 case MBX_CONFIG_LINK:
497 case MBX_CONFIG_RING:
498 case MBX_RESET_RING:
499 case MBX_READ_CONFIG:
500 case MBX_READ_RCONFIG:
501 case MBX_READ_SPARM:
502 case MBX_READ_STATUS:
503 case MBX_READ_RPI:
504 case MBX_READ_XRI:
505 case MBX_READ_REV:
506 case MBX_READ_LNK_STAT:
507 case MBX_REG_LOGIN:
508 case MBX_UNREG_LOGIN:
509 case MBX_READ_LA:
510 case MBX_CLEAR_LA:
511 case MBX_DUMP_MEMORY:
512 case MBX_DUMP_CONTEXT:
513 case MBX_RUN_DIAGS:
514 case MBX_RESTART:
515 case MBX_UPDATE_CFG:
516 case MBX_DOWN_LOAD:
517 case MBX_DEL_LD_ENTRY:
518 case MBX_RUN_PROGRAM:
519 case MBX_SET_MASK:
520 case MBX_SET_SLIM:
521 case MBX_UNREG_D_ID:
41415862 522 case MBX_KILL_BOARD:
dea3101e 523 case MBX_CONFIG_FARP:
41415862 524 case MBX_BEACON:
525 case MBX_LOAD_AREA:
526 case MBX_RUN_BIU_DIAG64:
527 case MBX_CONFIG_PORT:
528 case MBX_READ_SPARM64:
529 case MBX_READ_RPI64:
530 case MBX_REG_LOGIN64:
531 case MBX_READ_LA64:
532 case MBX_FLASH_WR_ULA:
533 case MBX_SET_DEBUG:
534 case MBX_LOAD_EXP_ROM:
535 ret = mbxCommand;
536 break;
537 default:
538 ret = MBX_SHUTDOWN;
539 break;
540 }
2e0fef85 541 return ret;
542}
543static void
2e0fef85 544lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
545{
546 wait_queue_head_t *pdone_q;
547
548 /*
549 * If pdone_q is empty, the driver thread gave up waiting and
550 * continued running.
551 */
7054a606 552 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
553 pdone_q = (wait_queue_head_t *) pmboxq->context1;
554 if (pdone_q)
555 wake_up_interruptible(pdone_q);
556 return;
557}
558
559void
2e0fef85 560lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
561{
562 struct lpfc_dmabuf *mp;
563 uint16_t rpi;
564 int rc;
565
dea3101e 566 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 567
568 if (mp) {
569 lpfc_mbuf_free(phba, mp->virt, mp->phys);
570 kfree(mp);
571 }
572
573 /*
574 * If a REG_LOGIN succeeded after node is destroyed or node
575 * is in re-discovery driver need to cleanup the RPI.
576 */
577 if (!(phba->pport->load_flag & FC_UNLOADING) &&
578 pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
579 !pmb->mb.mbxStatus) {
580
581 rpi = pmb->mb.un.varWords[0];
582 lpfc_unreg_login(phba, rpi, pmb);
583 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
584 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
585 if (rc != MBX_NOT_FINISHED)
586 return;
587 }
588
2e0fef85 589 mempool_free(pmb, phba->mbox_mem_pool);
590 return;
591}
592
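/*
 * lpfc_sli_handle_mb_event() services a mailbox completion in SLI-2 mode.
 * It copies the mailbox out of the SLI-2 host memory area, checks the
 * ownership bit and command code, retries MBXERR_NO_RESOURCES completions,
 * runs the command's mbox_cmpl callback, and finally either issues the
 * next queued mailbox command or turns IOCB processing back on for all
 * rings.
 */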
593int
2e0fef85 594lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 595{
2e0fef85 596 MAILBOX_t *mbox, *pmbox;
dea3101e 597 LPFC_MBOXQ_t *pmb;
598 int i, rc;
599 uint32_t process_next;
2e0fef85 600 unsigned long iflags;
dea3101e 601
602 /* We should only get here if we are in SLI2 mode */
603 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
2e0fef85 604 return 1;
605 }
606
607 phba->sli.slistat.mbox_event++;
608
609 /* Get a Mailbox buffer to setup mailbox commands for callback */
610 if ((pmb = phba->sli.mbox_active)) {
611 pmbox = &pmb->mb;
4cc2da1d 612 mbox = &phba->slim2p->mbx;
613
614 /* First check out the status word */
615 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
616
617 /* Sanity check to ensure the host owns the mailbox */
618 if (pmbox->mbxOwner != OWN_HOST) {
619 /* Lets try for a while */
620 for (i = 0; i < 10240; i++) {
621 /* First copy command data */
622 lpfc_sli_pcimem_bcopy(mbox, pmbox,
623 sizeof (uint32_t));
624 if (pmbox->mbxOwner == OWN_HOST)
625 goto mbout;
626 }
627 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
628 <status> */
629 lpfc_printf_log(phba,
b4c02652 630 KERN_WARNING,
631 LOG_MBOX | LOG_SLI,
632 "%d:0304 Stray Mailbox Interrupt "
633 "mbxCommand x%x mbxStatus x%x\n",
634 phba->brd_no,
635 pmbox->mbxCommand,
636 pmbox->mbxStatus);
637
2e0fef85 638 spin_lock_irq(&phba->hbalock);
dea3101e 639 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
640 spin_unlock_irq(&phba->hbalock);
641 return 1;
642 }
643
644 mbout:
645 del_timer_sync(&phba->sli.mbox_tmo);
646
647 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
648 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
649 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
650
651 /*
652	 * It is a fatal error if an unknown mbox command completion occurs.
653 */
654 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
655 MBX_SHUTDOWN) {
656
657			/* Unknown mailbox command completion */
658 lpfc_printf_log(phba,
659 KERN_ERR,
660 LOG_MBOX | LOG_SLI,
661 "%d:0323 Unknown Mailbox command %x Cmpl\n",
662 phba->brd_no,
663 pmbox->mbxCommand);
2e0fef85 664 phba->link_state = LPFC_HBA_ERROR;
665 phba->work_hs = HS_FFER3;
666 lpfc_handle_eratt(phba);
2e0fef85 667 return 0;
668 }
669
670 phba->sli.mbox_active = NULL;
671 if (pmbox->mbxStatus) {
672 phba->sli.slistat.mbox_stat_err++;
673 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
674 /* Mbox cmd cmpl error - RETRYing */
675 lpfc_printf_log(phba,
676 KERN_INFO,
677 LOG_MBOX | LOG_SLI,
678 "%d:0305 Mbox cmd cmpl error - "
679 "RETRYing Data: x%x x%x x%x x%x\n",
680 phba->brd_no,
681 pmbox->mbxCommand,
682 pmbox->mbxStatus,
683 pmbox->un.varWords[0],
2e0fef85 684 phba->pport->port_state);
685 pmbox->mbxStatus = 0;
686 pmbox->mbxOwner = OWN_HOST;
2e0fef85 687 spin_lock_irq(&phba->hbalock);
dea3101e 688 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 689 spin_unlock_irq(&phba->hbalock);
690 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
691 if (rc == MBX_SUCCESS)
2e0fef85 692 return 0;
693 }
694 }
695
696 /* Mailbox cmd <cmd> Cmpl <cmpl> */
697 lpfc_printf_log(phba,
698 KERN_INFO,
699 LOG_MBOX | LOG_SLI,
700 "%d:0307 Mailbox cmd x%x Cmpl x%p "
701 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
702 phba->brd_no,
703 pmbox->mbxCommand,
704 pmb->mbox_cmpl,
705 *((uint32_t *) pmbox),
706 pmbox->un.varWords[0],
707 pmbox->un.varWords[1],
708 pmbox->un.varWords[2],
709 pmbox->un.varWords[3],
710 pmbox->un.varWords[4],
711 pmbox->un.varWords[5],
712 pmbox->un.varWords[6],
713 pmbox->un.varWords[7]);
714
715 if (pmb->mbox_cmpl) {
716 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
717 pmb->mbox_cmpl(phba,pmb);
718 }
719 }
720
721
722 do {
723 process_next = 0; /* by default don't loop */
2e0fef85 724 spin_lock_irq(&phba->hbalock);
725 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
726
727 /* Process next mailbox command if there is one */
728 if ((pmb = lpfc_mbox_get(phba))) {
2e0fef85 729 spin_unlock_irq(&phba->hbalock);
730 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
731 if (rc == MBX_NOT_FINISHED) {
732 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
733 pmb->mbox_cmpl(phba,pmb);
734 process_next = 1;
735 continue; /* loop back */
736 }
737 } else {
2e0fef85 738 spin_unlock_irq(&phba->hbalock);
dea3101e 739 /* Turn on IOCB processing */
1dcb58e5 740 for (i = 0; i < phba->sli.num_rings; i++)
dea3101e 741 lpfc_sli_turn_on_ring(phba, i);
742 }
743
744 } while (process_next);
745
2e0fef85 746 return 0;
747}
748static int
749lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
750 struct lpfc_iocbq *saveq)
751{
752 IOCB_t * irsp;
753 WORD5 * w5p;
754 uint32_t Rctl, Type;
755 uint32_t match, i;
756
757 match = 0;
758 irsp = &(saveq->iocb);
759 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
760 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
761 Rctl = FC_ELS_REQ;
762 Type = FC_ELS_DATA;
763 } else {
764 w5p =
765 (WORD5 *) & (saveq->iocb.un.
766 ulpWord[5]);
767 Rctl = w5p->hcsw.Rctl;
768 Type = w5p->hcsw.Type;
769
770 /* Firmware Workaround */
771 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
772 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
773 Rctl = FC_ELS_REQ;
774 Type = FC_ELS_DATA;
775 w5p->hcsw.Rctl = Rctl;
776 w5p->hcsw.Type = Type;
777 }
778 }
779	/* Unsolicited Responses */
780 if (pring->prt[0].profile) {
781 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
782 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
783 saveq);
784 match = 1;
785 } else {
786 /* We must search, based on rctl / type
787 for the right routine */
788 for (i = 0; i < pring->num_mask;
789 i++) {
790 if ((pring->prt[i].rctl ==
791 Rctl)
792 && (pring->prt[i].
793 type == Type)) {
794 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
795 (pring->prt[i].lpfc_sli_rcv_unsol_event)
796 (phba, pring, saveq);
797 match = 1;
798 break;
799 }
800 }
801 }
802 if (match == 0) {
803 /* Unexpected Rctl / Type received */
804 /* Ring <ringno> handler: unexpected
805 Rctl <Rctl> Type <Type> received */
806 lpfc_printf_log(phba,
807 KERN_WARNING,
808 LOG_SLI,
809 "%d:0313 Ring %d handler: unexpected Rctl x%x "
810 "Type x%x received \n",
811 phba->brd_no,
812 pring->ringno,
813 Rctl,
814 Type);
815 }
816 return(1);
817}
818
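/*
 * lpfc_sli_iocbq_lookup() maps the iotag in a response IOCB back to the
 * originating command via psli->iocbq_lookup, removes that command from
 * the txcmplq and returns it.  Out-of-range iotags log message 0317 and
 * return NULL.  Callers in this file take the hbalock around this lookup.
 */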
819static struct lpfc_iocbq *
820lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
821 struct lpfc_sli_ring *pring,
822 struct lpfc_iocbq *prspiocb)
dea3101e 823{
824 struct lpfc_iocbq *cmd_iocb = NULL;
825 uint16_t iotag;
826
827 iotag = prspiocb->iocb.ulpIoTag;
828
829 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
830 cmd_iocb = phba->sli.iocbq_lookup[iotag];
831 list_del(&cmd_iocb->list);
832 pring->txcmplq_cnt--;
833 return cmd_iocb;
834 }
835
dea3101e 836 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
837 "%d:0317 iotag x%x is out off "
838 "range: max iotag x%x wd0 x%x\n",
839 phba->brd_no, iotag,
840 phba->sli.last_iotag,
841 *(((uint32_t *) &prspiocb->iocb) + 7));
842 return NULL;
843}
844
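/*
 * lpfc_sli_process_sol_iocb() completes a solicited IOCB.  The command is
 * found by iotag and its iocb_cmpl callback is invoked; on the ELS ring a
 * driver-aborted command is first rewritten to IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_ABORTED.  A response with no matching command is only logged
 * for non-ELS rings, since lpfc_els_abort() can legitimately cause it.
 */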
845static int
2e0fef85 846lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
847 struct lpfc_iocbq *saveq)
848{
2e0fef85 849 struct lpfc_iocbq *cmdiocbp;
850 int rc = 1;
851 unsigned long iflag;
852
853 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 854 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 855 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
856 spin_unlock_irqrestore(&phba->hbalock, iflag);
857
858 if (cmdiocbp) {
859 if (cmdiocbp->iocb_cmpl) {
860 /*
861 * Post all ELS completions to the worker thread.
862 * All other are passed to the completion callback.
863 */
864 if (pring->ringno == LPFC_ELS_RING) {
865 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
866 cmdiocbp->iocb_flag &=
867 ~LPFC_DRIVER_ABORTED;
868 saveq->iocb.ulpStatus =
869 IOSTAT_LOCAL_REJECT;
870 saveq->iocb.un.ulpWord[4] =
871 IOERR_SLI_ABORTED;
872 }
dea3101e 873 }
2e0fef85 874 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
875 } else
876 lpfc_sli_release_iocbq(phba, cmdiocbp);
877 } else {
878 /*
879 * Unknown initiating command based on the response iotag.
880 * This could be the case on the ELS ring because of
881 * lpfc_els_abort().
882 */
883 if (pring->ringno != LPFC_ELS_RING) {
884 /*
885 * Ring <ringno> handler: unexpected completion IoTag
886 * <IoTag>
887 */
888 lpfc_printf_log(phba,
889 KERN_WARNING,
890 LOG_SLI,
891 "%d:0322 Ring %d handler: unexpected "
892 "completion IoTag x%x Data: x%x x%x x%x x%x\n",
893 phba->brd_no,
894 pring->ringno,
895 saveq->iocb.ulpIoTag,
896 saveq->iocb.ulpStatus,
897 saveq->iocb.un.ulpWord[4],
898 saveq->iocb.ulpCommand,
899 saveq->iocb.ulpContext);
900 }
901 }
68876920 902
903 return rc;
904}
905
906static void
907lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
908{
909 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
910 /*
911 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
912 * rsp ring <portRspMax>
913 */
914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
915 "%d:0312 Ring %d handler: portRspPut %d "
916 "is bigger then rsp ring %d\n",
917 phba->brd_no, pring->ringno,
918 le32_to_cpu(pgp->rspPutInx),
919 pring->numRiocb);
920
2e0fef85 921 phba->link_state = LPFC_HBA_ERROR;
922
923 /*
924 * All error attention handlers are posted to
925 * worker thread
926 */
927 phba->work_ha |= HA_ERATT;
928 phba->work_hs = HS_FFER3;
929 if (phba->work_wait)
930 wake_up(phba->work_wait);
931
932 return;
933}
934
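/*
 * lpfc_sli_poll_fcp_ring() drains completed entries from the FCP response
 * ring without waiting for an interrupt, invoking each command's iocb_cmpl
 * callback.  It mirrors lpfc_sli_handle_fast_ring_event() below and is
 * used when FCP ring polling is enabled.
 */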
2e0fef85 935void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
875fbdfe 936{
937 struct lpfc_sli *psli = &phba->sli;
938 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
939 IOCB_t *irsp = NULL;
940 IOCB_t *entry = NULL;
941 struct lpfc_iocbq *cmdiocbq = NULL;
942 struct lpfc_iocbq rspiocbq;
943 struct lpfc_pgp *pgp;
944 uint32_t status;
945 uint32_t portRspPut, portRspMax;
946 int type;
947 uint32_t rsp_cmpl = 0;
948 void __iomem *to_slim;
949 uint32_t ha_copy;
2e0fef85 950 unsigned long iflags;
951
952 pring->stats.iocb_event++;
953
954 /* The driver assumes SLI-2 mode */
955 pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
956
957 /*
958 * The next available response entry should never exceed the maximum
959 * entries. If it does, treat it as an adapter hardware error.
960 */
961 portRspMax = pring->numRiocb;
962 portRspPut = le32_to_cpu(pgp->rspPutInx);
963 if (unlikely(portRspPut >= portRspMax)) {
964 lpfc_sli_rsp_pointers_error(phba, pring);
965 return;
966 }
967
968 rmb();
969 while (pring->rspidx != portRspPut) {
970
971 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
972
973 if (++pring->rspidx >= portRspMax)
974 pring->rspidx = 0;
975
976 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
977 (uint32_t *) &rspiocbq.iocb,
2e0fef85 978 sizeof(IOCB_t));
979 irsp = &rspiocbq.iocb;
980 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
981 pring->stats.iocb_rsp++;
982 rsp_cmpl++;
983
984 if (unlikely(irsp->ulpStatus)) {
985 /* Rsp ring <ringno> error: IOCB */
986 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
987 "%d:0326 Rsp Ring %d error: IOCB Data: "
988 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
989 phba->brd_no, pring->ringno,
990 irsp->un.ulpWord[0],
991 irsp->un.ulpWord[1],
992 irsp->un.ulpWord[2],
993 irsp->un.ulpWord[3],
994 irsp->un.ulpWord[4],
995 irsp->un.ulpWord[5],
996 *(((uint32_t *) irsp) + 6),
997 *(((uint32_t *) irsp) + 7));
998 }
999
1000 switch (type) {
1001 case LPFC_ABORT_IOCB:
1002 case LPFC_SOL_IOCB:
1003 /*
1004 * Idle exchange closed via ABTS from port. No iocb
1005 * resources need to be recovered.
1006 */
1007 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1008 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1009 "%d:0314 IOCB cmd 0x%x"
1010 " processed. Skipping"
1011 " completion", phba->brd_no,
1012 irsp->ulpCommand);
1013 break;
1014 }
1015
2e0fef85 1016 spin_lock_irqsave(&phba->hbalock, iflags);
1017 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1018 &rspiocbq);
2e0fef85 1019 spin_unlock_irqrestore(&phba->hbalock, iflags);
1020 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1021 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1022 &rspiocbq);
1023 }
1024 break;
1025 default:
1026 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1027 char adaptermsg[LPFC_MAX_ADPTMSG];
1028 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1029 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1030 MAX_MSG_DATA);
1031 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1032 phba->brd_no, adaptermsg);
1033 } else {
1034 /* Unknown IOCB command */
1035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1036 "%d:0321 Unknown IOCB command "
1037 "Data: x%x, x%x x%x x%x x%x\n",
1038 phba->brd_no, type,
1039 irsp->ulpCommand,
1040 irsp->ulpStatus,
1041 irsp->ulpIoTag,
1042 irsp->ulpContext);
1043 }
1044 break;
1045 }
1046
1047 /*
1048 * The response IOCB has been processed. Update the ring
1049 * pointer in SLIM. If the port response put pointer has not
1050 * been updated, sync the pgp->rspPutInx and fetch the new port
1051 * response put pointer.
1052 */
1053 to_slim = phba->MBslimaddr +
1054 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1055 writeb(pring->rspidx, to_slim);
1056
1057 if (pring->rspidx == portRspPut)
1058 portRspPut = le32_to_cpu(pgp->rspPutInx);
1059 }
1060
1061 ha_copy = readl(phba->HAregaddr);
1062 ha_copy >>= (LPFC_FCP_RING * 4);
1063
1064 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
2e0fef85 1065 spin_lock_irqsave(&phba->hbalock, iflags);
1066 pring->stats.iocb_rsp_full++;
1067 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1068 writel(status, phba->CAregaddr);
1069 readl(phba->CAregaddr);
2e0fef85 1070 spin_unlock_irqrestore(&phba->hbalock, iflags);
1071 }
1072 if ((ha_copy & HA_R0CE_RSP) &&
1073 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2e0fef85 1074 spin_lock_irqsave(&phba->hbalock, iflags);
1075 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1076 pring->stats.iocb_cmd_empty++;
1077
1078 /* Force update of the local copy of cmdGetInx */
1079 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1080 lpfc_sli_resume_iocb(phba, pring);
1081
1082 if ((pring->lpfc_sli_cmd_available))
1083 (pring->lpfc_sli_cmd_available) (phba, pring);
1084
2e0fef85 1085 spin_unlock_irqrestore(&phba->hbalock, iflags);
1086 }
1087
1088 return;
1089}
1090
1091/*
1092 * This routine presumes LPFC_FCP_RING handling and doesn't bother
1093 * to check it explicitly.
1094 */
1095static int
1096lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1097 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 1098{
2e0fef85 1099 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
dea3101e 1100 IOCB_t *irsp = NULL;
87f6eaff 1101 IOCB_t *entry = NULL;
1102 struct lpfc_iocbq *cmdiocbq = NULL;
1103 struct lpfc_iocbq rspiocbq;
1104 uint32_t status;
1105 uint32_t portRspPut, portRspMax;
1106 int rc = 1;
1107 lpfc_iocb_type type;
1108 unsigned long iflag;
1109 uint32_t rsp_cmpl = 0;
2e0fef85 1110 void __iomem *to_slim;
dea3101e 1111
2e0fef85 1112 spin_lock_irqsave(&phba->hbalock, iflag);
1113 pring->stats.iocb_event++;
1114
1115 /*
1116 * The next available response entry should never exceed the maximum
1117 * entries. If it does, treat it as an adapter hardware error.
1118 */
1119 portRspMax = pring->numRiocb;
1120 portRspPut = le32_to_cpu(pgp->rspPutInx);
1121 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 1122 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 1123 spin_unlock_irqrestore(&phba->hbalock, iflag);
1124 return 1;
1125 }
1126
1127 rmb();
1128 while (pring->rspidx != portRspPut) {
1129 /*
1130 * Fetch an entry off the ring and copy it into a local data
1131 * structure. The copy involves a byte-swap since the
1132 * network byte order and pci byte orders are different.
1133 */
4a0dfcde 1134 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1135
1136 if (++pring->rspidx >= portRspMax)
1137 pring->rspidx = 0;
1138
1139 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1140 (uint32_t *) &rspiocbq.iocb,
2e0fef85 1141 sizeof(IOCB_t));
a4bc3379 1142 INIT_LIST_HEAD(&(rspiocbq.list));
1143 irsp = &rspiocbq.iocb;
1144
1145 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1146 pring->stats.iocb_rsp++;
1147 rsp_cmpl++;
1148
1149 if (unlikely(irsp->ulpStatus)) {
1150 /* Rsp ring <ringno> error: IOCB */
1151 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
dca9479b 1152 "%d:0336 Rsp Ring %d error: IOCB Data: "
1153 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1154 phba->brd_no, pring->ringno,
1155 irsp->un.ulpWord[0], irsp->un.ulpWord[1],
1156 irsp->un.ulpWord[2], irsp->un.ulpWord[3],
1157 irsp->un.ulpWord[4], irsp->un.ulpWord[5],
1158 *(((uint32_t *) irsp) + 6),
1159 *(((uint32_t *) irsp) + 7));
1160 }
1161
1162 switch (type) {
1163 case LPFC_ABORT_IOCB:
1164 case LPFC_SOL_IOCB:
1165 /*
1166 * Idle exchange closed via ABTS from port. No iocb
1167 * resources need to be recovered.
1168 */
1169 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1170 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1171 "%d:0333 IOCB cmd 0x%x"
1172 " processed. Skipping"
1173 " completion\n", phba->brd_no,
1174 irsp->ulpCommand);
1175 break;
1176 }
1177
1178 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1179 &rspiocbq);
dea3101e 1180 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1181 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1182 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1183 &rspiocbq);
1184 } else {
1185 spin_unlock_irqrestore(&phba->hbalock,
1186 iflag);
1187 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1188 &rspiocbq);
2e0fef85 1189 spin_lock_irqsave(&phba->hbalock,
1190 iflag);
1191 }
1192 }
1193 break;
a4bc3379 1194 case LPFC_UNSOL_IOCB:
2e0fef85 1195 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 1196 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 1197 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 1198 break;
1199 default:
1200 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1201 char adaptermsg[LPFC_MAX_ADPTMSG];
1202 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1203 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1204 MAX_MSG_DATA);
1205 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1206 phba->brd_no, adaptermsg);
1207 } else {
1208 /* Unknown IOCB command */
1209 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
dca9479b 1210 "%d:0334 Unknown IOCB command "
1211 "Data: x%x, x%x x%x x%x x%x\n",
1212 phba->brd_no, type, irsp->ulpCommand,
1213 irsp->ulpStatus, irsp->ulpIoTag,
1214 irsp->ulpContext);
1215 }
1216 break;
1217 }
1218
1219 /*
1220 * The response IOCB has been processed. Update the ring
1221 * pointer in SLIM. If the port response put pointer has not
1222 * been updated, sync the pgp->rspPutInx and fetch the new port
1223 * response put pointer.
1224 */
1225 to_slim = phba->MBslimaddr +
1226 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
f91b392c 1227 writel(pring->rspidx, to_slim);
1228
1229 if (pring->rspidx == portRspPut)
1230 portRspPut = le32_to_cpu(pgp->rspPutInx);
1231 }
1232
1233 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1234 pring->stats.iocb_rsp_full++;
1235 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1236 writel(status, phba->CAregaddr);
1237 readl(phba->CAregaddr);
1238 }
1239 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1240 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1241 pring->stats.iocb_cmd_empty++;
1242
1243 /* Force update of the local copy of cmdGetInx */
1244 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1245 lpfc_sli_resume_iocb(phba, pring);
1246
1247 if ((pring->lpfc_sli_cmd_available))
1248 (pring->lpfc_sli_cmd_available) (phba, pring);
1249
1250 }
1251
2e0fef85 1252 spin_unlock_irqrestore(&phba->hbalock, iflag);
1253 return rc;
1254}
1255
1256
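/*
 * lpfc_sli_handle_slow_ring_event() services the slow-path (ELS) response
 * ring.  Entries are copied into driver iocbqs and chained on
 * iocb_continueq until ulpLe marks the last entry, then the whole command
 * is dispatched by IOCB type: solicited, unsolicited, abort or unknown.
 */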
1257int
1258lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1259 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 1260{
2e0fef85 1261 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1262 IOCB_t *entry;
1263 IOCB_t *irsp = NULL;
1264 struct lpfc_iocbq *rspiocbp = NULL;
1265 struct lpfc_iocbq *next_iocb;
1266 struct lpfc_iocbq *cmdiocbp;
1267 struct lpfc_iocbq *saveq;
1268 uint8_t iocb_cmd_type;
1269 lpfc_iocb_type type;
1270 uint32_t status, free_saveq;
1271 uint32_t portRspPut, portRspMax;
1272 int rc = 1;
1273 unsigned long iflag;
2e0fef85 1274 void __iomem *to_slim;
dea3101e 1275
2e0fef85 1276 spin_lock_irqsave(&phba->hbalock, iflag);
1277 pring->stats.iocb_event++;
1278
1279 /*
1280 * The next available response entry should never exceed the maximum
1281 * entries. If it does, treat it as an adapter hardware error.
1282 */
1283 portRspMax = pring->numRiocb;
1284 portRspPut = le32_to_cpu(pgp->rspPutInx);
1285 if (portRspPut >= portRspMax) {
1286 /*
1287 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1288 * rsp ring <portRspMax>
1289 */
1290 lpfc_printf_log(phba,
1291 KERN_ERR,
1292 LOG_SLI,
dca9479b 1293 "%d:0303 Ring %d handler: portRspPut %d "
1294 "is bigger then rsp ring %d\n",
1295 phba->brd_no,
1296 pring->ringno, portRspPut, portRspMax);
1297
1298 phba->link_state = LPFC_HBA_ERROR;
1299 spin_unlock_irqrestore(&phba->hbalock, iflag);
1300
1301 phba->work_hs = HS_FFER3;
1302 lpfc_handle_eratt(phba);
1303
1304 return 1;
1305 }
1306
1307 rmb();
1308 while (pring->rspidx != portRspPut) {
1309 /*
1310 * Build a completion list and call the appropriate handler.
1311 * The process is to get the next available response iocb, get
1312 * a free iocb from the list, copy the response data into the
1313 * free iocb, insert to the continuation list, and update the
1314 * next response index to slim. This process makes response
1315 * iocb's in the ring available to DMA as fast as possible but
1316 * pays a penalty for a copy operation. Since the iocb is
1317 * only 32 bytes, this penalty is considered small relative to
1318 * the PCI reads for register values and a slim write. When
1319 * the ulpLe field is set, the entire Command has been
1320 * received.
1321 */
1322 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
2e0fef85 1323 rspiocbp = __lpfc_sli_get_iocbq(phba);
1324 if (rspiocbp == NULL) {
1325 printk(KERN_ERR "%s: out of buffers! Failing "
1326 "completion.\n", __FUNCTION__);
1327 break;
1328 }
1329
2e0fef85 1330 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof(IOCB_t));
1331 irsp = &rspiocbp->iocb;
1332
1333 if (++pring->rspidx >= portRspMax)
1334 pring->rspidx = 0;
1335
1336 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
1337 + 1) * 4;
f91b392c 1338 writel(pring->rspidx, to_slim);
1339
1340 if (list_empty(&(pring->iocb_continueq))) {
1341 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1342 } else {
1343 list_add_tail(&rspiocbp->list,
1344 &(pring->iocb_continueq));
1345 }
1346
1347 pring->iocb_continueq_cnt++;
1348 if (irsp->ulpLe) {
1349 /*
1350 * By default, the driver expects to free all resources
1351 * associated with this iocb completion.
1352 */
1353 free_saveq = 1;
1354 saveq = list_get_first(&pring->iocb_continueq,
1355 struct lpfc_iocbq, list);
1356 irsp = &(saveq->iocb);
1357 list_del_init(&pring->iocb_continueq);
1358 pring->iocb_continueq_cnt = 0;
1359
1360 pring->stats.iocb_rsp++;
1361
1362 if (irsp->ulpStatus) {
1363 /* Rsp ring <ringno> error: IOCB */
1364 lpfc_printf_log(phba,
1365 KERN_WARNING,
1366 LOG_SLI,
1367 "%d:0328 Rsp Ring %d error: IOCB Data: "
1368 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1369 phba->brd_no,
1370 pring->ringno,
1371 irsp->un.ulpWord[0],
1372 irsp->un.ulpWord[1],
1373 irsp->un.ulpWord[2],
1374 irsp->un.ulpWord[3],
1375 irsp->un.ulpWord[4],
1376 irsp->un.ulpWord[5],
1377 *(((uint32_t *) irsp) + 6),
1378 *(((uint32_t *) irsp) + 7));
1379 }
1380
1381 /*
1382 * Fetch the IOCB command type and call the correct
1383 * completion routine. Solicited and Unsolicited
1384 * IOCBs on the ELS ring get freed back to the
1385 * lpfc_iocb_list by the discovery kernel thread.
1386 */
1387 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1388 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1389 if (type == LPFC_SOL_IOCB) {
2e0fef85 1390 spin_unlock_irqrestore(&phba->hbalock,
1391 iflag);
1392 rc = lpfc_sli_process_sol_iocb(phba, pring,
1393 saveq);
1394 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 1395 } else if (type == LPFC_UNSOL_IOCB) {
2e0fef85 1396 spin_unlock_irqrestore(&phba->hbalock,
1397 iflag);
1398 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1399 saveq);
1400 spin_lock_irqsave(&phba->hbalock, iflag);
1401 } else if (type == LPFC_ABORT_IOCB) {
1402 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1403 ((cmdiocbp =
1404 lpfc_sli_iocbq_lookup(phba, pring,
1405 saveq)))) {
1406 /* Call the specified completion
1407 routine */
1408 if (cmdiocbp->iocb_cmpl) {
1409 spin_unlock_irqrestore(
2e0fef85 1410 &phba->hbalock,
1411 iflag);
1412 (cmdiocbp->iocb_cmpl) (phba,
1413 cmdiocbp, saveq);
1414 spin_lock_irqsave(
2e0fef85 1415 &phba->hbalock,
dea3101e 1416 iflag);
604a3e30 1417 } else
2e0fef85 1418 __lpfc_sli_release_iocbq(phba,
604a3e30 1419 cmdiocbp);
1420 }
1421 } else if (type == LPFC_UNKNOWN_IOCB) {
1422 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1423
1424 char adaptermsg[LPFC_MAX_ADPTMSG];
1425
1426 memset(adaptermsg, 0,
1427 LPFC_MAX_ADPTMSG);
1428 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1429 MAX_MSG_DATA);
1430 dev_warn(&((phba->pcidev)->dev),
1431 "lpfc%d: %s",
1432 phba->brd_no, adaptermsg);
1433 } else {
1434 /* Unknown IOCB command */
1435 lpfc_printf_log(phba,
1436 KERN_ERR,
1437 LOG_SLI,
dca9479b 1438 "%d:0335 Unknown IOCB command "
1439 "Data: x%x x%x x%x x%x\n",
1440 phba->brd_no,
1441 irsp->ulpCommand,
1442 irsp->ulpStatus,
1443 irsp->ulpIoTag,
1444 irsp->ulpContext);
1445 }
1446 }
1447
1448 if (free_saveq) {
1449 list_for_each_entry_safe(rspiocbp, next_iocb,
1450 &saveq->list, list) {
1451 list_del(&rspiocbp->list);
1452 __lpfc_sli_release_iocbq(phba,
1453 rspiocbp);
dea3101e 1454 }
2e0fef85 1455 __lpfc_sli_release_iocbq(phba, saveq);
1456 }
1457 }
1458
1459 /*
1460 * If the port response put pointer has not been updated, sync
1461	 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1462 * response put pointer.
1463 */
1464 if (pring->rspidx == portRspPut) {
1465 portRspPut = le32_to_cpu(pgp->rspPutInx);
1466 }
1467 } /* while (pring->rspidx != portRspPut) */
1468
1469 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
1470 /* At least one response entry has been freed */
1471 pring->stats.iocb_rsp_full++;
1472 /* SET RxRE_RSP in Chip Att register */
1473 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1474 writel(status, phba->CAregaddr);
1475 readl(phba->CAregaddr); /* flush */
1476 }
1477 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1478 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1479 pring->stats.iocb_cmd_empty++;
1480
1481 /* Force update of the local copy of cmdGetInx */
1482 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1483 lpfc_sli_resume_iocb(phba, pring);
1484
1485 if ((pring->lpfc_sli_cmd_available))
1486 (pring->lpfc_sli_cmd_available) (phba, pring);
1487
1488 }
1489
2e0fef85 1490 spin_unlock_irqrestore(&phba->hbalock, iflag);
1491 return rc;
1492}
1493
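/*
 * lpfc_sli_abort_iocb_ring() fails every IOCB still sitting on the ring's
 * txq with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED and issues an abort
 * (ABTS) for each command outstanding on the txcmplq.
 */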
2e0fef85 1494void
1495lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1496{
2534ba75 1497 LIST_HEAD(completions);
dea3101e 1498 struct lpfc_iocbq *iocb, *next_iocb;
2534ba75 1499 IOCB_t *cmd = NULL;
1500
1501 /* Error everything on txq and txcmplq
1502 * First do the txq.
1503 */
2e0fef85 1504 spin_lock_irq(&phba->hbalock);
2534ba75 1505 list_splice_init(&pring->txq, &completions);
dea3101e 1506 pring->txq_cnt = 0;
1507
1508 /* Next issue ABTS for everything on the txcmplq */
1509 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1510 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 1511
2e0fef85 1512 spin_unlock_irq(&phba->hbalock);
dea3101e 1513
1514 while (!list_empty(&completions)) {
1515 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1516 cmd = &iocb->iocb;
1517 list_del(&iocb->list);
dea3101e 1518
1519 if (!iocb->iocb_cmpl)
1520 lpfc_sli_release_iocbq(phba, iocb);
1521 else {
1522 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1523 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
dea3101e 1524 (iocb->iocb_cmpl) (phba, iocb, iocb);
2e0fef85 1525 }
dea3101e 1526 }
1527}
1528
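/*
 * lpfc_sli_brdready() polls the Host Status register until the bits in
 * @mask are set, backing off from 10ms to 2.5s between reads and
 * restarting the board once along the way.  Returns 0 when ready, 1 on
 * error or timeout.
 */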
41415862 1529int
2e0fef85 1530lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
dea3101e 1531{
1532 uint32_t status;
1533 int i = 0;
1534 int retval = 0;
dea3101e 1535
1536 /* Read the HBA Host Status Register */
1537 status = readl(phba->HSregaddr);
dea3101e 1538
1539 /*
1540 * Check status register every 100ms for 5 retries, then every
1541 * 500ms for 5, then every 2.5 sec for 5, then reset board and
1542 * every 2.5 sec for 4.
1544	 * Break out of the loop if errors occurred during init.
1544 */
1545 while (((status & mask) != mask) &&
1546 !(status & HS_FFERM) &&
1547 i++ < 20) {
dea3101e 1548
1549 if (i <= 5)
1550 msleep(10);
1551 else if (i <= 10)
1552 msleep(500);
1553 else
1554 msleep(2500);
dea3101e 1555
41415862 1556 if (i == 15) {
1557 /* Do post */
1558 phba->pport->port_state = LPFC_STATE_UNKNOWN;
1559 lpfc_sli_brdrestart(phba);
1560 }
1561 /* Read the HBA Host Status Register */
1562 status = readl(phba->HSregaddr);
1563 }
dea3101e 1564
1565 /* Check to see if any errors occurred during init */
1566 if ((status & HS_FFERM) || (i >= 20)) {
2e0fef85 1567 phba->link_state = LPFC_HBA_ERROR;
41415862 1568 retval = 1;
dea3101e 1569 }
dea3101e 1570
1571 return retval;
1572}
dea3101e 1573
1574#define BARRIER_TEST_PATTERN (0xdeadbeef)
1575
2e0fef85 1576void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 1577{
1578 uint32_t __iomem *resp_buf;
1579 uint32_t __iomem *mbox_buf;
1580 volatile uint32_t mbox;
1581 uint32_t hc_copy;
1582 int i;
1583 uint8_t hdrtype;
1584
1585 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1586 if (hdrtype != 0x80 ||
1587 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1588 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1589 return;
1590
1591 /*
1592 * Tell the other part of the chip to suspend temporarily all
1593 * its DMA activity.
1594 */
65a29c16 1595 resp_buf = phba->MBslimaddr;
1596
1597 /* Disable the error attention */
1598 hc_copy = readl(phba->HCregaddr);
1599 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1600 readl(phba->HCregaddr); /* flush */
2e0fef85 1601 phba->link_flag |= LS_IGNORE_ERATT;
1602
1603 if (readl(phba->HAregaddr) & HA_ERATT) {
1604 /* Clear Chip error bit */
1605 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 1606 phba->pport->stopped = 1;
1607 }
1608
1609 mbox = 0;
1610 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1611 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1612
1613 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 1614 mbox_buf = phba->MBslimaddr;
1615 writel(mbox, mbox_buf);
1616
1617 for (i = 0;
1618 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1619 mdelay(1);
1620
1621 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1622 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
2e0fef85 1623 phba->pport->stopped)
1624 goto restore_hc;
1625 else
1626 goto clear_errat;
1627 }
1628
1629 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1630 for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
1631 mdelay(1);
1632
1633clear_errat:
1634
1635 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1636 mdelay(1);
1637
1638 if (readl(phba->HAregaddr) & HA_ERATT) {
1639 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 1640 phba->pport->stopped = 1;
1641 }
1642
1643restore_hc:
2e0fef85 1644 phba->link_flag &= ~LS_IGNORE_ERATT;
1645 writel(hc_copy, phba->HCregaddr);
1646 readl(phba->HCregaddr); /* flush */
1647}
1648
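/*
 * lpfc_sli_brdkill() issues a KILL_BOARD mailbox command with error
 * attention masked and then waits up to ~3 seconds for the resulting
 * ERATT.  The HBA is left in LPFC_HBA_ERROR state; returns 0 if the kill
 * was confirmed by an ERATT, 1 otherwise.
 */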
41415862 1649int
2e0fef85 1650lpfc_sli_brdkill(struct lpfc_hba *phba)
1651{
1652 struct lpfc_sli *psli;
1653 LPFC_MBOXQ_t *pmb;
1654 uint32_t status;
1655 uint32_t ha_copy;
1656 int retval;
1657 int i = 0;
dea3101e 1658
41415862 1659 psli = &phba->sli;
dea3101e 1660
1661 /* Kill HBA */
1662 lpfc_printf_log(phba,
1663 KERN_INFO,
1664 LOG_SLI,
1665 "%d:0329 Kill HBA Data: x%x x%x\n",
1666 phba->brd_no,
2e0fef85 1667 phba->pport->port_state,
1668 psli->sli_flag);
1669
1670 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9290831f 1671 GFP_KERNEL)) == 0)
41415862 1672 return 1;
1673
1674 /* Disable the error attention */
2e0fef85 1675 spin_lock_irq(&phba->hbalock);
1676 status = readl(phba->HCregaddr);
1677 status &= ~HC_ERINT_ENA;
1678 writel(status, phba->HCregaddr);
1679 readl(phba->HCregaddr); /* flush */
1680 phba->link_flag |= LS_IGNORE_ERATT;
1681 spin_unlock_irq(&phba->hbalock);
1682
1683 lpfc_kill_board(phba, pmb);
1684 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1685 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1686
1687 if (retval != MBX_SUCCESS) {
1688 if (retval != MBX_BUSY)
1689 mempool_free(pmb, phba->mbox_mem_pool);
1690 spin_lock_irq(&phba->hbalock);
1691 phba->link_flag &= ~LS_IGNORE_ERATT;
1692 spin_unlock_irq(&phba->hbalock);
1693 return 1;
1694 }
1695
1696 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1697
1698 mempool_free(pmb, phba->mbox_mem_pool);
1699
1700 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1701 * attention every 100ms for 3 seconds. If we don't get ERATT after
1702 * 3 seconds we still set HBA_ERROR state because the status of the
1703 * board is now undefined.
1704 */
1705 ha_copy = readl(phba->HAregaddr);
1706
1707 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1708 mdelay(100);
1709 ha_copy = readl(phba->HAregaddr);
1710 }
1711
1712 del_timer_sync(&psli->mbox_tmo);
1713 if (ha_copy & HA_ERATT) {
1714 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 1715 phba->pport->stopped = 1;
9290831f 1716 }
2e0fef85 1717 spin_lock_irq(&phba->hbalock);
41415862 1718 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1719 phba->link_flag &= ~LS_IGNORE_ERATT;
1720 spin_unlock_irq(&phba->hbalock);
1721
1722 psli->mbox_active = NULL;
1723 lpfc_hba_down_post(phba);
2e0fef85 1724 phba->link_state = LPFC_HBA_ERROR;
41415862 1725
2e0fef85 1726 return ha_copy & HA_ERATT ? 0 : 1;
1727}
1728
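/*
 * lpfc_sli_brdreset() performs a warm reset: parity and SERR reporting are
 * temporarily disabled in PCI config space, the INITFF bit is toggled in
 * the Host Control register, and the per-ring SLI indices are cleared.
 */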
41415862 1729int
2e0fef85 1730lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 1731{
41415862 1732 struct lpfc_sli *psli;
dea3101e 1733 struct lpfc_sli_ring *pring;
41415862 1734 uint16_t cfg_value;
dea3101e 1735 int i;
dea3101e 1736
41415862 1737 psli = &phba->sli;
dea3101e 1738
1739 /* Reset HBA */
1740 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1741 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
2e0fef85 1742 phba->pport->port_state, psli->sli_flag);
1743
1744 /* perform board reset */
1745 phba->fc_eventTag = 0;
1746 phba->pport->fc_myDID = 0;
1747 phba->pport->fc_prevDID = 0;
dea3101e 1748
1749 /* Turn off parity checking and serr during the physical reset */
1750 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1751 pci_write_config_word(phba->pcidev, PCI_COMMAND,
1752 (cfg_value &
1753 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1754
1c067a42 1755 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1756 /* Now toggle INITFF bit in the Host Control Register */
1757 writel(HC_INITFF, phba->HCregaddr);
1758 mdelay(1);
1759 readl(phba->HCregaddr); /* flush */
1760 writel(0, phba->HCregaddr);
1761 readl(phba->HCregaddr); /* flush */
1762
1763 /* Restore PCI cmd register */
1764 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1765
1766 /* Initialize relevant SLI info */
1767 for (i = 0; i < psli->num_rings; i++) {
1768 pring = &psli->ring[i];
1769 pring->flag = 0;
1770 pring->rspidx = 0;
1771 pring->next_cmdidx = 0;
1772 pring->local_getidx = 0;
1773 pring->cmdidx = 0;
1774 pring->missbufcnt = 0;
1775 }
dea3101e 1776
2e0fef85 1777 phba->link_state = LPFC_WARM_START;
1778 return 0;
1779}
1780
1781int
2e0fef85 1782lpfc_sli_brdrestart(struct lpfc_hba *phba)
1783{
1784 MAILBOX_t *mb;
1785 struct lpfc_sli *psli;
1786 uint16_t skip_post;
1787 volatile uint32_t word0;
1788 void __iomem *to_slim;
1789
2e0fef85 1790 spin_lock_irq(&phba->hbalock);
1791
1792 psli = &phba->sli;
1793
1794 /* Restart HBA */
1795 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
dca9479b 1796 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
2e0fef85 1797 phba->pport->port_state, psli->sli_flag);
1798
1799 word0 = 0;
1800 mb = (MAILBOX_t *) &word0;
1801 mb->mbxCommand = MBX_RESTART;
1802 mb->mbxHc = 1;
1803
1804 lpfc_reset_barrier(phba);
1805
1806 to_slim = phba->MBslimaddr;
1807 writel(*(uint32_t *) mb, to_slim);
1808 readl(to_slim); /* flush */
1809
1810 /* Only skip post after fc_ffinit is completed */
2e0fef85 1811 if (phba->pport->port_state) {
1812 skip_post = 1;
1813 word0 = 1; /* This is really setting up word1 */
dea3101e 1814 } else {
1815 skip_post = 0;
1816 word0 = 0; /* This is really setting up word1 */
dea3101e 1817 }
65a29c16 1818 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1819 writel(*(uint32_t *) mb, to_slim);
1820 readl(to_slim); /* flush */
dea3101e 1821
41415862 1822 lpfc_sli_brdreset(phba);
1823 phba->pport->stopped = 0;
1824 phba->link_state = LPFC_INIT_START;
41415862 1825
2e0fef85 1826 spin_unlock_irq(&phba->hbalock);
41415862 1827
1828 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1829 psli->stats_start = get_seconds();
1830
1831 if (skip_post)
1832 mdelay(100);
1833 else
1834 mdelay(2000);
1835
1836 lpfc_hba_down_post(phba);
1837
1838 return 0;
1839}
1840
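/*
 * lpfc_sli_chipset_init() waits for the chipset to raise FFRDY and MBRDY
 * in the Host Status register, using the same escalating delays as
 * lpfc_sli_brdready(), then clears the interrupt enables and the host
 * attention register.  Returns 0, -ETIMEDOUT or -EIO.
 */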
1841static int
1842lpfc_sli_chipset_init(struct lpfc_hba *phba)
1843{
1844 uint32_t status, i = 0;
1845
1846 /* Read the HBA Host Status Register */
1847 status = readl(phba->HSregaddr);
1848
1849 /* Check status register to see what current state is */
1850 i = 0;
1851 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
1852
1853 /* Check every 10ms for 5 retries, then every 500ms for 5, then
1854 * every 2.5 sec for 5, then reset board and every 2.5 sec for
1855 * 4.
1856 */
1857 if (i++ >= 20) {
1858 /* Adapter failed to init, timeout, status reg
1859 <status> */
1860 lpfc_printf_log(phba,
1861 KERN_ERR,
1862 LOG_INIT,
1863 "%d:0436 Adapter failed to init, "
1864 "timeout, status reg x%x\n",
1865 phba->brd_no,
1866 status);
2e0fef85 1867 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
1868 return -ETIMEDOUT;
1869 }
1870
1871 /* Check to see if any errors occurred during init */
1872 if (status & HS_FFERM) {
1873 /* ERROR: During chipset initialization */
1874 /* Adapter failed to init, chipset, status reg
1875 <status> */
1876 lpfc_printf_log(phba,
1877 KERN_ERR,
1878 LOG_INIT,
1879 "%d:0437 Adapter failed to init, "
1880 "chipset, status reg x%x\n",
1881 phba->brd_no,
1882 status);
2e0fef85 1883 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
1884 return -EIO;
1885 }
1886
1887 if (i <= 5) {
1888 msleep(10);
1889 } else if (i <= 10) {
1890 msleep(500);
1891 } else {
1892 msleep(2500);
1893 }
1894
1895 if (i == 15) {
2e0fef85
JS
1896 /* Do post */
1897 phba->pport->port_state = LPFC_STATE_UNKNOWN;
41415862 1898 lpfc_sli_brdrestart(phba);
dea3101e
JB
1899 }
1900 /* Read the HBA Host Status Register */
1901 status = readl(phba->HSregaddr);
1902 }
1903
1904 /* Check to see if any errors occurred during init */
1905 if (status & HS_FFERM) {
1906 /* ERROR: During chipset initialization */
1907 /* Adapter failed to init, chipset, status reg <status> */
1908 lpfc_printf_log(phba,
1909 KERN_ERR,
1910 LOG_INIT,
1911 "%d:0438 Adapter failed to init, chipset, "
1912 "status reg x%x\n",
1913 phba->brd_no,
1914 status);
2e0fef85 1915 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
1916 return -EIO;
1917 }
1918
1919 /* Clear all interrupt enable conditions */
1920 writel(0, phba->HCregaddr);
1921 readl(phba->HCregaddr); /* flush */
1922
1923 /* setup host attn register */
1924 writel(0xffffffff, phba->HAregaddr);
1925 readl(phba->HAregaddr); /* flush */
1926 return 0;
1927}
1928
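/*
 * lpfc_sli_hba_setup - bring the adapter from reset to an operational SLI2
 * state: restart the board (at most twice), run chipset init, issue the
 * CONFIG_PORT mailbox by polling, map the rings, and finish with
 * lpfc_config_port_post().  Any failure leaves the HBA in LPFC_HBA_ERROR.
 */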
1929int
2e0fef85 1930lpfc_sli_hba_setup(struct lpfc_hba *phba)
dea3101e
JB
1931{
1932 LPFC_MBOXQ_t *pmb;
1933 uint32_t resetcount = 0, rc = 0, done = 0;
1934
1935 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1936 if (!pmb) {
2e0fef85 1937 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
1938 return -ENOMEM;
1939 }
1940
1941 while (resetcount < 2 && !done) {
2e0fef85 1942 spin_lock_irq(&phba->hbalock);
1c067a42 1943 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85
JS
1944 spin_unlock_irq(&phba->hbalock);
1945 phba->pport->port_state = LPFC_STATE_UNKNOWN;
41415862 1946 lpfc_sli_brdrestart(phba);
dea3101e
JB
1947 msleep(2500);
1948 rc = lpfc_sli_chipset_init(phba);
1949 if (rc)
1950 break;
1951
2e0fef85 1952 spin_lock_irq(&phba->hbalock);
1c067a42 1953 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 1954 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
1955 resetcount++;
1956
1957 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
1958 * means the call was successful. Any other nonzero value is a failure,
1959 * but if ERESTART is returned, the driver may reset the HBA and try
1960 * again.
1961 */
1962 rc = lpfc_config_port_prep(phba);
1963 if (rc == -ERESTART) {
2e0fef85 1964 phba->pport->port_state = 0;
dea3101e
JB
1965 continue;
1966 } else if (rc) {
1967 break;
1968 }
1969
2e0fef85 1970 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e
JB
1971 lpfc_config_port(phba, pmb);
1972 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1973 if (rc == MBX_SUCCESS)
1974 done = 1;
1975 else {
1976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1977 "%d:0442 Adapter failed to init, mbxCmd x%x "
1978 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1979 phba->brd_no, pmb->mb.mbxCommand,
1980 pmb->mb.mbxStatus, 0);
2e0fef85 1981 spin_lock_irq(&phba->hbalock);
dea3101e 1982 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2e0fef85
JS
1983 spin_unlock_irq(&phba->hbalock);
1984 rc = -ENXIO;
dea3101e
JB
1985 }
1986 }
1987 if (!done)
1988 goto lpfc_sli_hba_setup_error;
1989
1990 rc = lpfc_sli_ring_map(phba, pmb);
1991
1992 if (rc)
1993 goto lpfc_sli_hba_setup_error;
1994
1995 phba->sli.sli_flag |= LPFC_PROCESS_LA;
1996
1997 rc = lpfc_config_port_post(phba);
1998 if (rc)
1999 goto lpfc_sli_hba_setup_error;
2000
2001 goto lpfc_sli_hba_setup_exit;
2002lpfc_sli_hba_setup_error:
2e0fef85 2003 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
2004lpfc_sli_hba_setup_exit:
2005 mempool_free(pmb, phba->mbox_mem_pool);
2006 return rc;
2007}
2008
dea3101e
JB
2009/*! lpfc_mbox_timeout
2010 *
2011 * \pre
2012 * \post
2013 * \param ptr Pointer to the driver's struct lpfc_hba, passed by the timer
2014 * code as an unsigned long.
2015 * \return
2016 * void
2017 *
2018 * \b Description:
2019 *
2020 * This routine handles mailbox timeout events at timer interrupt context.
2021 */
2022void
2023lpfc_mbox_timeout(unsigned long ptr)
2024{
2e0fef85 2025 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
dea3101e 2026 unsigned long iflag;
2e0fef85 2027 uint32_t tmo_posted;
dea3101e 2028
2e0fef85
JS
2029 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2030 tmo_posted = (phba->pport->work_port_events & WORKER_MBOX_TMO) == 0;
2031 if (!tmo_posted)
2032 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2033 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2034
2035 if (!tmo_posted) {
dea3101e
JB
2036 if (phba->work_wait)
2037 wake_up(phba->work_wait);
2038 }
dea3101e
JB
2039}
2040
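/*
 * lpfc_mbox_timeout_handler - worker-thread half of the mailbox timeout.
 * Logs the stuck command, marks the link state unknown and drops out of SLI2
 * mode so queued I/O fails fast, aborts the FCP ring, and then takes the port
 * offline and restarts the board to recover.
 */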
2041void
2042lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2043{
2e0fef85
JS
2044 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2045 MAILBOX_t *mb = &pmbox->mb;
1dcb58e5
JS
2046 struct lpfc_sli *psli = &phba->sli;
2047 struct lpfc_sli_ring *pring;
dea3101e 2048
2e0fef85 2049 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
dea3101e
JB
2050 return;
2051 }
2052
dea3101e
JB
2053 /* Mbox cmd <mbxCommand> timeout */
2054 lpfc_printf_log(phba,
2055 KERN_ERR,
2056 LOG_MBOX | LOG_SLI,
2057 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2058 phba->brd_no,
2059 mb->mbxCommand,
2e0fef85 2060 phba->pport->port_state,
dea3101e
JB
2061 phba->sli.sli_flag,
2062 phba->sli.mbox_active);
2063
1dcb58e5
JS
2064 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2065 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2066 * it to fail all outstanding SCSI IO.
2067 */
2e0fef85
JS
2068 spin_lock_irq(&phba->pport->work_port_lock);
2069 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2070 spin_unlock_irq(&phba->pport->work_port_lock);
2071 spin_lock_irq(&phba->hbalock);
2072 phba->link_state = LPFC_LINK_UNKNOWN;
2073 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
1dcb58e5 2074 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2e0fef85 2075 spin_unlock_irq(&phba->hbalock);
1dcb58e5
JS
2076
2077 pring = &psli->ring[psli->fcp_ring];
2078 lpfc_sli_abort_iocb_ring(phba, pring);
2079
2080 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2081 "%d:0316 Resetting board due to mailbox timeout\n",
2082 phba->brd_no);
2083 /*
2084 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2085 * any outstanding mailbox commands.
2086 */
2087 lpfc_offline_prep(phba);
2088 lpfc_offline(phba);
2089 lpfc_sli_brdrestart(phba);
2090 if (lpfc_online(phba) == 0) /* Initialize the HBA */
2091 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2092 lpfc_unblock_mgmt_io(phba);
dea3101e
JB
2093 return;
2094}
2095
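/*
 * lpfc_sli_issue_mbox - post a mailbox command to the adapter.  With MBX_POLL
 * the routine waits for completion (sleeping 1 ms between checks with hbalock
 * dropped) and returns the command's mbxStatus; with MBX_NOWAIT it starts the
 * command and lets the completion path finish it.  Returns MBX_BUSY if another
 * mailbox was active and this one was queued instead, or MBX_NOT_FINISHED on
 * error.  A typical polled caller is the CONFIG_PORT issue in
 * lpfc_sli_hba_setup() above.
 */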
2096int
2e0fef85 2097lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
dea3101e 2098{
dea3101e 2099 MAILBOX_t *mb;
2e0fef85 2100 struct lpfc_sli *psli = &phba->sli;
dea3101e
JB
2101 uint32_t status, evtctr;
2102 uint32_t ha_copy;
2103 int i;
2104 unsigned long drvr_flag = 0;
2105 volatile uint32_t word0, ldata;
2106 void __iomem *to_slim;
2107
8d63f375
LV
2108 /* If the PCI channel is in offline state, do not post mbox. */
2109 if (unlikely(pci_channel_offline(phba->pcidev)))
2110 return MBX_NOT_FINISHED;
2111
2e0fef85 2112 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e
JB
2113 psli = &phba->sli;
2114
dea3101e
JB
2115 mb = &pmbox->mb;
2116 status = MBX_SUCCESS;
2117
2e0fef85
JS
2118 if (phba->link_state == LPFC_HBA_ERROR) {
2119 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
2120
2121 /* Mbox command <mbxCommand> cannot issue */
2122 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2e0fef85 2123 return MBX_NOT_FINISHED;
41415862
JW
2124 }
2125
9290831f
JS
2126 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2127 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2e0fef85 2128 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9290831f 2129 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2e0fef85 2130 return MBX_NOT_FINISHED;
9290831f
JS
2131 }
2132
dea3101e
JB
2133 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2134 /* Polling for a mbox command when another one is already active
2135 * is not allowed in SLI. Also, the driver must have established
2136 * SLI2 mode to queue and process multiple mbox commands.
2137 */
2138
2139 if (flag & MBX_POLL) {
2e0fef85 2140 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
2141
2142 /* Mbox command <mbxCommand> cannot issue */
2e0fef85
JS
2143 LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
2144 return MBX_NOT_FINISHED;
dea3101e
JB
2145 }
2146
2147 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2e0fef85 2148 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2149 /* Mbox command <mbxCommand> cannot issue */
2e0fef85
JS
2150 LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
2151 return MBX_NOT_FINISHED;
dea3101e
JB
2152 }
2153
2154 /* Handle STOP IOCB processing flag. This is only meaningful
2155 * if we are not polling for mbox completion.
2156 */
2157 if (flag & MBX_STOP_IOCB) {
2158 flag &= ~MBX_STOP_IOCB;
2159 /* Now flag each ring */
2160 for (i = 0; i < psli->num_rings; i++) {
2161 /* If the ring is active, flag it */
2162 if (psli->ring[i].cmdringaddr) {
2163 psli->ring[i].flag |=
2164 LPFC_STOP_IOCB_MBX;
2165 }
2166 }
2167 }
2168
2169 /* Another mailbox command is still being processed, queue this
2170 * command to be processed later.
2171 */
2172 lpfc_mbox_put(phba, pmbox);
2173
2174 /* Mbox cmd issue - BUSY */
2175 lpfc_printf_log(phba,
2176 KERN_INFO,
2177 LOG_MBOX | LOG_SLI,
2178 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
2179 phba->brd_no,
2180 mb->mbxCommand,
2e0fef85 2181 phba->pport->port_state,
dea3101e
JB
2182 psli->sli_flag,
2183 flag);
2184
2185 psli->slistat.mbox_busy++;
2e0fef85 2186 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2187
2e0fef85 2188 return MBX_BUSY;
dea3101e
JB
2189 }
2190
2191 /* Handle STOP IOCB processing flag. This is only meaningful
2192 * if we are not polling for mbox completion.
2193 */
2194 if (flag & MBX_STOP_IOCB) {
2195 flag &= ~MBX_STOP_IOCB;
2196 if (flag == MBX_NOWAIT) {
2197 /* Now flag each ring */
2198 for (i = 0; i < psli->num_rings; i++) {
2199 /* If the ring is active, flag it */
2200 if (psli->ring[i].cmdringaddr) {
2201 psli->ring[i].flag |=
2202 LPFC_STOP_IOCB_MBX;
2203 }
2204 }
2205 }
2206 }
2207
2208 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2209
2210 /* If we are not polling, we MUST be in SLI2 mode */
2211 if (flag != MBX_POLL) {
41415862
JW
2212 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2213 (mb->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 2214 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 2215 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
2216 /* Mbox command <mbxCommand> cannot issue */
2217 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
2e0fef85 2218 return MBX_NOT_FINISHED;
dea3101e
JB
2219 }
2220 /* timeout active mbox command */
a309a6b6
JS
2221 mod_timer(&psli->mbox_tmo, (jiffies +
2222 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
dea3101e
JB
2223 }
2224
2225 /* Mailbox cmd <cmd> issue */
2226 lpfc_printf_log(phba,
2227 KERN_INFO,
2228 LOG_MBOX | LOG_SLI,
2229 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
2230 phba->brd_no,
2231 mb->mbxCommand,
2e0fef85 2232 phba->pport->port_state,
dea3101e 2233 psli->sli_flag,
2e0fef85 2234 flag);
dea3101e
JB
2235
2236 psli->slistat.mbox_cmd++;
2237 evtctr = psli->slistat.mbox_event;
2238
2239 /* next set own bit for the adapter and copy over command word */
2240 mb->mbxOwner = OWN_CHIP;
2241
2242 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
dea3101e 2243 /* First copy command data to host SLIM area */
4cc2da1d 2244 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
dea3101e 2245 } else {
9290831f 2246 if (mb->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 2247 /* copy command data into host mbox for cmpl */
4cc2da1d
JSEC
2248 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2249 MAILBOX_CMD_SIZE);
dea3101e
JB
2250 }
2251
2252 /* First copy mbox command data to HBA SLIM, skip past first
2253 word */
2254 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2255 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2256 MAILBOX_CMD_SIZE - sizeof (uint32_t));
2257
2258 /* Next copy over first word, with mbxOwner set */
2259 ldata = *((volatile uint32_t *)mb);
2260 to_slim = phba->MBslimaddr;
2261 writel(ldata, to_slim);
2262 readl(to_slim); /* flush */
2263
2264 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2265 /* switch over to host mailbox */
2266 psli->sli_flag |= LPFC_SLI2_ACTIVE;
2267 }
2268 }
2269
2270 wmb();
2271 /* interrupt board to doit right away */
2272 writel(CA_MBATT, phba->CAregaddr);
2273 readl(phba->CAregaddr); /* flush */
2274
2275 switch (flag) {
2276 case MBX_NOWAIT:
2277 /* Don't wait for it to finish, just return */
2278 psli->mbox_active = pmbox;
2279 break;
2280
2281 case MBX_POLL:
dea3101e
JB
2282 psli->mbox_active = NULL;
2283 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2284 /* First read mbox status word */
4cc2da1d 2285 word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
dea3101e
JB
2286 word0 = le32_to_cpu(word0);
2287 } else {
2288 /* First read mbox status word */
2289 word0 = readl(phba->MBslimaddr);
2290 }
2291
2292 /* Read the HBA Host Attention Register */
2293 ha_copy = readl(phba->HAregaddr);
2294
a309a6b6
JS
2295 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2296 i *= 1000; /* Convert to ms */
2297
dea3101e 2298 /* Wait for command to complete */
41415862
JW
2299 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2300 (!(ha_copy & HA_MBATT) &&
2e0fef85 2301 (phba->link_state > LPFC_WARM_START))) {
a309a6b6 2302 if (i-- <= 0) {
dea3101e 2303 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 2304 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 2305 drvr_flag);
2e0fef85 2306 return MBX_NOT_FINISHED;
dea3101e
JB
2307 }
2308
2309 /* Check if we took a mbox interrupt while we were
2310 polling */
2311 if (((word0 & OWN_CHIP) != OWN_CHIP)
2312 && (evtctr != psli->slistat.mbox_event))
2313 break;
2314
2e0fef85 2315 spin_unlock_irqrestore(&phba->hbalock,
dea3101e
JB
2316 drvr_flag);
2317
1dcb58e5 2318 msleep(1);
dea3101e 2319
2e0fef85 2320 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e
JB
2321
2322 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2323 /* First copy command data */
4cc2da1d
JSEC
2324 word0 = *((volatile uint32_t *)
2325 &phba->slim2p->mbx);
dea3101e
JB
2326 word0 = le32_to_cpu(word0);
2327 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2328 MAILBOX_t *slimmb;
2329 volatile uint32_t slimword0;
2330 /* Check real SLIM for any errors */
2331 slimword0 = readl(phba->MBslimaddr);
2332 slimmb = (MAILBOX_t *) & slimword0;
2333 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2334 && slimmb->mbxStatus) {
2335 psli->sli_flag &=
2336 ~LPFC_SLI2_ACTIVE;
2337 word0 = slimword0;
2338 }
2339 }
2340 } else {
2341 /* First copy command data */
2342 word0 = readl(phba->MBslimaddr);
2343 }
2344 /* Read the HBA Host Attention Register */
2345 ha_copy = readl(phba->HAregaddr);
2346 }
2347
2348 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
dea3101e 2349 /* copy results back to user */
4cc2da1d
JSEC
2350 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2351 MAILBOX_CMD_SIZE);
dea3101e
JB
2352 } else {
2353 /* First copy command data */
2354 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2355 MAILBOX_CMD_SIZE);
2356 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2357 pmbox->context2) {
2e0fef85 2358 lpfc_memcpy_from_slim((void *) pmbox->context2,
dea3101e
JB
2359 phba->MBslimaddr + DMP_RSP_OFFSET,
2360 mb->un.varDmp.word_cnt);
2361 }
2362 }
2363
2364 writel(HA_MBATT, phba->HAregaddr);
2365 readl(phba->HAregaddr); /* flush */
2366
2367 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2368 status = mb->mbxStatus;
2369 }
2370
2e0fef85
JS
2371 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2372 return status;
dea3101e
JB
2373}
2374
2375static int
2e0fef85
JS
2376lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2377 struct lpfc_iocbq *piocb)
dea3101e 2378{
2e0fef85
JS
2379 unsigned long iflags;
2380
dea3101e 2381 /* Insert the caller's iocb in the txq tail for later processing. */
2e0fef85 2382 spin_lock_irqsave(&phba->hbalock, iflags);
dea3101e
JB
2383 list_add_tail(&piocb->list, &pring->txq);
2384 pring->txq_cnt++;
2e0fef85
JS
2385 spin_unlock_irqrestore(&phba->hbalock, iflags);
2386 return 0;
dea3101e
JB
2387}
2388
2389static struct lpfc_iocbq *
2390lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 2391 struct lpfc_iocbq **piocb)
dea3101e
JB
2392{
2393 struct lpfc_iocbq * nextiocb;
2394
2395 nextiocb = lpfc_sli_ringtx_get(phba, pring);
2396 if (!nextiocb) {
2397 nextiocb = *piocb;
2398 *piocb = NULL;
2399 }
2400
2401 return nextiocb;
2402}
2403
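/*
 * lpfc_sli_issue_iocb - submit an iocb to a ring.  Returns IOCB_SUCCESS when
 * the command (or a previously queued one) was handed to the adapter or left
 * on the txq for later, IOCB_BUSY when the caller asked to keep ownership
 * (SLI_IOCB_RET_IOCB) and the ring could not take it, and IOCB_ERROR when the
 * link or PCI channel state forbids the command.  While the link is down only
 * QUE_RING_BUF, CREATE_XRI and CLOSE_XRI commands are let through.
 */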
2404int
2405lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2406 struct lpfc_iocbq *piocb, uint32_t flag)
2407{
2408 struct lpfc_iocbq *nextiocb;
2e0fef85 2409 unsigned long iflags;
dea3101e
JB
2410 IOCB_t *iocb;
2411
8d63f375
LV
2412 /* If the PCI channel is in offline state, do not post iocbs. */
2413 if (unlikely(pci_channel_offline(phba->pcidev)))
2414 return IOCB_ERROR;
2415
dea3101e
JB
2416 /*
2417 * We should never get an IOCB if we are in a < LINK_DOWN state
2418 */
2e0fef85 2419 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e
JB
2420 return IOCB_ERROR;
2421
2422 /*
2423 * Check to see if we are blocking IOCB processing because of a
2424 * outstanding mbox command.
2425 */
2426 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2427 goto iocb_busy;
2428
2e0fef85 2429 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 2430 /*
2680eeaa 2431 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e
JB
2432 * can be issued if the link is not up.
2433 */
2434 switch (piocb->iocb.ulpCommand) {
2435 case CMD_QUE_RING_BUF_CN:
2436 case CMD_QUE_RING_BUF64_CN:
dea3101e
JB
2437 /*
2438 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2439 * completion, iocb_cmpl MUST be 0.
2440 */
2441 if (piocb->iocb_cmpl)
2442 piocb->iocb_cmpl = NULL;
2443 /*FALLTHROUGH*/
2444 case CMD_CREATE_XRI_CR:
2680eeaa
JS
2445 case CMD_CLOSE_XRI_CN:
2446 case CMD_CLOSE_XRI_CX:
dea3101e
JB
2447 break;
2448 default:
2449 goto iocb_busy;
2450 }
2451
2452 /*
2453 * For FCP commands, we must be in a state where we can process link
2454 * attention events.
2455 */
2456 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2457 !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
2458 goto iocb_busy;
2459
2e0fef85 2460 spin_lock_irqsave(&phba->hbalock, iflags);
dea3101e
JB
2461 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2462 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2463 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2464
2465 if (iocb)
2466 lpfc_sli_update_ring(phba, pring);
2467 else
2468 lpfc_sli_update_full_ring(phba, pring);
2e0fef85 2469 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e
JB
2470
2471 if (!piocb)
2472 return IOCB_SUCCESS;
2473
2474 goto out_busy;
2475
2476 iocb_busy:
2e0fef85 2477 spin_lock_irqsave(&phba->hbalock, iflags);
dea3101e 2478 pring->stats.iocb_cmd_delay++;
2e0fef85 2479 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e
JB
2480
2481 out_busy:
2482
2483 if (!(flag & SLI_IOCB_RET_IOCB)) {
2484 lpfc_sli_ringtx_put(phba, pring, piocb);
2485 return IOCB_SUCCESS;
2486 }
2487
2488 return IOCB_BUSY;
2489}
2490
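/*
 * lpfc_extra_ring_setup - rebalance iocb entries when a second (extra) ring is
 * configured: the R1XTRA/R3XTRA command and response entries are taken from
 * the FCP ring and given to the extra ring, whose single unsolicited mask is
 * driven by the cfg_multi_ring_rctl and cfg_multi_ring_type settings.
 */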
cf5bf97e
JW
2491static int
2492lpfc_extra_ring_setup( struct lpfc_hba *phba)
2493{
2494 struct lpfc_sli *psli;
2495 struct lpfc_sli_ring *pring;
2496
2497 psli = &phba->sli;
2498
2499 /* Adjust cmd/rsp ring iocb entries more evenly */
a4bc3379
JS
2500
2501 /* Take some away from the FCP ring */
cf5bf97e
JW
2502 pring = &psli->ring[psli->fcp_ring];
2503 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2504 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2505 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2506 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2507
a4bc3379
JS
2508 /* and give them to the extra ring */
2509 pring = &psli->ring[psli->extra_ring];
2510
cf5bf97e
JW
2511 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2512 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2513 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2514 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2515
2516 /* Setup default profile for this ring */
2517 pring->iotag_max = 4096;
2518 pring->num_mask = 1;
2519 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
2520 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2521 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
2522 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2523 return 0;
2524}
2525
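/*
 * lpfc_sli_setup - set the per-ring defaults: entry counts, iotag limits, and
 * the unsolicited-event masks used to route ELS and CT (NameServer) traffic.
 * Logs an error if the configured entries exceed what fits in SLI2 SLIM, and
 * calls lpfc_extra_ring_setup() when multi-ring support is enabled.
 */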
dea3101e
JB
2526int
2527lpfc_sli_setup(struct lpfc_hba *phba)
2528{
2529 int i, totiocb = 0;
2530 struct lpfc_sli *psli = &phba->sli;
2531 struct lpfc_sli_ring *pring;
2532
2533 psli->num_rings = MAX_CONFIGURED_RINGS;
2534 psli->sli_flag = 0;
2535 psli->fcp_ring = LPFC_FCP_RING;
2536 psli->next_ring = LPFC_FCP_NEXT_RING;
a4bc3379 2537 psli->extra_ring = LPFC_EXTRA_RING;
dea3101e 2538
604a3e30
JB
2539 psli->iocbq_lookup = NULL;
2540 psli->iocbq_lookup_len = 0;
2541 psli->last_iotag = 0;
2542
dea3101e
JB
2543 for (i = 0; i < psli->num_rings; i++) {
2544 pring = &psli->ring[i];
2545 switch (i) {
2546 case LPFC_FCP_RING: /* ring 0 - FCP */
2547 /* numCiocb and numRiocb are used in config_port */
2548 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2549 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2550 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2551 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2552 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2553 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2554 pring->iotag_ctr = 0;
2555 pring->iotag_max =
2556 (phba->cfg_hba_queue_depth * 2);
2557 pring->fast_iotag = pring->iotag_max;
2558 pring->num_mask = 0;
2559 break;
a4bc3379 2560 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e
JB
2561 /* numCiocb and numRiocb are used in config_port */
2562 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2563 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2e0fef85 2564 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e
JB
2565 pring->num_mask = 0;
2566 break;
2567 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2568 /* numCiocb and numRiocb are used in config_port */
2569 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2570 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2571 pring->fast_iotag = 0;
2572 pring->iotag_ctr = 0;
2573 pring->iotag_max = 4096;
2574 pring->num_mask = 4;
2575 pring->prt[0].profile = 0; /* Mask 0 */
2576 pring->prt[0].rctl = FC_ELS_REQ;
2577 pring->prt[0].type = FC_ELS_DATA;
2578 pring->prt[0].lpfc_sli_rcv_unsol_event =
2579 lpfc_els_unsol_event;
2580 pring->prt[1].profile = 0; /* Mask 1 */
2581 pring->prt[1].rctl = FC_ELS_RSP;
2582 pring->prt[1].type = FC_ELS_DATA;
2583 pring->prt[1].lpfc_sli_rcv_unsol_event =
2584 lpfc_els_unsol_event;
2585 pring->prt[2].profile = 0; /* Mask 2 */
2586 /* NameServer Inquiry */
2587 pring->prt[2].rctl = FC_UNSOL_CTL;
2588 /* NameServer */
2589 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
2590 pring->prt[2].lpfc_sli_rcv_unsol_event =
2591 lpfc_ct_unsol_event;
2592 pring->prt[3].profile = 0; /* Mask 3 */
2593 /* NameServer response */
2594 pring->prt[3].rctl = FC_SOL_CTL;
2595 /* NameServer */
2596 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
2597 pring->prt[3].lpfc_sli_rcv_unsol_event =
2598 lpfc_ct_unsol_event;
2599 break;
2600 }
2601 totiocb += (pring->numCiocb + pring->numRiocb);
2602 }
2603 if (totiocb > MAX_SLI2_IOCB) {
2604 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2606 "%d:0462 Too many cmd / rsp ring entries in "
2607 "SLI2 SLIM Data: x%x x%x\n",
2608 phba->brd_no, totiocb, MAX_SLI2_IOCB);
2609 }
cf5bf97e
JW
2610 if (phba->cfg_multi_ring_support == 2)
2611 lpfc_extra_ring_setup(phba);
dea3101e
JB
2612
2613 return 0;
2614}
2615
2616int
2e0fef85 2617lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea3101e
JB
2618{
2619 struct lpfc_sli *psli;
2620 struct lpfc_sli_ring *pring;
604a3e30 2621 int i;
dea3101e
JB
2622
2623 psli = &phba->sli;
2e0fef85 2624 spin_lock_irq(&phba->hbalock);
dea3101e
JB
2625 INIT_LIST_HEAD(&psli->mboxq);
2626 /* Initialize list headers for txq and txcmplq as double linked lists */
2627 for (i = 0; i < psli->num_rings; i++) {
2628 pring = &psli->ring[i];
2629 pring->ringno = i;
2630 pring->next_cmdidx = 0;
2631 pring->local_getidx = 0;
2632 pring->cmdidx = 0;
2633 INIT_LIST_HEAD(&pring->txq);
2634 INIT_LIST_HEAD(&pring->txcmplq);
2635 INIT_LIST_HEAD(&pring->iocb_continueq);
2636 INIT_LIST_HEAD(&pring->postbufq);
dea3101e 2637 }
2e0fef85
JS
2638 spin_unlock_irq(&phba->hbalock);
2639 return 1;
dea3101e
JB
2640}
2641
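/*
 * lpfc_sli_hba_down - tear down SLI activity for an HBA going offline.  Fails
 * everything still on the txq with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, cancels
 * the mailbox timer, and completes the active plus any queued mailbox commands
 * with MBX_NOT_FINISHED.
 */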
2642int
2e0fef85 2643lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 2644{
2534ba75 2645 LIST_HEAD(completions);
2e0fef85 2646 struct lpfc_sli *psli = &phba->sli;
dea3101e
JB
2647 struct lpfc_sli_ring *pring;
2648 LPFC_MBOXQ_t *pmb;
2534ba75
JS
2649 struct lpfc_iocbq *iocb;
2650 IOCB_t *cmd = NULL;
dea3101e
JB
2651 int i;
2652 unsigned long flags = 0;
2653
dea3101e
JB
2654 lpfc_hba_down_prep(phba);
2655
2e0fef85 2656 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e
JB
2657 for (i = 0; i < psli->num_rings; i++) {
2658 pring = &psli->ring[i];
2659 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2660
2661 /*
2662 * Error everything on the txq since these iocbs have not been
2663 * given to the FW yet.
2664 */
2534ba75 2665 list_splice_init(&pring->txq, &completions);
dea3101e
JB
2666 pring->txq_cnt = 0;
2667
2534ba75 2668 }
2e0fef85 2669 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 2670
2534ba75
JS
2671 while (!list_empty(&completions)) {
2672 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2673 cmd = &iocb->iocb;
2674 list_del(&iocb->list);
dea3101e 2675
2e0fef85
JS
2676 if (!iocb->iocb_cmpl)
2677 lpfc_sli_release_iocbq(phba, iocb);
2678 else {
2534ba75
JS
2679 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2680 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2681 (iocb->iocb_cmpl) (phba, iocb, iocb);
2e0fef85 2682 }
dea3101e
JB
2683 }
2684
dea3101e
JB
2685 /* Return any active mbox cmds */
2686 del_timer_sync(&psli->mbox_tmo);
2e0fef85
JS
2687
2688 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2689 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2690 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2691
2692 pmb = psli->mbox_active;
2693 if (pmb) {
2694 psli->mbox_active = NULL;
dea3101e 2695 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2e0fef85 2696 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
dea3101e 2697 if (pmb->mbox_cmpl) {
dea3101e 2698 pmb->mbox_cmpl(phba,pmb);
dea3101e
JB
2699 }
2700 }
dea3101e
JB
2701
2702 /* Return any pending mbox cmds */
2703 while ((pmb = lpfc_mbox_get(phba)) != NULL) {
2704 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2705 if (pmb->mbox_cmpl) {
dea3101e 2706 pmb->mbox_cmpl(phba,pmb);
dea3101e
JB
2707 }
2708 }
dea3101e
JB
2709 INIT_LIST_HEAD(&psli->mboxq);
2710
dea3101e
JB
2711 return 1;
2712}
2713
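/*
 * lpfc_sli_pcimem_bcopy - copy cnt bytes as 32-bit words, byte-swapping each
 * word from little-endian (SLIM / host mailbox layout) to CPU order.  The loop
 * assumes cnt is a whole number of words.
 */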
2714void
2715lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2716{
2717 uint32_t *src = srcp;
2718 uint32_t *dest = destp;
2719 uint32_t ldata;
2720 int i;
2721
2722 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2723 ldata = *src;
2724 ldata = le32_to_cpu(ldata);
2725 *dest = ldata;
2726 src++;
2727 dest++;
2728 }
2729}
2730
2731int
2e0fef85
JS
2732lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2733 struct lpfc_dmabuf *mp)
dea3101e
JB
2734{
2735 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2736 later */
2e0fef85 2737 spin_lock_irq(&phba->hbalock);
dea3101e 2738 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 2739 pring->postbufq_cnt++;
2e0fef85 2740 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
2741 return 0;
2742}
2743
2744
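/*
 * lpfc_sli_ringpostbuf_get - look up and unlink the posted buffer whose DMA
 * address matches phys.  Returns the lpfc_dmabuf on success, or NULL (with an
 * error log) if no posted buffer on this ring maps to that address.
 */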
2745struct lpfc_dmabuf *
2746lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2747 dma_addr_t phys)
2748{
2749 struct lpfc_dmabuf *mp, *next_mp;
2750 struct list_head *slp = &pring->postbufq;
2751
2752 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 2753 spin_lock_irq(&phba->hbalock);
dea3101e
JB
2754 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2755 if (mp->phys == phys) {
2756 list_del_init(&mp->list);
2757 pring->postbufq_cnt--;
2e0fef85 2758 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
2759 return mp;
2760 }
2761 }
2762
2e0fef85 2763 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
2764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2765 "%d:0410 Cannot find virtual addr for mapped buf on "
2766 "ring %d Data x%llx x%p x%p x%x\n",
2e0fef85 2767 phba->brd_no, pring->ringno, (unsigned long long) phys,
dea3101e
JB
2768 slp->next, slp->prev, pring->postbufq_cnt);
2769 return NULL;
2770}
2771
2772static void
2e0fef85
JS
2773lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2774 struct lpfc_iocbq *rspiocb)
dea3101e 2775{
2e0fef85 2776 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa
JS
2777 uint16_t abort_iotag, abort_context;
2778 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
2779 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2780
2781 abort_iocb = NULL;
2680eeaa
JS
2782
2783 if (irsp->ulpStatus) {
2784 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
2785 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
2786
2e0fef85 2787 spin_lock_irq(&phba->hbalock);
2680eeaa
JS
2788 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
2789 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
2790
2791 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2792 "%d:0327 Cannot abort els iocb %p"
2793 " with tag %x context %x\n",
2794 phba->brd_no, abort_iocb,
2795 abort_iotag, abort_context);
2796
2797 /*
2798 * make sure we have the right iocbq before taking it
2799 * off the txcmplq and try to call completion routine.
2800 */
2e0fef85
JS
2801 if (!abort_iocb ||
2802 abort_iocb->iocb.ulpContext != abort_context ||
2803 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
2804 spin_unlock_irq(&phba->hbalock);
2805 else {
2680eeaa
JS
2806 list_del(&abort_iocb->list);
2807 pring->txcmplq_cnt--;
2e0fef85 2808 spin_unlock_irq(&phba->hbalock);
2680eeaa
JS
2809
2810 rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
2811 if (rsp_ab_iocb == NULL)
2812 lpfc_sli_release_iocbq(phba, abort_iocb);
2813 else {
2e0fef85 2814 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2680eeaa
JS
2815 rsp_ab_iocb->iocb.ulpStatus =
2816 IOSTAT_LOCAL_REJECT;
2817 rsp_ab_iocb->iocb.un.ulpWord[4] =
2818 IOERR_SLI_ABORTED;
2e0fef85
JS
2819 (abort_iocb->iocb_cmpl)(phba, abort_iocb,
2820 rsp_ab_iocb);
2680eeaa
JS
2821 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
2822 }
2823 }
2824 }
2825
604a3e30 2826 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e
JB
2827 return;
2828}
2829
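/*
 * lpfc_sli_issue_abort_iotag - build and issue an ABTS (ABORT_XRI, or
 * CLOSE_XRI when the link is down) for the given command iocb.  The command is
 * flagged LPFC_DRIVER_ABORTED so lpfc_sli_abort_els_cmpl() above can match it
 * up later; callers must check for IOCB_ERROR, since this routine no longer
 * completes the original iocb itself.
 */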
2830int
2e0fef85
JS
2831lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2832 struct lpfc_iocbq *cmdiocb)
dea3101e 2833{
2e0fef85 2834 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 2835 struct lpfc_iocbq *abtsiocbp;
dea3101e
JB
2836 IOCB_t *icmd = NULL;
2837 IOCB_t *iabt = NULL;
07951076
JS
2838 int retval = IOCB_ERROR;
2839
2840 /* There are certain command types we don't want
2841 * to abort.
2842 */
2843 icmd = &cmdiocb->iocb;
2e0fef85
JS
2844 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
2845 icmd->ulpCommand == CMD_CLOSE_XRI_CN)
07951076
JS
2846 return 0;
2847
2848 /* If we're unloading, interrupts are disabled so we
2849 * need to cleanup the iocb here.
2850 */
2e0fef85 2851 if (vport->load_flag & FC_UNLOADING)
07951076 2852 goto abort_iotag_exit;
dea3101e
JB
2853
2854 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 2855 abtsiocbp = lpfc_sli_get_iocbq(phba);
dea3101e
JB
2856 if (abtsiocbp == NULL)
2857 return 0;
dea3101e 2858
07951076
JS
2859 /* This signals the response to set the correct status
2860 * before calling the completion handler.
2861 */
2862 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
2863
dea3101e 2864 iabt = &abtsiocbp->iocb;
07951076
JS
2865 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
2866 iabt->un.acxri.abortContextTag = icmd->ulpContext;
2867 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
2868 iabt->ulpLe = 1;
2869 iabt->ulpClass = icmd->ulpClass;
dea3101e 2870
2e0fef85 2871 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
2872 iabt->ulpCommand = CMD_ABORT_XRI_CN;
2873 else
2874 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 2875
07951076 2876 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
5b8bd0c9
JS
2877
2878 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2879 "%d:0339 Abort xri x%x, original iotag x%x, abort "
2880 "cmd iotag x%x\n",
2881 phba->brd_no, iabt->un.acxri.abortContextTag,
2882 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
07951076 2883 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
dea3101e 2884
07951076 2885abort_iotag_exit:
2e0fef85
JS
2886 /*
2887 * Caller to this routine should check for IOCB_ERROR
2888 * and handle it properly. This routine no longer removes
2889 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 2890 */
2e0fef85 2891 return retval;
dea3101e
JB
2892}
2893
2894static int
0bd4ca25
JSEC
2895lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2896 uint64_t lun_id, uint32_t ctx,
2897 lpfc_ctx_cmd ctx_cmd)
dea3101e 2898{
0bd4ca25
JSEC
2899 struct lpfc_scsi_buf *lpfc_cmd;
2900 struct scsi_cmnd *cmnd;
dea3101e
JB
2901 int rc = 1;
2902
0bd4ca25
JSEC
2903 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
2904 return rc;
2905
2906 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
2907 cmnd = lpfc_cmd->pCmd;
2908
2909 if (cmnd == NULL)
dea3101e
JB
2910 return rc;
2911
2912 switch (ctx_cmd) {
2913 case LPFC_CTX_LUN:
0bd4ca25
JSEC
2914 if ((cmnd->device->id == tgt_id) &&
2915 (cmnd->device->lun == lun_id))
dea3101e
JB
2916 rc = 0;
2917 break;
2918 case LPFC_CTX_TGT:
0bd4ca25 2919 if (cmnd->device->id == tgt_id)
dea3101e
JB
2920 rc = 0;
2921 break;
2922 case LPFC_CTX_CTX:
0bd4ca25 2923 if (iocbq->iocb.ulpContext == ctx)
dea3101e 2924 rc = 0;
0bd4ca25 2925 break;
dea3101e
JB
2926 case LPFC_CTX_HOST:
2927 rc = 0;
2928 break;
2929 default:
2930 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
2931 __FUNCTION__, ctx_cmd);
2932 break;
2933 }
2934
2935 return rc;
2936}
2937
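/*
 * lpfc_sli_sum_iocb - walk the iotag lookup array and count the outstanding
 * FCP iocbs that match the given target/LUN/host context.
 */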
2938int
2939lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 2940 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
dea3101e 2941{
0bd4ca25
JSEC
2942 struct lpfc_iocbq *iocbq;
2943 int sum, i;
dea3101e 2944
0bd4ca25
JSEC
2945 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
2946 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 2947
0bd4ca25
JSEC
2948 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2949 0, ctx_cmd) == 0)
2950 sum++;
dea3101e 2951 }
0bd4ca25 2952
dea3101e
JB
2953 return sum;
2954}
2955
5eb95af0 2956void
2e0fef85
JS
2957lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2958 struct lpfc_iocbq *rspiocb)
5eb95af0 2959{
604a3e30 2960 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
2961 return;
2962}
2963
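/*
 * lpfc_sli_abort_iocb - walk the iotag lookup array and issue an ABTS (or
 * CLOSE_XRI when the link is down) for every FCP iocb matching the given
 * target/LUN/host context.  Returns the number of iocbs it failed to abort.
 */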
dea3101e
JB
2964int
2965lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2966 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2967 lpfc_ctx_cmd abort_cmd)
2968{
0bd4ca25
JSEC
2969 struct lpfc_iocbq *iocbq;
2970 struct lpfc_iocbq *abtsiocb;
dea3101e 2971 IOCB_t *cmd = NULL;
dea3101e 2972 int errcnt = 0, ret_val = 0;
0bd4ca25 2973 int i;
dea3101e 2974
0bd4ca25
JSEC
2975 for (i = 1; i <= phba->sli.last_iotag; i++) {
2976 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 2977
2e0fef85
JS
2978 if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
2979 abort_cmd) != 0)
dea3101e
JB
2980 continue;
2981
2982 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 2983 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e
JB
2984 if (abtsiocb == NULL) {
2985 errcnt++;
2986 continue;
2987 }
dea3101e 2988
0bd4ca25 2989 cmd = &iocbq->iocb;
dea3101e
JB
2990 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2991 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2992 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2993 abtsiocb->iocb.ulpLe = 1;
2994 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2e0fef85 2995 abtsiocb->vport = phba->pport;
dea3101e 2996
2e0fef85 2997 if (lpfc_is_link_up(phba))
dea3101e
JB
2998 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2999 else
3000 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3001
5eb95af0
JSEC
3002 /* Setup callback routine and issue the command. */
3003 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
dea3101e
JB
3004 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3005 if (ret_val == IOCB_ERROR) {
604a3e30 3006 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e
JB
3007 errcnt++;
3008 continue;
3009 }
3010 }
3011
3012 return errcnt;
3013}
3014
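/*
 * lpfc_sli_wake_iocb_wait - completion handler used by the synchronous
 * issue-and-wait path below: copies the response iocb into the caller's
 * buffer (context2), sets LPFC_IO_WAKE, and wakes the waiter parked on the
 * wait queue stored in context_un.wait_queue.
 */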
68876920
JSEC
3015static void
3016lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3017 struct lpfc_iocbq *cmdiocbq,
3018 struct lpfc_iocbq *rspiocbq)
dea3101e 3019{
68876920
JSEC
3020 wait_queue_head_t *pdone_q;
3021 unsigned long iflags;
dea3101e 3022
2e0fef85 3023 spin_lock_irqsave(&phba->hbalock, iflags);
68876920
JSEC
3024 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3025 if (cmdiocbq->context2 && rspiocbq)
3026 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3027 &rspiocbq->iocb, sizeof(IOCB_t));
3028
3029 pdone_q = cmdiocbq->context_un.wait_queue;
2e0fef85 3030 spin_unlock_irqrestore(&phba->hbalock, iflags);
68876920
JSEC
3031 if (pdone_q)
3032 wake_up(pdone_q);
dea3101e
JB
3033 return;
3034}
3035
68876920
JSEC
3036/*
3037 * Issue the caller's iocb and wait for its completion, but no longer than the
3038 * caller's timeout. Note that the LPFC_IO_WAKE bit in iocb_flag is cleared
3039 * before lpfc_sli_issue_iocb is called, since the wake routine sets it and by
3040 * definition this is a wait function.
3041 */
dea3101e 3042int
2e0fef85
JS
3043lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3044 struct lpfc_sli_ring *pring,
3045 struct lpfc_iocbq *piocb,
3046 struct lpfc_iocbq *prspiocbq,
68876920 3047 uint32_t timeout)
dea3101e 3048{
7259f0d0 3049 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
3050 long timeleft, timeout_req = 0;
3051 int retval = IOCB_SUCCESS;
875fbdfe 3052 uint32_t creg_val;
dea3101e
JB
3053
3054 /*
68876920
JSEC
3055 * If the caller has provided a response iocbq buffer, then context2
3056 * must be NULL or it is an error.
dea3101e 3057 */
68876920
JSEC
3058 if (prspiocbq) {
3059 if (piocb->context2)
3060 return IOCB_ERROR;
3061 piocb->context2 = prspiocbq;
dea3101e
JB
3062 }
3063
68876920
JSEC
3064 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3065 piocb->context_un.wait_queue = &done_q;
3066 piocb->iocb_flag &= ~LPFC_IO_WAKE;
dea3101e 3067
875fbdfe
JSEC
3068 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3069 creg_val = readl(phba->HCregaddr);
3070 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3071 writel(creg_val, phba->HCregaddr);
3072 readl(phba->HCregaddr); /* flush */
3073 }
3074
68876920
JSEC
3075 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3076 if (retval == IOCB_SUCCESS) {
3077 timeout_req = timeout * HZ;
2e0fef85 3078 spin_unlock_irq(&phba->hbalock);
68876920
JSEC
3079 timeleft = wait_event_timeout(done_q,
3080 piocb->iocb_flag & LPFC_IO_WAKE,
3081 timeout_req);
2e0fef85 3082 spin_lock_irq(&phba->hbalock);
dea3101e 3083
7054a606
JS
3084 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3085 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3086 "%d:0331 IOCB wake signaled\n",
3087 phba->brd_no);
3088 } else if (timeleft == 0) {
68876920 3089 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
dca9479b 3090 "%d:0338 IOCB wait timeout error - no "
68876920
JSEC
3091 "wake response Data x%x\n",
3092 phba->brd_no, timeout);
3093 retval = IOCB_TIMEDOUT;
7054a606 3094 } else {
68876920
JSEC
3095 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3096 "%d:0330 IOCB wake NOT set, "
3097 "Data x%x x%lx\n", phba->brd_no,
3098 timeout, (timeleft / jiffies));
3099 retval = IOCB_TIMEDOUT;
dea3101e 3100 }
68876920
JSEC
3101 } else {
3102 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3103 "%d:0332 IOCB wait issue failed, Data x%x\n",
3104 phba->brd_no, retval);
3105 retval = IOCB_ERROR;
dea3101e
JB
3106 }
3107
875fbdfe
JSEC
3108 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3109 creg_val = readl(phba->HCregaddr);
3110 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3111 writel(creg_val, phba->HCregaddr);
3112 readl(phba->HCregaddr); /* flush */
3113 }
3114
68876920
JSEC
3115 if (prspiocbq)
3116 piocb->context2 = NULL;
3117
3118 piocb->context_un.wait_queue = NULL;
3119 piocb->iocb_cmpl = NULL;
dea3101e
JB
3120 return retval;
3121}
68876920 3122
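/*
 * lpfc_sli_issue_mbox_wait - synchronous wrapper around lpfc_sli_issue_mbox():
 * issues the command MBX_NOWAIT and sleeps (interruptibly, up to timeout
 * seconds) until the completion sets LPFC_MBX_WAKE.  Returns MBX_SUCCESS on
 * completion, MBX_TIMEOUT if the wake flag never appeared (in which case the
 * mailbox resources must not be freed yet), or MBX_NOT_FINISHED if context1
 * was already in use.  A caller might, for example, do:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_TIMEOUT)
 *		return;		(do not free pmboxq yet)
 */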
dea3101e 3123int
2e0fef85 3124lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e
JB
3125 uint32_t timeout)
3126{
7259f0d0 3127 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
dea3101e
JB
3128 int retval;
3129
3130 /* The caller must leave context1 empty. */
3131 if (pmboxq->context1 != 0) {
2e0fef85 3132 return MBX_NOT_FINISHED;
dea3101e
JB
3133 }
3134
3135 /* setup wake call as mailbox completion callback */
3136 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3137 /* setup context field to pass wait_queue pointer to wake function */
3138 pmboxq->context1 = &done_q;
3139
dea3101e
JB
3140 /* now issue the command */
3141 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3142
3143 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
7054a606
JS
3144 wait_event_interruptible_timeout(done_q,
3145 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3146 timeout * HZ);
3147
dea3101e 3148 pmboxq->context1 = NULL;
7054a606
JS
3149 /*
3150 * if LPFC_MBX_WAKE flag is set the mailbox is completed
3151 * else do not free the resources.
3152 */
3153 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
dea3101e 3154 retval = MBX_SUCCESS;
7054a606
JS
3155 else
3156 retval = MBX_TIMEOUT;
dea3101e
JB
3157 }
3158
dea3101e
JB
3159 return retval;
3160}
3161
b4c02652
JS
3162int
3163lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3164{
2e0fef85 3165 struct lpfc_vport *vport = phba->pport;
b4c02652
JS
3166 int i = 0;
3167
2e0fef85 3168 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
b4c02652
JS
3169 if (i++ > LPFC_MBOX_TMO * 1000)
3170 return 1;
3171
3172 if (lpfc_sli_handle_mb_event(phba) == 0)
3173 i = 0;
3174
3175 msleep(1);
3176 }
3177
3178 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3179}
3180
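/*
 * lpfc_intr_handler - INTx interrupt handler.  Reads and acknowledges the Host
 * Attention register, hands slow-path work (link attention, error attention,
 * slow rings) to the worker thread via work_ha, and services the FCP ring
 * (and the extra ring when configured) directly with
 * lpfc_sli_handle_fast_ring_event().
 */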
dea3101e 3181irqreturn_t
7d12e780 3182lpfc_intr_handler(int irq, void *dev_id)
dea3101e 3183{
2e0fef85 3184 struct lpfc_hba *phba;
dea3101e
JB
3185 uint32_t ha_copy;
3186 uint32_t work_ha_copy;
3187 unsigned long status;
3188 int i;
3189 uint32_t control;
3190
3191 /*
3192 * Get the driver's phba structure from the dev_id and
3193 * assume the HBA is not interrupting.
3194 */
3195 phba = (struct lpfc_hba *) dev_id;
3196
3197 if (unlikely(!phba))
3198 return IRQ_NONE;
3199
8d63f375
LV
3200 /* If the pci channel is offline, ignore all the interrupts. */
3201 if (unlikely(pci_channel_offline(phba->pcidev)))
3202 return IRQ_NONE;
3203
dea3101e
JB
3204 phba->sli.slistat.sli_intr++;
3205
3206 /*
3207 * Call the HBA to see if it is interrupting. If not, don't claim
3208 * the interrupt
3209 */
3210
3211 /* Ignore all interrupts during initialization. */
2e0fef85 3212 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e
JB
3213 return IRQ_NONE;
3214
3215 /*
3216 * Read host attention register to determine interrupt source
3217 * Clear Attention Sources, except Error Attention (to
3218 * preserve status) and Link Attention
3219 */
2e0fef85 3220 spin_lock(&phba->hbalock);
dea3101e 3221 ha_copy = readl(phba->HAregaddr);
ebdbe65f
JS
3222 /* If somebody is waiting to handle an eratt don't process it
3223 * here. The brdkill function will do this.
3224 */
2e0fef85 3225 if (phba->link_flag & LS_IGNORE_ERATT)
ebdbe65f 3226 ha_copy &= ~HA_ERATT;
dea3101e
JB
3227 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3228 readl(phba->HAregaddr); /* flush */
2e0fef85 3229 spin_unlock(&phba->hbalock);
dea3101e
JB
3230
3231 if (unlikely(!ha_copy))
3232 return IRQ_NONE;
3233
3234 work_ha_copy = ha_copy & phba->work_ha_mask;
3235
3236 if (unlikely(work_ha_copy)) {
3237 if (work_ha_copy & HA_LATT) {
3238 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3239 /*
3240 * Turn off Link Attention interrupts
3241 * until CLEAR_LA done
3242 */
2e0fef85 3243 spin_lock(&phba->hbalock);
dea3101e
JB
3244 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3245 control = readl(phba->HCregaddr);
3246 control &= ~HC_LAINT_ENA;
3247 writel(control, phba->HCregaddr);
3248 readl(phba->HCregaddr); /* flush */
2e0fef85 3249 spin_unlock(&phba->hbalock);
dea3101e
JB
3250 }
3251 else
3252 work_ha_copy &= ~HA_LATT;
3253 }
3254
3255 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3256 for (i = 0; i < phba->sli.num_rings; i++) {
3257 if (work_ha_copy & (HA_RXATT << (4*i))) {
3258 /*
3259 * Turn off Slow Rings interrupts
3260 */
2e0fef85 3261 spin_lock(&phba->hbalock);
dea3101e
JB
3262 control = readl(phba->HCregaddr);
3263 control &= ~(HC_R0INT_ENA << i);
3264 writel(control, phba->HCregaddr);
3265 readl(phba->HCregaddr); /* flush */
2e0fef85 3266 spin_unlock(&phba->hbalock);
dea3101e
JB
3267 }
3268 }
3269 }
3270
3271 if (work_ha_copy & HA_ERATT) {
2e0fef85 3272 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
3273 /*
3274 * There was a link/board error. Read the
3275 * status register to retrieve the error event
3276 * and process it.
3277 */
3278 phba->sli.slistat.err_attn_event++;
3279 /* Save status info */
3280 phba->work_hs = readl(phba->HSregaddr);
3281 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3282 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3283
3284 /* Clear Chip error bit */
3285 writel(HA_ERATT, phba->HAregaddr);
3286 readl(phba->HAregaddr); /* flush */
2e0fef85 3287 phba->pport->stopped = 1;
dea3101e
JB
3288 }
3289
2e0fef85 3290 spin_lock(&phba->hbalock);
dea3101e
JB
3291 phba->work_ha |= work_ha_copy;
3292 if (phba->work_wait)
3293 wake_up(phba->work_wait);
2e0fef85 3294 spin_unlock(&phba->hbalock);
dea3101e
JB
3295 }
3296
3297 ha_copy &= ~(phba->work_ha_mask);
3298
3299 /*
3300 * Process all events on FCP ring. Take the optimized path for
3301 * FCP IO. Any other IO is slow path and is handled by
3302 * the worker thread.
3303 */
3304 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
3305 status >>= (4*LPFC_FCP_RING);
3306 if (status & HA_RXATT)
3307 lpfc_sli_handle_fast_ring_event(phba,
3308 &phba->sli.ring[LPFC_FCP_RING],
3309 status);
a4bc3379
JS
3310
3311 if (phba->cfg_multi_ring_support == 2) {
3312 /*
3313 * Process all events on extra ring. Take the optimized path
3314 * for extra ring IO. Any other IO is slow path and is handled
3315 * by the worker thread.
3316 */
3317 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
3318 status >>= (4*LPFC_EXTRA_RING);
3319 if (status & HA_RXATT) {
3320 lpfc_sli_handle_fast_ring_event(phba,
3321 &phba->sli.ring[LPFC_EXTRA_RING],
3322 status);
3323 }
3324 }
dea3101e
JB
3325 return IRQ_HANDLED;
3326
3327} /* lpfc_intr_handler */