1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8 /*
9  * Table showing the current message id in use for a particular level.
10  * Update this table when adding log/debug messages.
11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0193 | 0x0146 |
15 * | | | 0x015b-0x0160 |
16 * | | | 0x016e |
17 * | Mailbox commands | 0x1199 | 0x1193 |
18 * | Device Discovery | 0x2134 | 0x210e-0x2116 |
19 * | | | 0x211a |
20 * | | | 0x211c-0x2128 |
21 * | | | 0x212a-0x2130 |
22 * | Queue Command and IO tracing | 0x3074 | 0x300b |
23 * | | | 0x3027-0x3028 |
24 * | | | 0x303d-0x3041 |
25 * | | | 0x302d,0x3033 |
26 * | | | 0x3036,0x3038 |
27 * | | | 0x303a |
28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
29 * | Async Events | 0x5090 | 0x502b-0x502f |
30 * | | | 0x5047 |
31 * | | | 0x5084,0x5075 |
32 * | | | 0x503d,0x5044 |
33 * | | | 0x505f |
34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
36 * | | | 0x7020,0x7024 |
37 * | | | 0x7039,0x7045 |
38 * | | | 0x7073-0x7075 |
39 * | | | 0x70a5-0x70a6 |
40 * | | | 0x70a8,0x70ab |
41 * | | | 0x70ad-0x70ae |
42 * | | | 0x70d0-0x70d6 |
43 * | | | 0x70d7-0x70db |
44 * | Task Management | 0x8042 | 0x8000,0x800b |
45 * | | | 0x8019 |
46 * | | | 0x8025,0x8026 |
47 * | | | 0x8031,0x8032 |
48 * | | | 0x8039,0x803c |
49 * | AER/EEH | 0x9011 | |
50 * | Virtual Port | 0xa007 | |
51 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
52 * | | | 0xb09e,0xb0ae |
53 * | | | 0xb0c3,0xb0c6 |
54 * | | | 0xb0e0-0xb0ef |
55 * | | | 0xb085,0xb0dc |
56 * | | | 0xb107,0xb108 |
57 * | | | 0xb111,0xb11e |
58 * | | | 0xb12c,0xb12d |
59 * | | | 0xb13a,0xb142 |
60 * | | | 0xb13c-0xb140 |
61 * | | | 0xb149 |
62 * | MultiQ | 0xc010 | |
63 * | Misc | 0xd302 | 0xd031-0xd0ff |
64 * | | | 0xd101-0xd1fe |
65 * | | | 0xd214-0xd2fe |
66 * | Target Mode | 0xe081 | |
67 * | Target Mode Management | 0xf09b | 0xf002 |
68 * | | | 0xf046-0xf049 |
69 * | Target Mode Task Management | 0x1000d | |
70 * ----------------------------------------------------------------------
71 */
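/*
 * Illustrative sketch (an assumption, not part of the driver source): the
 * message ids in the table above are consumed by ql_dbg()/ql_log(), which
 * take a level mask, the vha, the message id and a printf-style format.
 * Adding a new "Device Discovery" message, for instance, means either
 * filling one of the 0x2xxx holes listed above or advancing the
 * "Last Value Used" column, roughly like:
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2135,
 *	    "Login retry for port id 0x%06x (retry %d).\n",
 *	    fcport->d_id.b24, fcport->login_retry);
 *
 * The id 0x2135, the format string and the fcport fields here are only
 * examples; whichever id is chosen must be reflected back into the table.
 */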
72
73 #include "qla_def.h"
74
75 #include <linux/delay.h>
76
77 static uint32_t ql_dbg_offset = 0x800;
78
79 static inline void
80 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
81 {
82 fw_dump->fw_major_version = htonl(ha->fw_major_version);
83 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
84 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
85 fw_dump->fw_attributes = htonl(ha->fw_attributes);
86
87 fw_dump->vendor = htonl(ha->pdev->vendor);
88 fw_dump->device = htonl(ha->pdev->device);
89 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
90 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
91 }
92
93 static inline void *
94 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
95 {
96 struct req_que *req = ha->req_q_map[0];
97 struct rsp_que *rsp = ha->rsp_q_map[0];
98 /* Request queue. */
99 memcpy(ptr, req->ring, req->length *
100 sizeof(request_t));
101
102 /* Response queue. */
103 ptr += req->length * sizeof(request_t);
104 memcpy(ptr, rsp->ring, rsp->length *
105 sizeof(response_t));
106
107 return ptr + (rsp->length * sizeof(response_t));
108 }
109
110 int
111 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
112 uint32_t ram_dwords, void **nxt)
113 {
114 int rval;
115 uint32_t cnt, stat, timer, dwords, idx;
116 uint16_t mb0;
117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
118 dma_addr_t dump_dma = ha->gid_list_dma;
119 uint32_t *dump = (uint32_t *)ha->gid_list;
120
121 rval = QLA_SUCCESS;
122 mb0 = 0;
123
124 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
125 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
126
127 dwords = qla2x00_gid_list_size(ha) / 4;
128 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
129 cnt += dwords, addr += dwords) {
130 if (cnt + dwords > ram_dwords)
131 dwords = ram_dwords - cnt;
132
133 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
134 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
135
136 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
137 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
138 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
139 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
140
141 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
142 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
143
144 WRT_REG_WORD(&reg->mailbox9, 0);
145 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
146
147 ha->flags.mbox_int = 0;
148 for (timer = 6000000; timer; timer--) {
149 /* Check for pending interrupts. */
150 stat = RD_REG_DWORD(&reg->host_status);
151 if (stat & HSRX_RISC_INT) {
152 stat &= 0xff;
153
154 if (stat == 0x1 || stat == 0x2 ||
155 stat == 0x10 || stat == 0x11) {
156 set_bit(MBX_INTERRUPT,
157 &ha->mbx_cmd_flags);
158
159 mb0 = RD_REG_WORD(&reg->mailbox0);
160 RD_REG_WORD(&reg->mailbox1);
161
162 WRT_REG_DWORD(&reg->hccr,
163 HCCRX_CLR_RISC_INT);
164 RD_REG_DWORD(&reg->hccr);
165 break;
166 }
167
168 /* Clear this intr; it wasn't a mailbox intr */
169 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
170 RD_REG_DWORD(&reg->hccr);
171 }
172 udelay(5);
173 }
174 ha->flags.mbox_int = 1;
175
176 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
177 rval = mb0 & MBS_MASK;
178 for (idx = 0; idx < dwords; idx++)
179 ram[cnt + idx] = IS_QLA27XX(ha) ?
180 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
181 } else {
182 rval = QLA_FUNCTION_FAILED;
183 }
184 }
185
186 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
187 return rval;
188 }
189
190 int
191 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
192 uint32_t ram_dwords, void **nxt)
193 {
194 int rval;
195 uint32_t cnt, stat, timer, dwords, idx;
196 uint16_t mb0;
197 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
198 dma_addr_t dump_dma = ha->gid_list_dma;
199 uint32_t *dump = (uint32_t *)ha->gid_list;
200
201 rval = QLA_SUCCESS;
202 mb0 = 0;
203
204 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
205 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
206
207 dwords = qla2x00_gid_list_size(ha) / 4;
208 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
209 cnt += dwords, addr += dwords) {
210 if (cnt + dwords > ram_dwords)
211 dwords = ram_dwords - cnt;
212
213 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
214 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
215
216 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
217 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
218 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
219 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
220
221 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
222 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
223 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
224
225 ha->flags.mbox_int = 0;
226 for (timer = 6000000; timer; timer--) {
227 /* Check for pending interrupts. */
228 stat = RD_REG_DWORD(&reg->host_status);
229 if (stat & HSRX_RISC_INT) {
230 stat &= 0xff;
231
232 if (stat == 0x1 || stat == 0x2 ||
233 stat == 0x10 || stat == 0x11) {
234 set_bit(MBX_INTERRUPT,
235 &ha->mbx_cmd_flags);
236
237 mb0 = RD_REG_WORD(&reg->mailbox0);
238
239 WRT_REG_DWORD(&reg->hccr,
240 HCCRX_CLR_RISC_INT);
241 RD_REG_DWORD(&reg->hccr);
242 break;
243 }
244
245 /* Clear this intr; it wasn't a mailbox intr */
246 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
247 RD_REG_DWORD(&reg->hccr);
248 }
249 udelay(5);
250 }
251 ha->flags.mbox_int = 1;
252
253 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
254 rval = mb0 & MBS_MASK;
255 for (idx = 0; idx < dwords; idx++)
256 ram[cnt + idx] = IS_QLA27XX(ha) ?
257 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
258 } else {
259 rval = QLA_FUNCTION_FAILED;
260 }
261 }
262
263 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
264 return rval;
265 }
266
267 static int
268 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
269 uint32_t cram_size, void **nxt)
270 {
271 int rval;
272
273 /* Code RAM. */
274 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
275 if (rval != QLA_SUCCESS)
276 return rval;
277
278 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
279
280 /* External Memory. */
281 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
282 ha->fw_memory_size - 0x100000 + 1, nxt);
283 if (rval == QLA_SUCCESS)
284 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
285
286 return rval;
287 }
288
289 static uint32_t *
290 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
291 uint32_t count, uint32_t *buf)
292 {
293 uint32_t __iomem *dmp_reg;
294
295 WRT_REG_DWORD(&reg->iobase_addr, iobase);
296 dmp_reg = &reg->iobase_window;
297 for ( ; count--; dmp_reg++)
298 *buf++ = htonl(RD_REG_DWORD(dmp_reg));
299
300 return buf;
301 }
302
303 void
304 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
305 {
306 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
307
308  /* A 100 usec delay is sufficient for the hardware to pause the RISC. */
309 udelay(100);
310 if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
311 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
312 }
313
314 int
315 qla24xx_soft_reset(struct qla_hw_data *ha)
316 {
317 int rval = QLA_SUCCESS;
318 uint32_t cnt;
319 uint16_t wd;
320 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
321
322 /*
323 * Reset RISC. The delay is dependent on system architecture.
324  * The driver can proceed with the reset sequence after waiting
325 * for a timeout period.
326 */
327 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
328 for (cnt = 0; cnt < 30000; cnt++) {
329 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
330 break;
331
332 udelay(10);
333 }
334 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
335 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
336
337 WRT_REG_DWORD(&reg->ctrl_status,
338 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
339 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
340
341 udelay(100);
342
343 /* Wait for soft-reset to complete. */
344 for (cnt = 0; cnt < 30000; cnt++) {
345 if ((RD_REG_DWORD(&reg->ctrl_status) &
346 CSRX_ISP_SOFT_RESET) == 0)
347 break;
348
349 udelay(10);
350 }
351 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
352 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
353
354 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
355 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
356
357 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
358 rval == QLA_SUCCESS; cnt--) {
359 if (cnt)
360 udelay(10);
361 else
362 rval = QLA_FUNCTION_TIMEOUT;
363 }
364 if (rval == QLA_SUCCESS)
365 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
366
367 return rval;
368 }
369
370 static int
371 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
372 uint32_t ram_words, void **nxt)
373 {
374 int rval;
375 uint32_t cnt, stat, timer, words, idx;
376 uint16_t mb0;
377 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
378 dma_addr_t dump_dma = ha->gid_list_dma;
379 uint16_t *dump = (uint16_t *)ha->gid_list;
380
381 rval = QLA_SUCCESS;
382 mb0 = 0;
383
384 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
385 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
386
387 words = qla2x00_gid_list_size(ha) / 2;
388 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
389 cnt += words, addr += words) {
390 if (cnt + words > ram_words)
391 words = ram_words - cnt;
392
393 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
394 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
395
396 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
397 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
398 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
399 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
400
401 WRT_MAILBOX_REG(ha, reg, 4, words);
402 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
403
404 for (timer = 6000000; timer; timer--) {
405 /* Check for pending interrupts. */
406 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
407 if (stat & HSR_RISC_INT) {
408 stat &= 0xff;
409
410 if (stat == 0x1 || stat == 0x2) {
411 set_bit(MBX_INTERRUPT,
412 &ha->mbx_cmd_flags);
413
414 mb0 = RD_MAILBOX_REG(ha, reg, 0);
415
416 /* Release mailbox registers. */
417 WRT_REG_WORD(&reg->semaphore, 0);
418 WRT_REG_WORD(&reg->hccr,
419 HCCR_CLR_RISC_INT);
420 RD_REG_WORD(&reg->hccr);
421 break;
422 } else if (stat == 0x10 || stat == 0x11) {
423 set_bit(MBX_INTERRUPT,
424 &ha->mbx_cmd_flags);
425
426 mb0 = RD_MAILBOX_REG(ha, reg, 0);
427
428 WRT_REG_WORD(&reg->hccr,
429 HCCR_CLR_RISC_INT);
430 RD_REG_WORD(&reg->hccr);
431 break;
432 }
433
434 /* clear this intr; it wasn't a mailbox intr */
435 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 RD_REG_WORD(&reg->hccr);
437 }
438 udelay(5);
439 }
440
441 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 rval = mb0 & MBS_MASK;
443 for (idx = 0; idx < words; idx++)
444 ram[cnt + idx] = swab16(dump[idx]);
445 } else {
446 rval = QLA_FUNCTION_FAILED;
447 }
448 }
449
450 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
451 return rval;
452 }
453
454 static inline void
455 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
456 uint16_t *buf)
457 {
458 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
459
460 for ( ; count--; dmp_reg++)
461 *buf++ = htons(RD_REG_WORD(dmp_reg));
462 }
463
464 static inline void *
465 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
466 {
467 if (!ha->eft)
468 return ptr;
469
470 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
471 return ptr + ntohl(ha->fw_dump->eft_size);
472 }
473
474 static inline void *
475 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
476 {
477 uint32_t cnt;
478 uint32_t *iter_reg;
479 struct qla2xxx_fce_chain *fcec = ptr;
480
481 if (!ha->fce)
482 return ptr;
483
484 *last_chain = &fcec->type;
485 fcec->type = htonl(DUMP_CHAIN_FCE);
486 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
487 fce_calc_size(ha->fce_bufs));
488 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
489 fcec->addr_l = htonl(LSD(ha->fce_dma));
490 fcec->addr_h = htonl(MSD(ha->fce_dma));
491
492 iter_reg = fcec->eregs;
493 for (cnt = 0; cnt < 8; cnt++)
494 *iter_reg++ = htonl(ha->fce_mb[cnt]);
495
496 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
497
498 return (char *)iter_reg + ntohl(fcec->size);
499 }
500
501 static inline void *
502 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
503 {
504 struct qla2xxx_offld_chain *c = ptr;
505
506 if (!ha->exlogin_buf)
507 return ptr;
508
509 *last_chain = &c->type;
510
511 c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
512 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
513 ha->exlogin_size);
514 c->size = cpu_to_be32(ha->exlogin_size);
515 c->addr = cpu_to_be64(ha->exlogin_buf_dma);
516
517 ptr += sizeof(struct qla2xxx_offld_chain);
518 memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
519
520 return (char *)ptr + cpu_to_be32(c->size);
521 }
522
523 static inline void *
524 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
525 {
526 struct qla2xxx_offld_chain *c = ptr;
527
528 if (!ha->exchoffld_buf)
529 return ptr;
530
531 *last_chain = &c->type;
532
533 c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
534 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
535 ha->exchoffld_size);
536 c->size = cpu_to_be32(ha->exchoffld_size);
537 c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
538
539 ptr += sizeof(struct qla2xxx_offld_chain);
540 memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
541
542 return (char *)ptr + cpu_to_be32(c->size);
543 }
544
545 static inline void *
546 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
547 uint32_t **last_chain)
548 {
549 struct qla2xxx_mqueue_chain *q;
550 struct qla2xxx_mqueue_header *qh;
551 uint32_t num_queues;
552 int que;
553 struct {
554 int length;
555 void *ring;
556 } aq, *aqp;
557
558 if (!ha->tgt.atio_ring)
559 return ptr;
560
561 num_queues = 1;
562 aqp = &aq;
563 aqp->length = ha->tgt.atio_q_length;
564 aqp->ring = ha->tgt.atio_ring;
565
566 for (que = 0; que < num_queues; que++) {
567 /* aqp = ha->atio_q_map[que]; */
568 q = ptr;
569 *last_chain = &q->type;
570 q->type = htonl(DUMP_CHAIN_QUEUE);
571 q->chain_size = htonl(
572 sizeof(struct qla2xxx_mqueue_chain) +
573 sizeof(struct qla2xxx_mqueue_header) +
574 (aqp->length * sizeof(request_t)));
575 ptr += sizeof(struct qla2xxx_mqueue_chain);
576
577 /* Add header. */
578 qh = ptr;
579 qh->queue = htonl(TYPE_ATIO_QUEUE);
580 qh->number = htonl(que);
581 qh->size = htonl(aqp->length * sizeof(request_t));
582 ptr += sizeof(struct qla2xxx_mqueue_header);
583
584 /* Add data. */
585 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
586
587 ptr += aqp->length * sizeof(request_t);
588 }
589
590 return ptr;
591 }
592
593 static inline void *
594 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
595 {
596 struct qla2xxx_mqueue_chain *q;
597 struct qla2xxx_mqueue_header *qh;
598 struct req_que *req;
599 struct rsp_que *rsp;
600 int que;
601
602 if (!ha->mqenable)
603 return ptr;
604
605 /* Request queues */
606 for (que = 1; que < ha->max_req_queues; que++) {
607 req = ha->req_q_map[que];
608 if (!req)
609 break;
610
611 /* Add chain. */
612 q = ptr;
613 *last_chain = &q->type;
614 q->type = htonl(DUMP_CHAIN_QUEUE);
615 q->chain_size = htonl(
616 sizeof(struct qla2xxx_mqueue_chain) +
617 sizeof(struct qla2xxx_mqueue_header) +
618 (req->length * sizeof(request_t)));
619 ptr += sizeof(struct qla2xxx_mqueue_chain);
620
621 /* Add header. */
622 qh = ptr;
623 qh->queue = htonl(TYPE_REQUEST_QUEUE);
624 qh->number = htonl(que);
625 qh->size = htonl(req->length * sizeof(request_t));
626 ptr += sizeof(struct qla2xxx_mqueue_header);
627
628 /* Add data. */
629 memcpy(ptr, req->ring, req->length * sizeof(request_t));
630 ptr += req->length * sizeof(request_t);
631 }
632
633 /* Response queues */
634 for (que = 1; que < ha->max_rsp_queues; que++) {
635 rsp = ha->rsp_q_map[que];
636 if (!rsp)
637 break;
638
639 /* Add chain. */
640 q = ptr;
641 *last_chain = &q->type;
642 q->type = htonl(DUMP_CHAIN_QUEUE);
643 q->chain_size = htonl(
644 sizeof(struct qla2xxx_mqueue_chain) +
645 sizeof(struct qla2xxx_mqueue_header) +
646 (rsp->length * sizeof(response_t)));
647 ptr += sizeof(struct qla2xxx_mqueue_chain);
648
649 /* Add header. */
650 qh = ptr;
651 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
652 qh->number = htonl(que);
653 qh->size = htonl(rsp->length * sizeof(response_t));
654 ptr += sizeof(struct qla2xxx_mqueue_header);
655
656 /* Add data. */
657 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
658 ptr += rsp->length * sizeof(response_t);
659 }
660
661 return ptr;
662 }
663
664 static inline void *
665 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
666 {
667 uint32_t cnt, que_idx;
668 uint8_t que_cnt;
669 struct qla2xxx_mq_chain *mq = ptr;
670 device_reg_t *reg;
671
672 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
673 return ptr;
674
675 mq = ptr;
676 *last_chain = &mq->type;
677 mq->type = htonl(DUMP_CHAIN_MQ);
678 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
679
680 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
681 ha->max_req_queues : ha->max_rsp_queues;
682 mq->count = htonl(que_cnt);
683 for (cnt = 0; cnt < que_cnt; cnt++) {
684 reg = ISP_QUE_REG(ha, cnt);
685 que_idx = cnt * 4;
686 mq->qregs[que_idx] =
687 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
688 mq->qregs[que_idx+1] =
689 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
690 mq->qregs[que_idx+2] =
691 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
692 mq->qregs[que_idx+3] =
693 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
694 }
695
696 return ptr + sizeof(struct qla2xxx_mq_chain);
697 }
698
699 void
700 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
701 {
702 struct qla_hw_data *ha = vha->hw;
703
704 if (rval != QLA_SUCCESS) {
705 ql_log(ql_log_warn, vha, 0xd000,
706 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
707 rval, ha->fw_dump_cap_flags);
708 ha->fw_dumped = 0;
709 } else {
710 ql_log(ql_log_info, vha, 0xd001,
711 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
712 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
713 ha->fw_dumped = 1;
714 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
715 }
716 }
717
718 /**
719 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
720  * @vha: HA context
721 * @hardware_locked: Called with the hardware_lock
722 */
723 void
724 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
725 {
726 int rval;
727 uint32_t cnt;
728 struct qla_hw_data *ha = vha->hw;
729 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
730 uint16_t __iomem *dmp_reg;
731 unsigned long flags;
732 struct qla2300_fw_dump *fw;
733 void *nxt;
734 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
735
736 flags = 0;
737
738 #ifndef __CHECKER__
739 if (!hardware_locked)
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741 #endif
742
743 if (!ha->fw_dump) {
744 ql_log(ql_log_warn, vha, 0xd002,
745 "No buffer available for dump.\n");
746 goto qla2300_fw_dump_failed;
747 }
748
749 if (ha->fw_dumped) {
750 ql_log(ql_log_warn, vha, 0xd003,
751 "Firmware has been previously dumped (%p) "
752 "-- ignoring request.\n",
753 ha->fw_dump);
754 goto qla2300_fw_dump_failed;
755 }
756 fw = &ha->fw_dump->isp.isp23;
757 qla2xxx_prep_dump(ha, ha->fw_dump);
758
759 rval = QLA_SUCCESS;
760 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
761
762 /* Pause RISC. */
763 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
764 if (IS_QLA2300(ha)) {
765 for (cnt = 30000;
766 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
767 rval == QLA_SUCCESS; cnt--) {
768 if (cnt)
769 udelay(100);
770 else
771 rval = QLA_FUNCTION_TIMEOUT;
772 }
773 } else {
774 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
775 udelay(10);
776 }
777
778 if (rval == QLA_SUCCESS) {
779 dmp_reg = &reg->flash_address;
780 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
781 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
782
783 dmp_reg = &reg->u.isp2300.req_q_in;
784 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
785 cnt++, dmp_reg++)
786 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
787
788 dmp_reg = &reg->u.isp2300.mailbox0;
789 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
790 cnt++, dmp_reg++)
791 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
792
793 WRT_REG_WORD(&reg->ctrl_status, 0x40);
794 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
795
796 WRT_REG_WORD(&reg->ctrl_status, 0x50);
797 qla2xxx_read_window(reg, 48, fw->dma_reg);
798
799 WRT_REG_WORD(&reg->ctrl_status, 0x00);
800 dmp_reg = &reg->risc_hw;
801 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
802 cnt++, dmp_reg++)
803 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
804
805 WRT_REG_WORD(&reg->pcr, 0x2000);
806 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
807
808 WRT_REG_WORD(&reg->pcr, 0x2200);
809 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
810
811 WRT_REG_WORD(&reg->pcr, 0x2400);
812 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
813
814 WRT_REG_WORD(&reg->pcr, 0x2600);
815 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
816
817 WRT_REG_WORD(&reg->pcr, 0x2800);
818 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
819
820 WRT_REG_WORD(&reg->pcr, 0x2A00);
821 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
822
823 WRT_REG_WORD(&reg->pcr, 0x2C00);
824 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
825
826 WRT_REG_WORD(&reg->pcr, 0x2E00);
827 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
828
829 WRT_REG_WORD(&reg->ctrl_status, 0x10);
830 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
831
832 WRT_REG_WORD(&reg->ctrl_status, 0x20);
833 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
834
835 WRT_REG_WORD(&reg->ctrl_status, 0x30);
836 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
837
838 /* Reset RISC. */
839 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
840 for (cnt = 0; cnt < 30000; cnt++) {
841 if ((RD_REG_WORD(&reg->ctrl_status) &
842 CSR_ISP_SOFT_RESET) == 0)
843 break;
844
845 udelay(10);
846 }
847 }
848
849 if (!IS_QLA2300(ha)) {
850 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
851 rval == QLA_SUCCESS; cnt--) {
852 if (cnt)
853 udelay(100);
854 else
855 rval = QLA_FUNCTION_TIMEOUT;
856 }
857 }
858
859 /* Get RISC SRAM. */
860 if (rval == QLA_SUCCESS)
861 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
862 sizeof(fw->risc_ram) / 2, &nxt);
863
864 /* Get stack SRAM. */
865 if (rval == QLA_SUCCESS)
866 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
867 sizeof(fw->stack_ram) / 2, &nxt);
868
869 /* Get data SRAM. */
870 if (rval == QLA_SUCCESS)
871 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
872 ha->fw_memory_size - 0x11000 + 1, &nxt);
873
874 if (rval == QLA_SUCCESS)
875 qla2xxx_copy_queues(ha, nxt);
876
877 qla2xxx_dump_post_process(base_vha, rval);
878
879 qla2300_fw_dump_failed:
880 #ifndef __CHECKER__
881 if (!hardware_locked)
882 spin_unlock_irqrestore(&ha->hardware_lock, flags);
883 #else
884 ;
885 #endif
886 }
887
888 /**
889 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
890  * @vha: HA context
891 * @hardware_locked: Called with the hardware_lock
892 */
893 void
894 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
895 {
896 int rval;
897 uint32_t cnt, timer;
898 uint16_t risc_address;
899 uint16_t mb0, mb2;
900 struct qla_hw_data *ha = vha->hw;
901 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
902 uint16_t __iomem *dmp_reg;
903 unsigned long flags;
904 struct qla2100_fw_dump *fw;
905 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
906
907 risc_address = 0;
908 mb0 = mb2 = 0;
909 flags = 0;
910
911 #ifndef __CHECKER__
912 if (!hardware_locked)
913 spin_lock_irqsave(&ha->hardware_lock, flags);
914 #endif
915
916 if (!ha->fw_dump) {
917 ql_log(ql_log_warn, vha, 0xd004,
918 "No buffer available for dump.\n");
919 goto qla2100_fw_dump_failed;
920 }
921
922 if (ha->fw_dumped) {
923 ql_log(ql_log_warn, vha, 0xd005,
924 "Firmware has been previously dumped (%p) "
925 "-- ignoring request.\n",
926 ha->fw_dump);
927 goto qla2100_fw_dump_failed;
928 }
929 fw = &ha->fw_dump->isp.isp21;
930 qla2xxx_prep_dump(ha, ha->fw_dump);
931
932 rval = QLA_SUCCESS;
933 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
934
935 /* Pause RISC. */
936 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
937 for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
938 rval == QLA_SUCCESS; cnt--) {
939 if (cnt)
940 udelay(100);
941 else
942 rval = QLA_FUNCTION_TIMEOUT;
943 }
944 if (rval == QLA_SUCCESS) {
945 dmp_reg = &reg->flash_address;
946 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
947 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
948
949 dmp_reg = &reg->u.isp2100.mailbox0;
950 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
951 if (cnt == 8)
952 dmp_reg = &reg->u_end.isp2200.mailbox8;
953
954 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
955 }
956
957 dmp_reg = &reg->u.isp2100.unused_2[0];
958 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
959 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
960
961 WRT_REG_WORD(&reg->ctrl_status, 0x00);
962 dmp_reg = &reg->risc_hw;
963 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
964 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
965
966 WRT_REG_WORD(&reg->pcr, 0x2000);
967 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
968
969 WRT_REG_WORD(&reg->pcr, 0x2100);
970 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
971
972 WRT_REG_WORD(&reg->pcr, 0x2200);
973 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
974
975 WRT_REG_WORD(&reg->pcr, 0x2300);
976 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
977
978 WRT_REG_WORD(&reg->pcr, 0x2400);
979 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
980
981 WRT_REG_WORD(&reg->pcr, 0x2500);
982 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
983
984 WRT_REG_WORD(&reg->pcr, 0x2600);
985 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
986
987 WRT_REG_WORD(&reg->pcr, 0x2700);
988 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
989
990 WRT_REG_WORD(&reg->ctrl_status, 0x10);
991 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
992
993 WRT_REG_WORD(&reg->ctrl_status, 0x20);
994 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
995
996 WRT_REG_WORD(&reg->ctrl_status, 0x30);
997 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
998
999 /* Reset the ISP. */
1000 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1001 }
1002
1003 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
1004 rval == QLA_SUCCESS; cnt--) {
1005 if (cnt)
1006 udelay(100);
1007 else
1008 rval = QLA_FUNCTION_TIMEOUT;
1009 }
1010
1011 /* Pause RISC. */
1012 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1013 (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1014
1015 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1016 for (cnt = 30000;
1017 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1018 rval == QLA_SUCCESS; cnt--) {
1019 if (cnt)
1020 udelay(100);
1021 else
1022 rval = QLA_FUNCTION_TIMEOUT;
1023 }
1024 if (rval == QLA_SUCCESS) {
1025 /* Set memory configuration and timing. */
1026 if (IS_QLA2100(ha))
1027 WRT_REG_WORD(&reg->mctr, 0xf1);
1028 else
1029 WRT_REG_WORD(&reg->mctr, 0xf2);
1030 RD_REG_WORD(&reg->mctr); /* PCI Posting. */
1031
1032 /* Release RISC. */
1033 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1034 }
1035 }
1036
1037 if (rval == QLA_SUCCESS) {
1038 /* Get RISC SRAM. */
1039 risc_address = 0x1000;
1040 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1041 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1042 }
1043 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1044 cnt++, risc_address++) {
1045 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1046 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1047
1048 for (timer = 6000000; timer != 0; timer--) {
1049 /* Check for pending interrupts. */
1050 if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1051 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1052 set_bit(MBX_INTERRUPT,
1053 &ha->mbx_cmd_flags);
1054
1055 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1056 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1057
1058 WRT_REG_WORD(&reg->semaphore, 0);
1059 WRT_REG_WORD(&reg->hccr,
1060 HCCR_CLR_RISC_INT);
1061 RD_REG_WORD(&reg->hccr);
1062 break;
1063 }
1064 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1065 RD_REG_WORD(&reg->hccr);
1066 }
1067 udelay(5);
1068 }
1069
1070 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1071 rval = mb0 & MBS_MASK;
1072 fw->risc_ram[cnt] = htons(mb2);
1073 } else {
1074 rval = QLA_FUNCTION_FAILED;
1075 }
1076 }
1077
1078 if (rval == QLA_SUCCESS)
1079 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1080
1081 qla2xxx_dump_post_process(base_vha, rval);
1082
1083 qla2100_fw_dump_failed:
1084 #ifndef __CHECKER__
1085 if (!hardware_locked)
1086 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1087 #else
1088 ;
1089 #endif
1090 }
1091
1092 void
1093 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1094 {
1095 int rval;
1096 uint32_t cnt;
1097 struct qla_hw_data *ha = vha->hw;
1098 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1099 uint32_t __iomem *dmp_reg;
1100 uint32_t *iter_reg;
1101 uint16_t __iomem *mbx_reg;
1102 unsigned long flags;
1103 struct qla24xx_fw_dump *fw;
1104 void *nxt;
1105 void *nxt_chain;
1106 uint32_t *last_chain = NULL;
1107 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1108
1109 if (IS_P3P_TYPE(ha))
1110 return;
1111
1112 flags = 0;
1113 ha->fw_dump_cap_flags = 0;
1114
1115 #ifndef __CHECKER__
1116 if (!hardware_locked)
1117 spin_lock_irqsave(&ha->hardware_lock, flags);
1118 #endif
1119
1120 if (!ha->fw_dump) {
1121 ql_log(ql_log_warn, vha, 0xd006,
1122 "No buffer available for dump.\n");
1123 goto qla24xx_fw_dump_failed;
1124 }
1125
1126 if (ha->fw_dumped) {
1127 ql_log(ql_log_warn, vha, 0xd007,
1128 "Firmware has been previously dumped (%p) "
1129 "-- ignoring request.\n",
1130 ha->fw_dump);
1131 goto qla24xx_fw_dump_failed;
1132 }
1133 fw = &ha->fw_dump->isp.isp24;
1134 qla2xxx_prep_dump(ha, ha->fw_dump);
1135
1136 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1137
1138 /*
1139 * Pause RISC. No need to track timeout, as resetting the chip
1140  * is the right approach in case of a pause timeout.
1141 */
1142 qla24xx_pause_risc(reg, ha);
1143
1144 /* Host interface registers. */
1145 dmp_reg = &reg->flash_addr;
1146 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1147 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1148
1149 /* Disable interrupts. */
1150 WRT_REG_DWORD(&reg->ictrl, 0);
1151 RD_REG_DWORD(&reg->ictrl);
1152
1153 /* Shadow registers. */
1154 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1155 RD_REG_DWORD(&reg->iobase_addr);
1156 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1157 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1158
1159 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1160 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1161
1162 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1163 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1164
1165 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1166 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1167
1168 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1169 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1170
1171 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1172 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1173
1174 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1175 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1176
1177 /* Mailbox registers. */
1178 mbx_reg = &reg->mailbox0;
1179 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1180 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1181
1182 /* Transfer sequence registers. */
1183 iter_reg = fw->xseq_gp_reg;
1184 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1185 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1186 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1187 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1188 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1189 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1190 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1191 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1192
1193 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1194 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1195
1196 /* Receive sequence registers. */
1197 iter_reg = fw->rseq_gp_reg;
1198 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1199 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1200 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1201 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1202 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1203 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1204 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1205 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1206
1207 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1208 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1209 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1210
1211 /* Command DMA registers. */
1212 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1213
1214 /* Queues. */
1215 iter_reg = fw->req0_dma_reg;
1216 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1217 dmp_reg = &reg->iobase_q;
1218 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1219 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1220
1221 iter_reg = fw->resp0_dma_reg;
1222 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1223 dmp_reg = &reg->iobase_q;
1224 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1225 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1226
1227 iter_reg = fw->req1_dma_reg;
1228 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1229 dmp_reg = &reg->iobase_q;
1230 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1231 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1232
1233 /* Transmit DMA registers. */
1234 iter_reg = fw->xmt0_dma_reg;
1235 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1236 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1237
1238 iter_reg = fw->xmt1_dma_reg;
1239 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1240 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1241
1242 iter_reg = fw->xmt2_dma_reg;
1243 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1244 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1245
1246 iter_reg = fw->xmt3_dma_reg;
1247 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1248 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1249
1250 iter_reg = fw->xmt4_dma_reg;
1251 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1252 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1253
1254 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1255
1256 /* Receive DMA registers. */
1257 iter_reg = fw->rcvt0_data_dma_reg;
1258 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1259 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1260
1261 iter_reg = fw->rcvt1_data_dma_reg;
1262 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1263 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1264
1265 /* RISC registers. */
1266 iter_reg = fw->risc_gp_reg;
1267 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1268 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1269 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1270 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1271 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1272 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1273 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1274 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1275
1276 /* Local memory controller registers. */
1277 iter_reg = fw->lmc_reg;
1278 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1279 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1280 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1281 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1282 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1283 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1284 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1285
1286 /* Fibre Protocol Module registers. */
1287 iter_reg = fw->fpm_hdw_reg;
1288 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1289 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1290 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1291 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1292 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1293 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1294 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1295 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1296 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1297 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1298 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1299 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1300
1301 /* Frame Buffer registers. */
1302 iter_reg = fw->fb_hdw_reg;
1303 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1304 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1305 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1306 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1307 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1308 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1309 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1310 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1311 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1312 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1313 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1314
1315 rval = qla24xx_soft_reset(ha);
1316 if (rval != QLA_SUCCESS)
1317 goto qla24xx_fw_dump_failed_0;
1318
1319 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1320 &nxt);
1321 if (rval != QLA_SUCCESS)
1322 goto qla24xx_fw_dump_failed_0;
1323
1324 nxt = qla2xxx_copy_queues(ha, nxt);
1325
1326 qla24xx_copy_eft(ha, nxt);
1327
1328 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1329 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1330 if (last_chain) {
1331 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1332 *last_chain |= htonl(DUMP_CHAIN_LAST);
1333 }
1334
1335 /* Adjust valid length. */
1336 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1337
1338 qla24xx_fw_dump_failed_0:
1339 qla2xxx_dump_post_process(base_vha, rval);
1340
1341 qla24xx_fw_dump_failed:
1342 #ifndef __CHECKER__
1343 if (!hardware_locked)
1344 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1345 #else
1346 ;
1347 #endif
1348 }
1349
1350 void
1351 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1352 {
1353 int rval;
1354 uint32_t cnt;
1355 struct qla_hw_data *ha = vha->hw;
1356 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1357 uint32_t __iomem *dmp_reg;
1358 uint32_t *iter_reg;
1359 uint16_t __iomem *mbx_reg;
1360 unsigned long flags;
1361 struct qla25xx_fw_dump *fw;
1362 void *nxt, *nxt_chain;
1363 uint32_t *last_chain = NULL;
1364 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1365
1366 flags = 0;
1367 ha->fw_dump_cap_flags = 0;
1368
1369 #ifndef __CHECKER__
1370 if (!hardware_locked)
1371 spin_lock_irqsave(&ha->hardware_lock, flags);
1372 #endif
1373
1374 if (!ha->fw_dump) {
1375 ql_log(ql_log_warn, vha, 0xd008,
1376 "No buffer available for dump.\n");
1377 goto qla25xx_fw_dump_failed;
1378 }
1379
1380 if (ha->fw_dumped) {
1381 ql_log(ql_log_warn, vha, 0xd009,
1382 "Firmware has been previously dumped (%p) "
1383 "-- ignoring request.\n",
1384 ha->fw_dump);
1385 goto qla25xx_fw_dump_failed;
1386 }
1387 fw = &ha->fw_dump->isp.isp25;
1388 qla2xxx_prep_dump(ha, ha->fw_dump);
1389 ha->fw_dump->version = htonl(2);
1390
1391 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1392
1393 /*
1394 * Pause RISC. No need to track timeout, as resetting the chip
1395  * is the right approach in case of a pause timeout.
1396 */
1397 qla24xx_pause_risc(reg, ha);
1398
1399 /* Host/Risc registers. */
1400 iter_reg = fw->host_risc_reg;
1401 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1402 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1403
1404 /* PCIe registers. */
1405 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1406 RD_REG_DWORD(&reg->iobase_addr);
1407 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1408 dmp_reg = &reg->iobase_c4;
1409 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1410 dmp_reg++;
1411 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1412 dmp_reg++;
1413 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1414 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1415
1416 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1417 RD_REG_DWORD(&reg->iobase_window);
1418
1419 /* Host interface registers. */
1420 dmp_reg = &reg->flash_addr;
1421 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1422 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1423
1424 /* Disable interrupts. */
1425 WRT_REG_DWORD(&reg->ictrl, 0);
1426 RD_REG_DWORD(&reg->ictrl);
1427
1428 /* Shadow registers. */
1429 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1430 RD_REG_DWORD(&reg->iobase_addr);
1431 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1432 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1433
1434 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1435 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436
1437 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1438 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1439
1440 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1441 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1442
1443 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1444 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1445
1446 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1447 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1448
1449 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1450 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1451
1452 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1453 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1454
1455 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1456 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1457
1458 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1459 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1460
1461 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1462 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1463
1464 /* RISC I/O register. */
1465 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1466 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1467
1468 /* Mailbox registers. */
1469 mbx_reg = &reg->mailbox0;
1470 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1471 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1472
1473 /* Transfer sequence registers. */
1474 iter_reg = fw->xseq_gp_reg;
1475 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1476 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1477 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1478 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1479 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1480 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1481 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1482 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1483
1484 iter_reg = fw->xseq_0_reg;
1485 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1486 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1487 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1488
1489 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1490
1491 /* Receive sequence registers. */
1492 iter_reg = fw->rseq_gp_reg;
1493 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1494 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1495 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1496 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1497 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1498 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1499 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1500 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1501
1502 iter_reg = fw->rseq_0_reg;
1503 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1504 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1505
1506 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1507 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1508
1509 /* Auxiliary sequence registers. */
1510 iter_reg = fw->aseq_gp_reg;
1511 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1512 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1513 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1514 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1515 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1516 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1517 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1518 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1519
1520 iter_reg = fw->aseq_0_reg;
1521 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1522 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1523
1524 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1525 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1526
1527 /* Command DMA registers. */
1528 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1529
1530 /* Queues. */
1531 iter_reg = fw->req0_dma_reg;
1532 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1533 dmp_reg = &reg->iobase_q;
1534 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1535 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1536
1537 iter_reg = fw->resp0_dma_reg;
1538 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1539 dmp_reg = &reg->iobase_q;
1540 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1541 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1542
1543 iter_reg = fw->req1_dma_reg;
1544 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1545 dmp_reg = &reg->iobase_q;
1546 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1547 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1548
1549 /* Transmit DMA registers. */
1550 iter_reg = fw->xmt0_dma_reg;
1551 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1552 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1553
1554 iter_reg = fw->xmt1_dma_reg;
1555 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1556 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1557
1558 iter_reg = fw->xmt2_dma_reg;
1559 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1560 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1561
1562 iter_reg = fw->xmt3_dma_reg;
1563 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1564 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1565
1566 iter_reg = fw->xmt4_dma_reg;
1567 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1568 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1569
1570 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1571
1572 /* Receive DMA registers. */
1573 iter_reg = fw->rcvt0_data_dma_reg;
1574 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1575 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1576
1577 iter_reg = fw->rcvt1_data_dma_reg;
1578 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1579 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1580
1581 /* RISC registers. */
1582 iter_reg = fw->risc_gp_reg;
1583 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1586 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1587 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1588 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1589 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1590 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1591
1592 /* Local memory controller registers. */
1593 iter_reg = fw->lmc_reg;
1594 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1595 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1596 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1600 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1601 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1602
1603 /* Fibre Protocol Module registers. */
1604 iter_reg = fw->fpm_hdw_reg;
1605 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1606 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1607 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1608 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1609 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1610 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1611 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1612 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1613 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1614 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1615 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1616 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1617
1618 /* Frame Buffer registers. */
1619 iter_reg = fw->fb_hdw_reg;
1620 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1621 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1622 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1623 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1624 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1625 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1626 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1627 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1628 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1629 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1630 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1631 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1632
1633 /* Multi queue registers */
1634 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1635 &last_chain);
1636
1637 rval = qla24xx_soft_reset(ha);
1638 if (rval != QLA_SUCCESS)
1639 goto qla25xx_fw_dump_failed_0;
1640
1641 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1642 &nxt);
1643 if (rval != QLA_SUCCESS)
1644 goto qla25xx_fw_dump_failed_0;
1645
1646 nxt = qla2xxx_copy_queues(ha, nxt);
1647
1648 qla24xx_copy_eft(ha, nxt);
1649
1650 /* Chain entries -- started with MQ. */
1651 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1652 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1653 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1654 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1655 if (last_chain) {
1656 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1657 *last_chain |= htonl(DUMP_CHAIN_LAST);
1658 }
1659
1660 /* Adjust valid length. */
1661 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1662
1663 qla25xx_fw_dump_failed_0:
1664 qla2xxx_dump_post_process(base_vha, rval);
1665
1666 qla25xx_fw_dump_failed:
1667 #ifndef __CHECKER__
1668 if (!hardware_locked)
1669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1670 #else
1671 ;
1672 #endif
1673 }
1674
1675 void
1676 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1677 {
1678 int rval;
1679 uint32_t cnt;
1680 struct qla_hw_data *ha = vha->hw;
1681 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1682 uint32_t __iomem *dmp_reg;
1683 uint32_t *iter_reg;
1684 uint16_t __iomem *mbx_reg;
1685 unsigned long flags;
1686 struct qla81xx_fw_dump *fw;
1687 void *nxt, *nxt_chain;
1688 uint32_t *last_chain = NULL;
1689 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1690
1691 flags = 0;
1692 ha->fw_dump_cap_flags = 0;
1693
1694 #ifndef __CHECKER__
1695 if (!hardware_locked)
1696 spin_lock_irqsave(&ha->hardware_lock, flags);
1697 #endif
1698
1699 if (!ha->fw_dump) {
1700 ql_log(ql_log_warn, vha, 0xd00a,
1701 "No buffer available for dump.\n");
1702 goto qla81xx_fw_dump_failed;
1703 }
1704
1705 if (ha->fw_dumped) {
1706 ql_log(ql_log_warn, vha, 0xd00b,
1707 "Firmware has been previously dumped (%p) "
1708 "-- ignoring request.\n",
1709 ha->fw_dump);
1710 goto qla81xx_fw_dump_failed;
1711 }
1712 fw = &ha->fw_dump->isp.isp81;
1713 qla2xxx_prep_dump(ha, ha->fw_dump);
1714
1715 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1716
1717 /*
1718 * Pause RISC. No need to track timeout, as resetting the chip
1719  * is the right approach in case of a pause timeout.
1720 */
1721 qla24xx_pause_risc(reg, ha);
1722
1723 /* Host/Risc registers. */
1724 iter_reg = fw->host_risc_reg;
1725 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1726 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1727
1728 /* PCIe registers. */
1729 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1730 RD_REG_DWORD(&reg->iobase_addr);
1731 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1732 dmp_reg = &reg->iobase_c4;
1733 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1734 dmp_reg++;
1735 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1736 dmp_reg++;
1737 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1738 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1739
1740 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1741 RD_REG_DWORD(&reg->iobase_window);
1742
1743 /* Host interface registers. */
1744 dmp_reg = &reg->flash_addr;
1745 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1746 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1747
1748 /* Disable interrupts. */
1749 WRT_REG_DWORD(&reg->ictrl, 0);
1750 RD_REG_DWORD(&reg->ictrl);
1751
1752 /* Shadow registers. */
1753 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1754 RD_REG_DWORD(&reg->iobase_addr);
1755 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1756 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1757
1758 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1759 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1760
1761 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1762 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1763
1764 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1765 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1766
1767 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1768 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1769
1770 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1771 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1772
1773 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1774 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1775
1776 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1777 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1778
1779 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1780 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1781
1782 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1783 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1784
1785 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1786 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1787
1788 /* RISC I/O register. */
1789 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1790 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1791
1792 /* Mailbox registers. */
1793 mbx_reg = &reg->mailbox0;
1794 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1795 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1796
1797 /* Transfer sequence registers. */
1798 iter_reg = fw->xseq_gp_reg;
1799 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1800 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1801 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1802 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1803 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1804 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1805 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1806 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1807
1808 iter_reg = fw->xseq_0_reg;
1809 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1810 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1811 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1812
1813 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1814
1815 /* Receive sequence registers. */
1816 iter_reg = fw->rseq_gp_reg;
1817 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1818 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1819 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1820 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1821 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1822 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1823 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1824 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1825
1826 iter_reg = fw->rseq_0_reg;
1827 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1828 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1829
1830 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1831 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1832
1833 /* Auxiliary sequence registers. */
1834 iter_reg = fw->aseq_gp_reg;
1835 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1836 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1837 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1838 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1839 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1840 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1841 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1842 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1843
1844 iter_reg = fw->aseq_0_reg;
1845 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1846 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1847
1848 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1849 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1850
1851 /* Command DMA registers. */
1852 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1853
1854 /* Queues. */
1855 iter_reg = fw->req0_dma_reg;
1856 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1857 dmp_reg = &reg->iobase_q;
1858 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1859 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1860
1861 iter_reg = fw->resp0_dma_reg;
1862 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1863 dmp_reg = &reg->iobase_q;
1864 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1865 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1866
1867 iter_reg = fw->req1_dma_reg;
1868 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1869 dmp_reg = &reg->iobase_q;
1870 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1871 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1872
1873 /* Transmit DMA registers. */
1874 iter_reg = fw->xmt0_dma_reg;
1875 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1876 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1877
1878 iter_reg = fw->xmt1_dma_reg;
1879 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1880 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1881
1882 iter_reg = fw->xmt2_dma_reg;
1883 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1884 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1885
1886 iter_reg = fw->xmt3_dma_reg;
1887 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1888 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1889
1890 iter_reg = fw->xmt4_dma_reg;
1891 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1892 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1893
1894 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1895
1896 /* Receive DMA registers. */
1897 iter_reg = fw->rcvt0_data_dma_reg;
1898 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1899 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1900
1901 iter_reg = fw->rcvt1_data_dma_reg;
1902 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1903 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1904
1905 /* RISC registers. */
1906 iter_reg = fw->risc_gp_reg;
1907 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1914 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1915
1916 /* Local memory controller registers. */
1917 iter_reg = fw->lmc_reg;
1918 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1919 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1920 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1921 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1922 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1923 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1924 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1925 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1926
1927 /* Fibre Protocol Module registers. */
1928 iter_reg = fw->fpm_hdw_reg;
1929 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1930 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1931 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1932 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1933 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1934 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1935 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1936 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1937 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1938 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1939 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1940 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1941 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1942 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1943
1944 /* Frame Buffer registers. */
1945 iter_reg = fw->fb_hdw_reg;
1946 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1947 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1948 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1949 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1950 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1951 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1952 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1953 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1954 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1955 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1956 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1957 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1958 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1959
1960 /* Multi queue registers */
1961 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1962 &last_chain);
1963
1964 rval = qla24xx_soft_reset(ha);
1965 if (rval != QLA_SUCCESS)
1966 goto qla81xx_fw_dump_failed_0;
1967
1968 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1969 &nxt);
1970 if (rval != QLA_SUCCESS)
1971 goto qla81xx_fw_dump_failed_0;
1972
1973 nxt = qla2xxx_copy_queues(ha, nxt);
1974
1975 qla24xx_copy_eft(ha, nxt);
1976
1977 /* Chain entries -- started with MQ. */
1978 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1979 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1980 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1981 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1982 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1983 if (last_chain) {
1984 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1985 *last_chain |= htonl(DUMP_CHAIN_LAST);
1986 }
1987
1988 /* Adjust valid length. */
1989 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1990
1991 qla81xx_fw_dump_failed_0:
1992 qla2xxx_dump_post_process(base_vha, rval);
1993
1994 qla81xx_fw_dump_failed:
1995 #ifndef __CHECKER__
1996 if (!hardware_locked)
1997 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1998 #else
1999 ;
2000 #endif
2001 }
2002
2003 void
2004 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2005 {
2006 int rval;
2007 uint32_t cnt;
2008 struct qla_hw_data *ha = vha->hw;
2009 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2010 uint32_t __iomem *dmp_reg;
2011 uint32_t *iter_reg;
2012 uint16_t __iomem *mbx_reg;
2013 unsigned long flags;
2014 struct qla83xx_fw_dump *fw;
2015 void *nxt, *nxt_chain;
2016 uint32_t *last_chain = NULL;
2017 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2018
2019 flags = 0;
2020 ha->fw_dump_cap_flags = 0;
2021
2022 #ifndef __CHECKER__
2023 if (!hardware_locked)
2024 spin_lock_irqsave(&ha->hardware_lock, flags);
2025 #endif
2026
2027 if (!ha->fw_dump) {
2028 ql_log(ql_log_warn, vha, 0xd00c,
2029 "No buffer available for dump!!!\n");
2030 goto qla83xx_fw_dump_failed;
2031 }
2032
2033 if (ha->fw_dumped) {
2034 ql_log(ql_log_warn, vha, 0xd00d,
2035 "Firmware has been previously dumped (%p) -- ignoring "
2036 "request...\n", ha->fw_dump);
2037 goto qla83xx_fw_dump_failed;
2038 }
2039 fw = &ha->fw_dump->isp.isp83;
2040 qla2xxx_prep_dump(ha, ha->fw_dump);
2041
2042 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
2043
2044 /*
2045 * Pause RISC. No need to track timeout, as resetting the chip
2046 * is the right approach in case of a pause timeout.
2047 */
2048 qla24xx_pause_risc(reg, ha);
2049
2050 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2051 dmp_reg = &reg->iobase_window;
2052 RD_REG_DWORD(dmp_reg);
2053 WRT_REG_DWORD(dmp_reg, 0);
2054
2055 dmp_reg = &reg->unused_4_1[0];
2056 RD_REG_DWORD(dmp_reg);
2057 WRT_REG_DWORD(dmp_reg, 0);
2058
2059 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2060 dmp_reg = &reg->unused_4_1[2];
2061 RD_REG_DWORD(dmp_reg);
2062 WRT_REG_DWORD(dmp_reg, 0);
2063
2064 /* select PCR and disable ECC checking and correction */
2065 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2066 RD_REG_DWORD(&reg->iobase_addr);
2067 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2068
2069 /* Host/Risc registers. */
2070 iter_reg = fw->host_risc_reg;
2071 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2072 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2073 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2074
2075 /* PCIe registers. */
2076 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2077 RD_REG_DWORD(&reg->iobase_addr);
2078 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2079 dmp_reg = &reg->iobase_c4;
2080 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2081 dmp_reg++;
2082 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2083 dmp_reg++;
2084 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2085 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2086
2087 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2088 RD_REG_DWORD(&reg->iobase_window);
2089
2090 /* Host interface registers. */
2091 dmp_reg = &reg->flash_addr;
2092 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2093 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2094
2095 /* Disable interrupts. */
2096 WRT_REG_DWORD(&reg->ictrl, 0);
2097 RD_REG_DWORD(&reg->ictrl);
2098
2099 /* Shadow registers. */
2100 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2101 RD_REG_DWORD(&reg->iobase_addr);
2102 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2103 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2104
2105 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2106 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2107
2108 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2109 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2110
2111 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2112 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2113
2114 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2115 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2116
2117 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2118 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2119
2120 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2121 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2122
2123 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2124 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2125
2126 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2127 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2128
2129 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2130 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2131
2132 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2133 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2134
2135 /* RISC I/O register. */
2136 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2137 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2138
2139 /* Mailbox registers. */
2140 mbx_reg = &reg->mailbox0;
2141 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2142 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2143
2144 /* Transfer sequence registers. */
2145 iter_reg = fw->xseq_gp_reg;
2146 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2147 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2148 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2149 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2161 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2162
2163 iter_reg = fw->xseq_0_reg;
2164 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2165 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2166 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2167
2168 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2169
2170 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2171
2172 /* Receive sequence registers. */
2173 iter_reg = fw->rseq_gp_reg;
2174 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2175 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2176 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2177 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2178 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2179 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2180 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2181 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2182 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2183 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2184 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2185 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2186 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2187 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2188 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2189 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2190
2191 iter_reg = fw->rseq_0_reg;
2192 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2193 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2194
2195 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2196 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2197 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2198
2199 /* Auxiliary sequence registers. */
2200 iter_reg = fw->aseq_gp_reg;
2201 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2202 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2203 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2204 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2205 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2206 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2207 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2208 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2209 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2210 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2211 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2212 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2213 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2214 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2215 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2216 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2217
2218 iter_reg = fw->aseq_0_reg;
2219 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2220 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2221
2222 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2223 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2224 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2225
2226 /* Command DMA registers. */
2227 iter_reg = fw->cmd_dma_reg;
2228 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2229 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2230 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2231 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2232
2233 /* Queues. */
2234 iter_reg = fw->req0_dma_reg;
2235 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2236 dmp_reg = &reg->iobase_q;
2237 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2238 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2239
2240 iter_reg = fw->resp0_dma_reg;
2241 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2242 dmp_reg = &reg->iobase_q;
2243 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2244 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2245
2246 iter_reg = fw->req1_dma_reg;
2247 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2248 dmp_reg = &reg->iobase_q;
2249 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2250 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2251
2252 /* Transmit DMA registers. */
2253 iter_reg = fw->xmt0_dma_reg;
2254 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2255 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2256
2257 iter_reg = fw->xmt1_dma_reg;
2258 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2259 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2260
2261 iter_reg = fw->xmt2_dma_reg;
2262 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2263 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2264
2265 iter_reg = fw->xmt3_dma_reg;
2266 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2267 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2268
2269 iter_reg = fw->xmt4_dma_reg;
2270 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2271 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2272
2273 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2274
2275 /* Receive DMA registers. */
2276 iter_reg = fw->rcvt0_data_dma_reg;
2277 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2278 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2279
2280 iter_reg = fw->rcvt1_data_dma_reg;
2281 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2282 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2283
2284 /* RISC registers. */
2285 iter_reg = fw->risc_gp_reg;
2286 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2287 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2288 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2289 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2290 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2293 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2294
2295 /* Local memory controller registers. */
2296 iter_reg = fw->lmc_reg;
2297 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2298 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2299 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2304 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2305
2306 /* Fibre Protocol Module registers. */
2307 iter_reg = fw->fpm_hdw_reg;
2308 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2309 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2310 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2323 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2324
2325 /* RQ0 Array registers. */
2326 iter_reg = fw->rq0_array_reg;
2327 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2328 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2329 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2342 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2343
2344 /* RQ1 Array registers. */
2345 iter_reg = fw->rq1_array_reg;
2346 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2347 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2348 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2353 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2354 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2355 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2357 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2358 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2359 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2360 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2361 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2362
2363 /* RP0 Array registers. */
2364 iter_reg = fw->rp0_array_reg;
2365 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2366 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2367 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2368 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2380 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2381
2382 /* RP1 Array registers. */
2383 iter_reg = fw->rp1_array_reg;
2384 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2386 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2387 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2390 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2391 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2392 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2393 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2394 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2395 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2396 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2397 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2398 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2399 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2400
2401 iter_reg = fw->at0_array_reg;
2402 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2403 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2404 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2405 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2406 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2407 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2408 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2409 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2410
2411 /* I/O Queue Control registers. */
2412 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2413
2414 /* Frame Buffer registers. */
2415 iter_reg = fw->fb_hdw_reg;
2416 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2417 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2418 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2419 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2420 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2421 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2422 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2423 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2424 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2425 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2426 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2427 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2428 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2429 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2430 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2431 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2432 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2433 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2434 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2435 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2436 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2437 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2438 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2439 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2440 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2441 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2442 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2443
2444 /* Multi queue registers */
2445 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2446 &last_chain);
2447
2448 rval = qla24xx_soft_reset(ha);
2449 if (rval != QLA_SUCCESS) {
2450 ql_log(ql_log_warn, vha, 0xd00e,
2451 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2452 rval = QLA_SUCCESS;
2453
2454 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2455
2456 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2457 RD_REG_DWORD(&reg->hccr);
2458
2459 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2460 RD_REG_DWORD(&reg->hccr);
2461
2462 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2463 RD_REG_DWORD(&reg->hccr);
2464
2465 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2466 udelay(5);
2467
2468 if (!cnt) {
2469 nxt = fw->code_ram;
2470 nxt += sizeof(fw->code_ram);
2471 nxt += (ha->fw_memory_size - 0x100000 + 1);
2472 goto copy_queue;
2473 } else {
2474 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2475 ql_log(ql_log_warn, vha, 0xd010,
2476 "bigger hammer success?\n");
2477 }
2478 }
2479
2480 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2481 &nxt);
2482 if (rval != QLA_SUCCESS)
2483 goto qla83xx_fw_dump_failed_0;
2484
2485 copy_queue:
2486 nxt = qla2xxx_copy_queues(ha, nxt);
2487
2488 qla24xx_copy_eft(ha, nxt);
2489
2490 /* Chain entries -- started with MQ. */
2491 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2492 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2493 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2494 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2495 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2496 if (last_chain) {
2497 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2498 *last_chain |= htonl(DUMP_CHAIN_LAST);
2499 }
2500
2501 /* Adjust valid length. */
2502 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2503
2504 qla83xx_fw_dump_failed_0:
2505 qla2xxx_dump_post_process(base_vha, rval);
2506
2507 qla83xx_fw_dump_failed:
2508 #ifndef __CHECKER__
2509 if (!hardware_locked)
2510 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2511 #else
2512 ;
2513 #endif
2514 }
2515
2516 /****************************************************************************/
2517 /* Driver Debug Functions. */
2518 /****************************************************************************/
2519
2520 static inline int
2521 ql_mask_match(uint32_t level)
2522 {
2523 if (ql2xextended_error_logging == 1)
2524 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2525 return (level & ql2xextended_error_logging) == level;
2526 }
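/*
 * Illustrative sketch, not part of the driver source: ql_mask_match() only
 * lets a message through when every bit of its level is set in the
 * ql2xextended_error_logging module parameter (a value of 1 is expanded to
 * QL_DBG_DEFAULT1_MASK, as the special case above shows).  With hypothetical
 * values, 0x08000000 standing in for one of the ql_dbg_* category bits:
 *
 *	ql2xextended_error_logging = 0x0c000000;
 *	ql_mask_match(0x08000000);	// 0x08000000 & 0x0c000000 == 0x08000000 -> true
 *	ql_mask_match(0x10000000);	// 0x10000000 & 0x0c000000 == 0x00000000 -> false
 *	ql_mask_match(0x0c000000);	// combined levels need every bit enabled -> true
 */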
2527
2528 /*
2529 * This function is for formatting and logging debug information.
2530 * It is to be used when vha is available. It formats the message
2531 * and logs it to the messages file.
2532 * parameters:
2533 * level: The level of the debug messages to be printed.
2534 * If the ql2xextended_error_logging value is set appropriately,
2535 * this message will appear in the messages file.
2536 * vha: Pointer to the scsi_qla_host_t.
2537 * id: This is a unique identifier for the level. It identifies the
2538 * part of the code from where the message originated.
2539 * msg: The message to be displayed.
2540 */
2541 void
2542 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2543 {
2544 va_list va;
2545 struct va_format vaf;
2546
2547 if (!ql_mask_match(level))
2548 return;
2549
2550 va_start(va, fmt);
2551
2552 vaf.fmt = fmt;
2553 vaf.va = &va;
2554
2555 if (vha != NULL) {
2556 const struct pci_dev *pdev = vha->hw->pdev;
2557 /* <module-name> <pci-name> <msg-id>:<host> Message */
2558 pr_warn("%s [%s]-%04x:%ld: %pV",
2559 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2560 vha->host_no, &vaf);
2561 } else {
2562 pr_warn("%s [%s]-%04x: : %pV",
2563 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2564 }
2565
2566 va_end(va);
2567
2568 }
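/*
 * Illustrative sketch, not part of the driver source: callers pass one of
 * the ql_dbg_* category bits, their vha, and a message id from the table at
 * the top of this file (ql_dbg_offset is folded into the printed id).  The
 * id and message text below are hypothetical:
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2100,
 *	    "Hypothetical discovery trace, loop_id=0x%x.\n", fcport->loop_id);
 */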
2569
2570 /*
2571 * This function is for formatting and logging debug information.
2572 * It is to be used when vha is not available but the PCI device is,
2573 * i.e., before host allocation. It formats the message and logs it
2574 * to the messages file.
2575 * parameters:
2576 * level: The level of the debug messages to be printed.
2577 * If the ql2xextended_error_logging value is set appropriately,
2578 * this message will appear in the messages file.
2579 * pdev: Pointer to the struct pci_dev.
2580 * id: This is a unique id for the level. It identifies the part
2581 * of the code from where the message originated.
2582 * msg: The message to be displayed.
2583 */
2584 void
2585 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2586 const char *fmt, ...)
2587 {
2588 va_list va;
2589 struct va_format vaf;
2590
2591 if (pdev == NULL)
2592 return;
2593 if (!ql_mask_match(level))
2594 return;
2595
2596 va_start(va, fmt);
2597
2598 vaf.fmt = fmt;
2599 vaf.va = &va;
2600
2601 /* <module-name> <dev-name>:<msg-id> Message */
2602 pr_warn("%s [%s]-%04x: : %pV",
2603 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2604
2605 va_end(va);
2606 }
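/*
 * Illustrative sketch, not part of the driver source: early in PCI probe,
 * before the scsi host is allocated, only the pci_dev exists, so the _pci
 * variant is used instead.  The id and message text below are hypothetical:
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0100,
 *	    "Hypothetical probe-time trace for %s.\n", pci_name(pdev));
 */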
2607
2608 /*
2609 * This function is for formatting and logging log messages.
2610 * It is to be used when vha is available. It formats the message
2611 * and logs it to the messages file. All the messages will be logged
2612 * irrespective of the value of ql2xextended_error_logging.
2613 * parameters:
2614 * level: The level of the log messages to be printed in the
2615 * messages file.
2616 * vha: Pointer to the scsi_qla_host_t
2617 * id: This is a unique id for the level. It identifies the
2618 * part of the code from where the message originated.
2619 * msg: The message to be displayed.
2620 */
2621 void
2622 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2623 {
2624 va_list va;
2625 struct va_format vaf;
2626 char pbuf[128];
2627
2628 if (level > ql_errlev)
2629 return;
2630
2631 if (vha != NULL) {
2632 const struct pci_dev *pdev = vha->hw->pdev;
2633 /* <module-name> <msg-id>:<host> Message */
2634 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2635 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2636 } else {
2637 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2638 QL_MSGHDR, "0000:00:00.0", id);
2639 }
2640 pbuf[sizeof(pbuf) - 1] = 0;
2641
2642 va_start(va, fmt);
2643
2644 vaf.fmt = fmt;
2645 vaf.va = &va;
2646
2647 switch (level) {
2648 case ql_log_fatal: /* FATAL LOG */
2649 pr_crit("%s%pV", pbuf, &vaf);
2650 break;
2651 case ql_log_warn:
2652 pr_err("%s%pV", pbuf, &vaf);
2653 break;
2654 case ql_log_info:
2655 pr_warn("%s%pV", pbuf, &vaf);
2656 break;
2657 default:
2658 pr_info("%s%pV", pbuf, &vaf);
2659 break;
2660 }
2661
2662 va_end(va);
2663 }
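/*
 * Illustrative sketch, not part of the driver source: ql_log() is gated by
 * ql_errlev rather than ql2xextended_error_logging, and the level selects
 * the printk severity (fatal -> pr_crit, warn -> pr_err, info -> pr_warn).
 * A hypothetical warning:
 *
 *	ql_log(ql_log_warn, vha, 0x00ff,
 *	    "Hypothetical warning: request timed out after %u seconds.\n",
 *	    timeout);
 */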
2664
2665 /*
2666 * This function is for formatting and logging log messages.
2667 * It is to be used when vha is not available but the PCI device is,
2668 * i.e., before host allocation. It formats the message and logs
2669 * it to the messages file. All the messages are logged irrespective
2670 * of the value of ql2xextended_error_logging.
2671 * parameters:
2672 * level: The level of the log messages to be printed in the
2673 * messages file.
2674 * pdev: Pointer to the struct pci_dev.
2675 * id: This is a unique id for the level. It identifies the
2676 * part of the code from where the message originated.
2677 * msg: The message to be displayed.
2678 */
2679 void
2680 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2681 const char *fmt, ...)
2682 {
2683 va_list va;
2684 struct va_format vaf;
2685 char pbuf[128];
2686
2687 if (pdev == NULL)
2688 return;
2689 if (level > ql_errlev)
2690 return;
2691
2692 /* <module-name> <dev-name>:<msg-id> Message */
2693 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2694 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2695 pbuf[sizeof(pbuf) - 1] = 0;
2696
2697 va_start(va, fmt);
2698
2699 vaf.fmt = fmt;
2700 vaf.va = &va;
2701
2702 switch (level) {
2703 case ql_log_fatal: /* FATAL LOG */
2704 pr_crit("%s%pV", pbuf, &vaf);
2705 break;
2706 case ql_log_warn:
2707 pr_err("%s%pV", pbuf, &vaf);
2708 break;
2709 case ql_log_info:
2710 pr_warn("%s%pV", pbuf, &vaf);
2711 break;
2712 default:
2713 pr_info("%s%pV", pbuf, &vaf);
2714 break;
2715 }
2716
2717 va_end(va);
2718 }
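/*
 * Illustrative sketch, not part of the driver source: ql_log_pci() follows
 * the same severity mapping as ql_log() but takes a pci_dev, for messages
 * emitted before host allocation.  The id and text below are hypothetical:
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x00fe,
 *	    "Hypothetical fatal error: unable to map device registers.\n");
 */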
2719
2720 void
2721 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2722 {
2723 int i;
2724 struct qla_hw_data *ha = vha->hw;
2725 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2726 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2727 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2728 uint16_t __iomem *mbx_reg;
2729
2730 if (!ql_mask_match(level))
2731 return;
2732
2733 if (IS_P3P_TYPE(ha))
2734 mbx_reg = &reg82->mailbox_in[0];
2735 else if (IS_FWI2_CAPABLE(ha))
2736 mbx_reg = &reg24->mailbox0;
2737 else
2738 mbx_reg = MAILBOX_REG(ha, reg, 0);
2739
2740 ql_dbg(level, vha, id, "Mailbox registers:\n");
2741 for (i = 0; i < 6; i++)
2742 ql_dbg(level, vha, id,
2743 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
2744 }
2745
2746
2747 void
2748 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2749 uint8_t *buf, uint size)
2750 {
2751 uint cnt;
2752
2753 if (!ql_mask_match(level))
2754 return;
2755
2756 ql_dbg(level, vha, id,
2757 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2758 ql_dbg(level, vha, id,
2759 "----- -----------------------------------------------\n");
2760 for (cnt = 0; cnt < size; cnt += 16) {
2761 ql_dbg(level, vha, id, "%04x: ", cnt);
2762 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2763 buf + cnt, min(16U, size - cnt), false);
2764 }
2765 }
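/*
 * Illustrative sketch, not part of the driver source: ql_dump_buffer()
 * prints 16 bytes per line under a header row, subject to the same
 * ql_mask_match() gating as ql_dbg().  A hypothetical call such as
 *
 *	ql_dump_buffer(ql_dbg_buffer, vha, 0x00fd,
 *	    (uint8_t *)pkt, sizeof(*pkt));
 *
 * produces log lines of the form "0000: xx xx xx ... xx".
 */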
2766
2767 /*
2768 * This function is for formatting and logging log messages.
2769 * It is to be used when a queue pair (qpair) is available. It formats
2770 * the message and logs it to the messages file. All the messages will
2771 * be logged irrespective of the value of ql2xextended_error_logging.
2772 * parameters:
2773 * level: The level of the log messages to be printed in the
2774 * messages file.
2775 * qpair: Pointer to the struct qla_qpair.
2776 * id: This is a unique id for the level. It identifies the
2777 * part of the code from where the message originated.
2778 * msg: The message to be displayed.
2779 */
2780 void
2781 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2782 const char *fmt, ...)
2783 {
2784 va_list va;
2785 struct va_format vaf;
2786 char pbuf[128];
2787
2788 if (level > ql_errlev)
2789 return;
2790
2791 if (qpair != NULL) {
2792 const struct pci_dev *pdev = qpair->pdev;
2793 /* <module-name> <msg-id>:<host> Message */
2794 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
2795 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2796 } else {
2797 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2798 QL_MSGHDR, "0000:00:00.0", id);
2799 }
2800 pbuf[sizeof(pbuf) - 1] = 0;
2801
2802 va_start(va, fmt);
2803
2804 vaf.fmt = fmt;
2805 vaf.va = &va;
2806
2807 switch (level) {
2808 case ql_log_fatal: /* FATAL LOG */
2809 pr_crit("%s%pV", pbuf, &vaf);
2810 break;
2811 case ql_log_warn:
2812 pr_err("%s%pV", pbuf, &vaf);
2813 break;
2814 case ql_log_info:
2815 pr_warn("%s%pV", pbuf, &vaf);
2816 break;
2817 default:
2818 pr_info("%s%pV", pbuf, &vaf);
2819 break;
2820 }
2821
2822 va_end(va);
2823 }
2824
2825 /*
2826 * This function is for formatting and logging debug information.
2827 * It is to be used when a queue pair (qpair) is available. It formats
2828 * the message and logs it to the messages file.
2829 * parameters:
2830 * level: The level of the debug messages to be printed.
2831 * If the ql2xextended_error_logging value is set appropriately,
2832 * this message will appear in the messages file.
2833 * qpair: Pointer to the struct qla_qpair.
2834 * id: This is a unique identifier for the level. It identifies the
2835 * part of the code from where the message originated.
2836 * msg: The message to be displayed.
2837 */
2838 void
2839 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2840 const char *fmt, ...)
2841 {
2842 va_list va;
2843 struct va_format vaf;
2844
2845 if (!ql_mask_match(level))
2846 return;
2847
2848 va_start(va, fmt);
2849
2850 vaf.fmt = fmt;
2851 vaf.va = &va;
2852
2853 if (qpair != NULL) {
2854 const struct pci_dev *pdev = qpair->pdev;
2855 /* <module-name> <pci-name> <msg-id>:<host> Message */
2856 pr_warn("%s [%s]-%04x: %pV",
2857 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2858 &vaf);
2859 } else {
2860 pr_warn("%s [%s]-%04x: : %pV",
2861 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2862 }
2863
2864 va_end(va);
2865
2866 }
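/*
 * Illustrative sketch, not part of the driver source: the _qp variants take
 * a struct qla_qpair instead of a vha, e.g. from a per-queue-pair
 * completion path.  The id and message text below are hypothetical:
 *
 *	ql_dbg_qp(ql_dbg_io, qpair, 0x3000,
 *	    "Hypothetical per-queue-pair trace, qpair id=%d.\n", qpair->id);
 */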