qla2xxx: Avoid side effects when using endianizer macros.
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8 /*
9 * Table showing the current message id in use for each level.
10 * Update this table when adding new log/debug messages.
11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x018f | 0x0146 |
15 * | | | 0x015b-0x0160 |
16 * | | | 0x016e-0x0170 |
17 * | Mailbox commands | 0x1192 | |
18 * | | | |
19 * | Device Discovery | 0x2016 | 0x2020-0x2022, |
20 * | | | 0x2011-0x2012, |
21 * | | | 0x2099-0x20a4 |
22 * | Queue Command and IO tracing | 0x3074 | 0x300b |
23 * | | | 0x3027-0x3028 |
24 * | | | 0x303d-0x3041 |
25 * | | | 0x302d,0x3033 |
26 * | | | 0x3036,0x3038 |
27 * | | | 0x303a |
28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
29 * | Async Events | 0x5089 | 0x502b-0x502f |
30 * | | | 0x505e |
31 * | | | 0x5084,0x5075 |
32 * | | | 0x503d,0x5044 |
33 * | | | 0x507b,0x505f |
34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
36 * | | | 0x7020,0x7024 |
37 * | | | 0x7039,0x7045 |
38 * | | | 0x7073-0x7075 |
39 * | | | 0x70a5-0x70a6 |
40 * | | | 0x70a8,0x70ab |
41 * | | | 0x70ad-0x70ae |
42 * | | | 0x70d7-0x70db |
43 * | | | 0x70de-0x70df |
44 * | Task Management | 0x803d | 0x8000,0x800b |
45 * | | | 0x8019 |
46 * | | | 0x8025,0x8026 |
47 * | | | 0x8031,0x8032 |
48 * | | | 0x8039,0x803c |
49 * | AER/EEH | 0x9011 | |
50 * | Virtual Port | 0xa007 | |
51 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
52 * | | | 0xb09e,0xb0ae |
53 * | | | 0xb0c3,0xb0c6 |
54 * | | | 0xb0e0-0xb0ef |
55 * | | | 0xb085,0xb0dc |
56 * | | | 0xb107,0xb108 |
57 * | | | 0xb111,0xb11e |
58 * | | | 0xb12c,0xb12d |
59 * | | | 0xb13a,0xb142 |
60 * | | | 0xb13c-0xb140 |
61 * | | | 0xb149 |
62 * | MultiQ | 0xc00c | |
63 * | Misc | 0xd301 | 0xd031-0xd0ff |
64 * | | | 0xd101-0xd1fe |
65 * | | | 0xd214-0xd2fe |
66 * | Target Mode | 0xe080 | |
67 * | Target Mode Management | 0xf09b | 0xf002 |
68 * | | | 0xf046-0xf049 |
69 * | Target Mode Task Management | 0x1000d | |
70 * ----------------------------------------------------------------------
71 */
72
73 #include "qla_def.h"
74
75 #include <linux/delay.h>
76
77 static uint32_t ql_dbg_offset = 0x800;
78
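/*
 * qla2xxx_prep_dump() - Record the firmware version and PCI IDs
 * (big-endian) in the firmware-dump header.
 */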
79 static inline void
80 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
81 {
82 fw_dump->fw_major_version = htonl(ha->fw_major_version);
83 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
84 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
85 fw_dump->fw_attributes = htonl(ha->fw_attributes);
86
87 fw_dump->vendor = htonl(ha->pdev->vendor);
88 fw_dump->device = htonl(ha->pdev->device);
89 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
90 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
91 }
92
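/*
 * qla2xxx_copy_queues() - Copy the base request and response rings into
 * the dump buffer; returns the pointer advanced past the copied data.
 */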
93 static inline void *
94 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
95 {
96 struct req_que *req = ha->req_q_map[0];
97 struct rsp_que *rsp = ha->rsp_q_map[0];
98 /* Request queue. */
99 memcpy(ptr, req->ring, req->length *
100 sizeof(request_t));
101
102 /* Response queue. */
103 ptr += req->length * sizeof(request_t);
104 memcpy(ptr, rsp->ring, rsp->length *
105 sizeof(response_t));
106
107 return ptr + (rsp->length * sizeof(response_t));
108 }
109
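/*
 * qla27xx_dump_mpi_ram() - Dump MPI RAM via the MBC_LOAD_DUMP_MPI_RAM
 * mailbox command, staging each chunk through the GID list DMA buffer
 * before copying it into @ram. On success, *nxt points past the last
 * dword written.
 */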
110 int
111 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
112 uint32_t ram_dwords, void **nxt)
113 {
114 int rval;
115 uint32_t cnt, stat, timer, dwords, idx;
116 uint16_t mb0;
117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
118 dma_addr_t dump_dma = ha->gid_list_dma;
119 uint32_t *dump = (uint32_t *)ha->gid_list;
120
121 rval = QLA_SUCCESS;
122 mb0 = 0;
123
124 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
125 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
126
127 dwords = qla2x00_gid_list_size(ha) / 4;
128 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
129 cnt += dwords, addr += dwords) {
130 if (cnt + dwords > ram_dwords)
131 dwords = ram_dwords - cnt;
132
133 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
134 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
135
136 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
137 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
138 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
139 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
140
141 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
142 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
143
144 WRT_REG_WORD(&reg->mailbox9, 0);
145 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
146
147 ha->flags.mbox_int = 0;
148 for (timer = 6000000; timer; timer--) {
149 /* Check for pending interrupts. */
150 stat = RD_REG_DWORD(&reg->host_status);
151 if (stat & HSRX_RISC_INT) {
152 stat &= 0xff;
153
154 if (stat == 0x1 || stat == 0x2 ||
155 stat == 0x10 || stat == 0x11) {
156 set_bit(MBX_INTERRUPT,
157 &ha->mbx_cmd_flags);
158
159 mb0 = RD_REG_WORD(&reg->mailbox0);
160 RD_REG_WORD(&reg->mailbox1);
161
162 WRT_REG_DWORD(&reg->hccr,
163 HCCRX_CLR_RISC_INT);
164 RD_REG_DWORD(&reg->hccr);
165 break;
166 }
167
168 /* Clear this intr; it wasn't a mailbox intr */
169 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
170 RD_REG_DWORD(&reg->hccr);
171 }
172 udelay(5);
173 }
174 ha->flags.mbox_int = 1;
175
176 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
177 rval = mb0 & MBS_MASK;
178 for (idx = 0; idx < dwords; idx++)
179 ram[cnt + idx] = IS_QLA27XX(ha) ?
180 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
181 } else {
182 rval = QLA_FUNCTION_FAILED;
183 }
184 }
185
186 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
187 return rval;
188 }
189
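/*
 * qla24xx_dump_ram() - Dump RISC RAM via the MBC_DUMP_RISC_RAM_EXTENDED
 * mailbox command, staging each chunk through the GID list DMA buffer.
 * On success, *nxt points past the last dword written.
 */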
190 int
191 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
192 uint32_t ram_dwords, void **nxt)
193 {
194 int rval;
195 uint32_t cnt, stat, timer, dwords, idx;
196 uint16_t mb0;
197 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
198 dma_addr_t dump_dma = ha->gid_list_dma;
199 uint32_t *dump = (uint32_t *)ha->gid_list;
200
201 rval = QLA_SUCCESS;
202 mb0 = 0;
203
204 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
205 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
206
207 dwords = qla2x00_gid_list_size(ha) / 4;
208 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
209 cnt += dwords, addr += dwords) {
210 if (cnt + dwords > ram_dwords)
211 dwords = ram_dwords - cnt;
212
213 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
214 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
215
216 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
217 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
218 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
219 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
220
221 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
222 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
223 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
224
225 ha->flags.mbox_int = 0;
226 for (timer = 6000000; timer; timer--) {
227 /* Check for pending interrupts. */
228 stat = RD_REG_DWORD(&reg->host_status);
229 if (stat & HSRX_RISC_INT) {
230 stat &= 0xff;
231
232 if (stat == 0x1 || stat == 0x2 ||
233 stat == 0x10 || stat == 0x11) {
234 set_bit(MBX_INTERRUPT,
235 &ha->mbx_cmd_flags);
236
237 mb0 = RD_REG_WORD(&reg->mailbox0);
238
239 WRT_REG_DWORD(&reg->hccr,
240 HCCRX_CLR_RISC_INT);
241 RD_REG_DWORD(&reg->hccr);
242 break;
243 }
244
245 /* Clear this intr; it wasn't a mailbox intr */
246 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
247 RD_REG_DWORD(&reg->hccr);
248 }
249 udelay(5);
250 }
251 ha->flags.mbox_int = 1;
252
253 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
254 rval = mb0 & MBS_MASK;
255 for (idx = 0; idx < dwords; idx++)
256 ram[cnt + idx] = IS_QLA27XX(ha) ?
257 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
258 } else {
259 rval = QLA_FUNCTION_FAILED;
260 }
261 }
262
263 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
264 return rval;
265 }
266
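/*
 * qla24xx_dump_memory() - Dump code RAM followed by external memory,
 * setting the corresponding dump capability flags as each region
 * completes.
 */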
267 static int
268 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
269 uint32_t cram_size, void **nxt)
270 {
271 int rval;
272
273 /* Code RAM. */
274 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
275 if (rval != QLA_SUCCESS)
276 return rval;
277
278 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
279
280 /* External Memory. */
281 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
282 ha->fw_memory_size - 0x100000 + 1, nxt);
283 if (rval == QLA_SUCCESS)
284 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
285
286 return rval;
287 }
288
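/*
 * qla24xx_read_window() - Select an I/O base window and read 'count'
 * dwords (stored big-endian) into 'buf'; returns the advanced buffer
 * pointer.
 */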
289 static uint32_t *
290 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
291 uint32_t count, uint32_t *buf)
292 {
293 uint32_t __iomem *dmp_reg;
294
295 WRT_REG_DWORD(&reg->iobase_addr, iobase);
296 dmp_reg = &reg->iobase_window;
297 for ( ; count--; dmp_reg++)
298 *buf++ = htonl(RD_REG_DWORD(dmp_reg));
299
300 return buf;
301 }
302
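/*
 * qla24xx_pause_risc() - Request a RISC pause and record completion in
 * the dump capability flags.
 */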
303 void
304 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
305 {
306 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
307
308 /* A 100 usec delay is sufficient for the hardware to pause the RISC. */
309 udelay(100);
310 if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
311 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
312 }
313
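/*
 * qla24xx_soft_reset() - Shut down DMA, issue an ISP soft reset and wait
 * for the RISC to report ready, recording each stage in the dump
 * capability flags.
 */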
314 int
315 qla24xx_soft_reset(struct qla_hw_data *ha)
316 {
317 int rval = QLA_SUCCESS;
318 uint32_t cnt;
319 uint16_t wd;
320 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
321
322 /*
323 * Reset RISC. The required delay depends on the system architecture;
324 * the driver proceeds with the reset sequence after waiting for a
325 * timeout period.
326 */
327 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
328 for (cnt = 0; cnt < 30000; cnt++) {
329 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
330 break;
331
332 udelay(10);
333 }
334 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
335 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
336
337 WRT_REG_DWORD(&reg->ctrl_status,
338 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
339 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
340
341 udelay(100);
342
343 /* Wait for soft-reset to complete. */
344 for (cnt = 0; cnt < 30000; cnt++) {
345 if ((RD_REG_DWORD(&reg->ctrl_status) &
346 CSRX_ISP_SOFT_RESET) == 0)
347 break;
348
349 udelay(10);
350 }
351 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
352 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
353
354 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
355 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
356
357 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
358 rval == QLA_SUCCESS; cnt--) {
359 if (cnt)
360 udelay(10);
361 else
362 rval = QLA_FUNCTION_TIMEOUT;
363 }
364 if (rval == QLA_SUCCESS)
365 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
366
367 return rval;
368 }
369
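/*
 * qla2xxx_dump_ram() - 16-bit counterpart of qla24xx_dump_ram() for
 * ISP2xxx parts; dumps RISC RAM in word-sized chunks through the GID
 * list DMA buffer.
 */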
370 static int
371 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
372 uint32_t ram_words, void **nxt)
373 {
374 int rval;
375 uint32_t cnt, stat, timer, words, idx;
376 uint16_t mb0;
377 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
378 dma_addr_t dump_dma = ha->gid_list_dma;
379 uint16_t *dump = (uint16_t *)ha->gid_list;
380
381 rval = QLA_SUCCESS;
382 mb0 = 0;
383
384 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
385 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
386
387 words = qla2x00_gid_list_size(ha) / 2;
388 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
389 cnt += words, addr += words) {
390 if (cnt + words > ram_words)
391 words = ram_words - cnt;
392
393 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
394 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
395
396 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
397 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
398 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
399 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
400
401 WRT_MAILBOX_REG(ha, reg, 4, words);
402 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
403
404 for (timer = 6000000; timer; timer--) {
405 /* Check for pending interrupts. */
406 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
407 if (stat & HSR_RISC_INT) {
408 stat &= 0xff;
409
410 if (stat == 0x1 || stat == 0x2) {
411 set_bit(MBX_INTERRUPT,
412 &ha->mbx_cmd_flags);
413
414 mb0 = RD_MAILBOX_REG(ha, reg, 0);
415
416 /* Release mailbox registers. */
417 WRT_REG_WORD(&reg->semaphore, 0);
418 WRT_REG_WORD(&reg->hccr,
419 HCCR_CLR_RISC_INT);
420 RD_REG_WORD(&reg->hccr);
421 break;
422 } else if (stat == 0x10 || stat == 0x11) {
423 set_bit(MBX_INTERRUPT,
424 &ha->mbx_cmd_flags);
425
426 mb0 = RD_MAILBOX_REG(ha, reg, 0);
427
428 WRT_REG_WORD(&reg->hccr,
429 HCCR_CLR_RISC_INT);
430 RD_REG_WORD(&reg->hccr);
431 break;
432 }
433
434 /* clear this intr; it wasn't a mailbox intr */
435 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 RD_REG_WORD(&reg->hccr);
437 }
438 udelay(5);
439 }
440
441 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 rval = mb0 & MBS_MASK;
443 for (idx = 0; idx < words; idx++)
444 ram[cnt + idx] = swab16(dump[idx]);
445 } else {
446 rval = QLA_FUNCTION_FAILED;
447 }
448 }
449
450 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
451 return rval;
452 }
453
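/*
 * qla2xxx_read_window() - Read 'count' words (stored big-endian) from
 * the currently selected register window into 'buf'.
 */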
454 static inline void
455 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
456 uint16_t *buf)
457 {
458 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
459
460 for ( ; count--; dmp_reg++)
461 *buf++ = htons(RD_REG_WORD(dmp_reg));
462 }
463
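/* Append the Extended Firmware Trace (EFT) buffer, if one is allocated. */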
464 static inline void *
465 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
466 {
467 if (!ha->eft)
468 return ptr;
469
470 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
471 return ptr + ntohl(ha->fw_dump->eft_size);
472 }
473
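/*
 * Append the Fibre Channel Event (FCE) trace buffer, including the FCE
 * mailbox values, as a dump chain entry.
 */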
474 static inline void *
475 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
476 {
477 uint32_t cnt;
478 uint32_t *iter_reg;
479 struct qla2xxx_fce_chain *fcec = ptr;
480
481 if (!ha->fce)
482 return ptr;
483
484 *last_chain = &fcec->type;
485 fcec->type = htonl(DUMP_CHAIN_FCE);
486 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
487 fce_calc_size(ha->fce_bufs));
488 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
489 fcec->addr_l = htonl(LSD(ha->fce_dma));
490 fcec->addr_h = htonl(MSD(ha->fce_dma));
491
492 iter_reg = fcec->eregs;
493 for (cnt = 0; cnt < 8; cnt++)
494 *iter_reg++ = htonl(ha->fce_mb[cnt]);
495
496 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
497
498 return (char *)iter_reg + ntohl(fcec->size);
499 }
500
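/*
 * Append the target-mode ATIO queue as a dump chain entry: chain header,
 * queue header, then the ring contents.
 */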
501 static inline void *
502 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
503 uint32_t **last_chain)
504 {
505 struct qla2xxx_mqueue_chain *q;
506 struct qla2xxx_mqueue_header *qh;
507 uint32_t num_queues;
508 int que;
509 struct {
510 int length;
511 void *ring;
512 } aq, *aqp;
513
514 if (!ha->tgt.atio_ring)
515 return ptr;
516
517 num_queues = 1;
518 aqp = &aq;
519 aqp->length = ha->tgt.atio_q_length;
520 aqp->ring = ha->tgt.atio_ring;
521
522 for (que = 0; que < num_queues; que++) {
523 /* aqp = ha->atio_q_map[que]; */
524 q = ptr;
525 *last_chain = &q->type;
526 q->type = htonl(DUMP_CHAIN_QUEUE);
527 q->chain_size = htonl(
528 sizeof(struct qla2xxx_mqueue_chain) +
529 sizeof(struct qla2xxx_mqueue_header) +
530 (aqp->length * sizeof(request_t)));
531 ptr += sizeof(struct qla2xxx_mqueue_chain);
532
533 /* Add header. */
534 qh = ptr;
535 qh->queue = htonl(TYPE_ATIO_QUEUE);
536 qh->number = htonl(que);
537 qh->size = htonl(aqp->length * sizeof(request_t));
538 ptr += sizeof(struct qla2xxx_mqueue_header);
539
540 /* Add data. */
541 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
542
543 ptr += aqp->length * sizeof(request_t);
544 }
545
546 return ptr;
547 }
548
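/*
 * Append the additional multiqueue request and response rings (queue 1
 * and up) as dump chain entries.
 */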
549 static inline void *
550 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
551 {
552 struct qla2xxx_mqueue_chain *q;
553 struct qla2xxx_mqueue_header *qh;
554 struct req_que *req;
555 struct rsp_que *rsp;
556 int que;
557
558 if (!ha->mqenable)
559 return ptr;
560
561 /* Request queues */
562 for (que = 1; que < ha->max_req_queues; que++) {
563 req = ha->req_q_map[que];
564 if (!req)
565 break;
566
567 /* Add chain. */
568 q = ptr;
569 *last_chain = &q->type;
570 q->type = htonl(DUMP_CHAIN_QUEUE);
571 q->chain_size = htonl(
572 sizeof(struct qla2xxx_mqueue_chain) +
573 sizeof(struct qla2xxx_mqueue_header) +
574 (req->length * sizeof(request_t)));
575 ptr += sizeof(struct qla2xxx_mqueue_chain);
576
577 /* Add header. */
578 qh = ptr;
579 qh->queue = htonl(TYPE_REQUEST_QUEUE);
580 qh->number = htonl(que);
581 qh->size = htonl(req->length * sizeof(request_t));
582 ptr += sizeof(struct qla2xxx_mqueue_header);
583
584 /* Add data. */
585 memcpy(ptr, req->ring, req->length * sizeof(request_t));
586 ptr += req->length * sizeof(request_t);
587 }
588
589 /* Response queues */
590 for (que = 1; que < ha->max_rsp_queues; que++) {
591 rsp = ha->rsp_q_map[que];
592 if (!rsp)
593 break;
594
595 /* Add chain. */
596 q = ptr;
597 *last_chain = &q->type;
598 q->type = htonl(DUMP_CHAIN_QUEUE);
599 q->chain_size = htonl(
600 sizeof(struct qla2xxx_mqueue_chain) +
601 sizeof(struct qla2xxx_mqueue_header) +
602 (rsp->length * sizeof(response_t)));
603 ptr += sizeof(struct qla2xxx_mqueue_chain);
604
605 /* Add header. */
606 qh = ptr;
607 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
608 qh->number = htonl(que);
609 qh->size = htonl(rsp->length * sizeof(response_t));
610 ptr += sizeof(struct qla2xxx_mqueue_header);
611
612 /* Add data. */
613 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
614 ptr += rsp->length * sizeof(response_t);
615 }
616
617 return ptr;
618 }
619
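/*
 * Capture the per-queue request/response in/out pointer registers as a
 * DUMP_CHAIN_MQ chain entry.
 */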
620 static inline void *
621 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
622 {
623 uint32_t cnt, que_idx;
624 uint8_t que_cnt;
625 struct qla2xxx_mq_chain *mq;
626 device_reg_t *reg;
627
628 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
629 return ptr;
630
631 mq = ptr;
632 *last_chain = &mq->type;
633 mq->type = htonl(DUMP_CHAIN_MQ);
634 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
635
636 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
637 ha->max_req_queues : ha->max_rsp_queues;
638 mq->count = htonl(que_cnt);
639 for (cnt = 0; cnt < que_cnt; cnt++) {
640 reg = ISP_QUE_REG(ha, cnt);
641 que_idx = cnt * 4;
642 mq->qregs[que_idx] =
643 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
644 mq->qregs[que_idx+1] =
645 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
646 mq->qregs[que_idx+2] =
647 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
648 mq->qregs[que_idx+3] =
649 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
650 }
651
652 return ptr + sizeof(struct qla2xxx_mq_chain);
653 }
654
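/*
 * Log the outcome of a firmware dump attempt; on success mark the dump
 * as valid and post a firmware-dump uevent.
 */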
655 void
656 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
657 {
658 struct qla_hw_data *ha = vha->hw;
659
660 if (rval != QLA_SUCCESS) {
661 ql_log(ql_log_warn, vha, 0xd000,
662 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
663 rval, ha->fw_dump_cap_flags);
664 ha->fw_dumped = 0;
665 } else {
666 ql_log(ql_log_info, vha, 0xd001,
667 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
668 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
669 ha->fw_dumped = 1;
670 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
671 }
672 }
673
674 /**
675 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
676 * @vha: HA context
677 * @hardware_locked: Called with the hardware_lock
678 */
679 void
680 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
681 {
682 int rval;
683 uint32_t cnt;
684 struct qla_hw_data *ha = vha->hw;
685 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
686 uint16_t __iomem *dmp_reg;
687 unsigned long flags;
688 struct qla2300_fw_dump *fw;
689 void *nxt;
690 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
691
692 flags = 0;
693
694 #ifndef __CHECKER__
695 if (!hardware_locked)
696 spin_lock_irqsave(&ha->hardware_lock, flags);
697 #endif
698
699 if (!ha->fw_dump) {
700 ql_log(ql_log_warn, vha, 0xd002,
701 "No buffer available for dump.\n");
702 goto qla2300_fw_dump_failed;
703 }
704
705 if (ha->fw_dumped) {
706 ql_log(ql_log_warn, vha, 0xd003,
707 "Firmware has been previously dumped (%p) "
708 "-- ignoring request.\n",
709 ha->fw_dump);
710 goto qla2300_fw_dump_failed;
711 }
712 fw = &ha->fw_dump->isp.isp23;
713 qla2xxx_prep_dump(ha, ha->fw_dump);
714
715 rval = QLA_SUCCESS;
716 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
717
718 /* Pause RISC. */
719 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
720 if (IS_QLA2300(ha)) {
721 for (cnt = 30000;
722 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
723 rval == QLA_SUCCESS; cnt--) {
724 if (cnt)
725 udelay(100);
726 else
727 rval = QLA_FUNCTION_TIMEOUT;
728 }
729 } else {
730 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
731 udelay(10);
732 }
733
734 if (rval == QLA_SUCCESS) {
735 dmp_reg = &reg->flash_address;
736 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
737 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
738
739 dmp_reg = &reg->u.isp2300.req_q_in;
740 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
741 cnt++, dmp_reg++)
742 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
743
744 dmp_reg = &reg->u.isp2300.mailbox0;
745 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
746 cnt++, dmp_reg++)
747 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
748
749 WRT_REG_WORD(&reg->ctrl_status, 0x40);
750 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
751
752 WRT_REG_WORD(&reg->ctrl_status, 0x50);
753 qla2xxx_read_window(reg, 48, fw->dma_reg);
754
755 WRT_REG_WORD(&reg->ctrl_status, 0x00);
756 dmp_reg = &reg->risc_hw;
757 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
758 cnt++, dmp_reg++)
759 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
760
761 WRT_REG_WORD(&reg->pcr, 0x2000);
762 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
763
764 WRT_REG_WORD(&reg->pcr, 0x2200);
765 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
766
767 WRT_REG_WORD(&reg->pcr, 0x2400);
768 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
769
770 WRT_REG_WORD(&reg->pcr, 0x2600);
771 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
772
773 WRT_REG_WORD(&reg->pcr, 0x2800);
774 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
775
776 WRT_REG_WORD(&reg->pcr, 0x2A00);
777 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
778
779 WRT_REG_WORD(&reg->pcr, 0x2C00);
780 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
781
782 WRT_REG_WORD(&reg->pcr, 0x2E00);
783 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
784
785 WRT_REG_WORD(&reg->ctrl_status, 0x10);
786 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
787
788 WRT_REG_WORD(&reg->ctrl_status, 0x20);
789 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
790
791 WRT_REG_WORD(&reg->ctrl_status, 0x30);
792 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
793
794 /* Reset RISC. */
795 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
796 for (cnt = 0; cnt < 30000; cnt++) {
797 if ((RD_REG_WORD(&reg->ctrl_status) &
798 CSR_ISP_SOFT_RESET) == 0)
799 break;
800
801 udelay(10);
802 }
803 }
804
805 if (!IS_QLA2300(ha)) {
806 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
807 rval == QLA_SUCCESS; cnt--) {
808 if (cnt)
809 udelay(100);
810 else
811 rval = QLA_FUNCTION_TIMEOUT;
812 }
813 }
814
815 /* Get RISC SRAM. */
816 if (rval == QLA_SUCCESS)
817 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
818 sizeof(fw->risc_ram) / 2, &nxt);
819
820 /* Get stack SRAM. */
821 if (rval == QLA_SUCCESS)
822 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
823 sizeof(fw->stack_ram) / 2, &nxt);
824
825 /* Get data SRAM. */
826 if (rval == QLA_SUCCESS)
827 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
828 ha->fw_memory_size - 0x11000 + 1, &nxt);
829
830 if (rval == QLA_SUCCESS)
831 qla2xxx_copy_queues(ha, nxt);
832
833 qla2xxx_dump_post_process(base_vha, rval);
834
835 qla2300_fw_dump_failed:
836 #ifndef __CHECKER__
837 if (!hardware_locked)
838 spin_unlock_irqrestore(&ha->hardware_lock, flags);
839 #else
840 ;
841 #endif
842 }
843
844 /**
845 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
846 * @ha: HA context
847 * @hardware_locked: Called with the hardware_lock
848 */
849 void
850 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
851 {
852 int rval;
853 uint32_t cnt, timer;
854 uint16_t risc_address;
855 uint16_t mb0, mb2;
856 struct qla_hw_data *ha = vha->hw;
857 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
858 uint16_t __iomem *dmp_reg;
859 unsigned long flags;
860 struct qla2100_fw_dump *fw;
861 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
862
863 risc_address = 0;
864 mb0 = mb2 = 0;
865 flags = 0;
866
867 #ifndef __CHECKER__
868 if (!hardware_locked)
869 spin_lock_irqsave(&ha->hardware_lock, flags);
870 #endif
871
872 if (!ha->fw_dump) {
873 ql_log(ql_log_warn, vha, 0xd004,
874 "No buffer available for dump.\n");
875 goto qla2100_fw_dump_failed;
876 }
877
878 if (ha->fw_dumped) {
879 ql_log(ql_log_warn, vha, 0xd005,
880 "Firmware has been previously dumped (%p) "
881 "-- ignoring request.\n",
882 ha->fw_dump);
883 goto qla2100_fw_dump_failed;
884 }
885 fw = &ha->fw_dump->isp.isp21;
886 qla2xxx_prep_dump(ha, ha->fw_dump);
887
888 rval = QLA_SUCCESS;
889 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
890
891 /* Pause RISC. */
892 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
893 for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
894 rval == QLA_SUCCESS; cnt--) {
895 if (cnt)
896 udelay(100);
897 else
898 rval = QLA_FUNCTION_TIMEOUT;
899 }
900 if (rval == QLA_SUCCESS) {
901 dmp_reg = &reg->flash_address;
902 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
903 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
904
905 dmp_reg = &reg->u.isp2100.mailbox0;
906 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
907 if (cnt == 8)
908 dmp_reg = &reg->u_end.isp2200.mailbox8;
909
910 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
911 }
912
913 dmp_reg = &reg->u.isp2100.unused_2[0];
914 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
915 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
916
917 WRT_REG_WORD(&reg->ctrl_status, 0x00);
918 dmp_reg = &reg->risc_hw;
919 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
920 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
921
922 WRT_REG_WORD(&reg->pcr, 0x2000);
923 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
924
925 WRT_REG_WORD(&reg->pcr, 0x2100);
926 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
927
928 WRT_REG_WORD(&reg->pcr, 0x2200);
929 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
930
931 WRT_REG_WORD(&reg->pcr, 0x2300);
932 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
933
934 WRT_REG_WORD(&reg->pcr, 0x2400);
935 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
936
937 WRT_REG_WORD(&reg->pcr, 0x2500);
938 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
939
940 WRT_REG_WORD(&reg->pcr, 0x2600);
941 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
942
943 WRT_REG_WORD(&reg->pcr, 0x2700);
944 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
945
946 WRT_REG_WORD(&reg->ctrl_status, 0x10);
947 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
948
949 WRT_REG_WORD(&reg->ctrl_status, 0x20);
950 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
951
952 WRT_REG_WORD(&reg->ctrl_status, 0x30);
953 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
954
955 /* Reset the ISP. */
956 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
957 }
958
959 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
960 rval == QLA_SUCCESS; cnt--) {
961 if (cnt)
962 udelay(100);
963 else
964 rval = QLA_FUNCTION_TIMEOUT;
965 }
966
967 /* Pause RISC. */
968 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
969 (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
970
971 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
972 for (cnt = 30000;
973 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
974 rval == QLA_SUCCESS; cnt--) {
975 if (cnt)
976 udelay(100);
977 else
978 rval = QLA_FUNCTION_TIMEOUT;
979 }
980 if (rval == QLA_SUCCESS) {
981 /* Set memory configuration and timing. */
982 if (IS_QLA2100(ha))
983 WRT_REG_WORD(&reg->mctr, 0xf1);
984 else
985 WRT_REG_WORD(&reg->mctr, 0xf2);
986 RD_REG_WORD(&reg->mctr); /* PCI Posting. */
987
988 /* Release RISC. */
989 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
990 }
991 }
992
993 if (rval == QLA_SUCCESS) {
994 /* Get RISC SRAM. */
995 risc_address = 0x1000;
996 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
997 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
998 }
999 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1000 cnt++, risc_address++) {
1001 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1002 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1003
1004 for (timer = 6000000; timer != 0; timer--) {
1005 /* Check for pending interrupts. */
1006 if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1007 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1008 set_bit(MBX_INTERRUPT,
1009 &ha->mbx_cmd_flags);
1010
1011 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1012 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1013
1014 WRT_REG_WORD(&reg->semaphore, 0);
1015 WRT_REG_WORD(&reg->hccr,
1016 HCCR_CLR_RISC_INT);
1017 RD_REG_WORD(&reg->hccr);
1018 break;
1019 }
1020 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1021 RD_REG_WORD(&reg->hccr);
1022 }
1023 udelay(5);
1024 }
1025
1026 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1027 rval = mb0 & MBS_MASK;
1028 fw->risc_ram[cnt] = htons(mb2);
1029 } else {
1030 rval = QLA_FUNCTION_FAILED;
1031 }
1032 }
1033
1034 if (rval == QLA_SUCCESS)
1035 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1036
1037 qla2xxx_dump_post_process(base_vha, rval);
1038
1039 qla2100_fw_dump_failed:
1040 #ifndef __CHECKER__
1041 if (!hardware_locked)
1042 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1043 #else
1044 ;
1045 #endif
1046 }
1047
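/**
 * qla24xx_fw_dump() - Dumps binary data from the 24xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */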
1048 void
1049 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1050 {
1051 int rval;
1052 uint32_t cnt;
1053 struct qla_hw_data *ha = vha->hw;
1054 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1055 uint32_t __iomem *dmp_reg;
1056 uint32_t *iter_reg;
1057 uint16_t __iomem *mbx_reg;
1058 unsigned long flags;
1059 struct qla24xx_fw_dump *fw;
1060 void *nxt;
1061 void *nxt_chain;
1062 uint32_t *last_chain = NULL;
1063 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1064
1065 if (IS_P3P_TYPE(ha))
1066 return;
1067
1068 flags = 0;
1069 ha->fw_dump_cap_flags = 0;
1070
1071 #ifndef __CHECKER__
1072 if (!hardware_locked)
1073 spin_lock_irqsave(&ha->hardware_lock, flags);
1074 #endif
1075
1076 if (!ha->fw_dump) {
1077 ql_log(ql_log_warn, vha, 0xd006,
1078 "No buffer available for dump.\n");
1079 goto qla24xx_fw_dump_failed;
1080 }
1081
1082 if (ha->fw_dumped) {
1083 ql_log(ql_log_warn, vha, 0xd007,
1084 "Firmware has been previously dumped (%p) "
1085 "-- ignoring request.\n",
1086 ha->fw_dump);
1087 goto qla24xx_fw_dump_failed;
1088 }
1089 fw = &ha->fw_dump->isp.isp24;
1090 qla2xxx_prep_dump(ha, ha->fw_dump);
1091
1092 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1093
1094 /*
1095 * Pause RISC. No need to track timeout, as resetting the chip
1096 * is the right approach in case of a pause timeout.
1097 */
1098 qla24xx_pause_risc(reg, ha);
1099
1100 /* Host interface registers. */
1101 dmp_reg = &reg->flash_addr;
1102 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1103 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1104
1105 /* Disable interrupts. */
1106 WRT_REG_DWORD(&reg->ictrl, 0);
1107 RD_REG_DWORD(&reg->ictrl);
1108
1109 /* Shadow registers. */
1110 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1111 RD_REG_DWORD(&reg->iobase_addr);
1112 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1113 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1114
1115 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1116 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1117
1118 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1119 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1120
1121 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1122 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1123
1124 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1125 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1126
1127 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1128 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1129
1130 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1131 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1132
1133 /* Mailbox registers. */
1134 mbx_reg = &reg->mailbox0;
1135 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1136 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1137
1138 /* Transfer sequence registers. */
1139 iter_reg = fw->xseq_gp_reg;
1140 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1141 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1142 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1143 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1144 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1145 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1146 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1147 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1148
1149 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1150 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1151
1152 /* Receive sequence registers. */
1153 iter_reg = fw->rseq_gp_reg;
1154 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1155 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1156 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1157 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1158 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1159 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1160 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1161 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1162
1163 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1164 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1165 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1166
1167 /* Command DMA registers. */
1168 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1169
1170 /* Queues. */
1171 iter_reg = fw->req0_dma_reg;
1172 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1173 dmp_reg = &reg->iobase_q;
1174 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1175 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1176
1177 iter_reg = fw->resp0_dma_reg;
1178 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1179 dmp_reg = &reg->iobase_q;
1180 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1181 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1182
1183 iter_reg = fw->req1_dma_reg;
1184 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1185 dmp_reg = &reg->iobase_q;
1186 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1187 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1188
1189 /* Transmit DMA registers. */
1190 iter_reg = fw->xmt0_dma_reg;
1191 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1192 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1193
1194 iter_reg = fw->xmt1_dma_reg;
1195 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1196 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1197
1198 iter_reg = fw->xmt2_dma_reg;
1199 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1200 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1201
1202 iter_reg = fw->xmt3_dma_reg;
1203 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1204 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1205
1206 iter_reg = fw->xmt4_dma_reg;
1207 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1208 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1209
1210 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1211
1212 /* Receive DMA registers. */
1213 iter_reg = fw->rcvt0_data_dma_reg;
1214 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1215 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1216
1217 iter_reg = fw->rcvt1_data_dma_reg;
1218 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1219 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1220
1221 /* RISC registers. */
1222 iter_reg = fw->risc_gp_reg;
1223 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1224 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1225 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1226 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1227 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1228 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1229 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1230 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1231
1232 /* Local memory controller registers. */
1233 iter_reg = fw->lmc_reg;
1234 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1235 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1236 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1237 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1238 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1239 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1240 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1241
1242 /* Fibre Protocol Module registers. */
1243 iter_reg = fw->fpm_hdw_reg;
1244 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1245 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1246 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1247 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1248 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1249 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1250 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1251 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1252 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1253 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1254 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1255 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1256
1257 /* Frame Buffer registers. */
1258 iter_reg = fw->fb_hdw_reg;
1259 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1260 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1261 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1262 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1263 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1264 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1265 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1266 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1267 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1268 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1269 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1270
1271 rval = qla24xx_soft_reset(ha);
1272 if (rval != QLA_SUCCESS)
1273 goto qla24xx_fw_dump_failed_0;
1274
1275 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1276 &nxt);
1277 if (rval != QLA_SUCCESS)
1278 goto qla24xx_fw_dump_failed_0;
1279
1280 nxt = qla2xxx_copy_queues(ha, nxt);
1281
1282 qla24xx_copy_eft(ha, nxt);
1283
1284 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1285 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1286 if (last_chain) {
1287 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1288 *last_chain |= htonl(DUMP_CHAIN_LAST);
1289 }
1290
1291 /* Adjust valid length. */
1292 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1293
1294 qla24xx_fw_dump_failed_0:
1295 qla2xxx_dump_post_process(base_vha, rval);
1296
1297 qla24xx_fw_dump_failed:
1298 #ifndef __CHECKER__
1299 if (!hardware_locked)
1300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1301 #else
1302 ;
1303 #endif
1304 }
1305
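/**
 * qla25xx_fw_dump() - Dumps binary data from the 25xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */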
1306 void
1307 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1308 {
1309 int rval;
1310 uint32_t cnt;
1311 struct qla_hw_data *ha = vha->hw;
1312 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1313 uint32_t __iomem *dmp_reg;
1314 uint32_t *iter_reg;
1315 uint16_t __iomem *mbx_reg;
1316 unsigned long flags;
1317 struct qla25xx_fw_dump *fw;
1318 void *nxt, *nxt_chain;
1319 uint32_t *last_chain = NULL;
1320 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1321
1322 flags = 0;
1323 ha->fw_dump_cap_flags = 0;
1324
1325 #ifndef __CHECKER__
1326 if (!hardware_locked)
1327 spin_lock_irqsave(&ha->hardware_lock, flags);
1328 #endif
1329
1330 if (!ha->fw_dump) {
1331 ql_log(ql_log_warn, vha, 0xd008,
1332 "No buffer available for dump.\n");
1333 goto qla25xx_fw_dump_failed;
1334 }
1335
1336 if (ha->fw_dumped) {
1337 ql_log(ql_log_warn, vha, 0xd009,
1338 "Firmware has been previously dumped (%p) "
1339 "-- ignoring request.\n",
1340 ha->fw_dump);
1341 goto qla25xx_fw_dump_failed;
1342 }
1343 fw = &ha->fw_dump->isp.isp25;
1344 qla2xxx_prep_dump(ha, ha->fw_dump);
1345 ha->fw_dump->version = htonl(2);
1346
1347 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1348
1349 /*
1350 * Pause RISC. No need to track timeout, as resetting the chip
1351 * is the right approach in case of a pause timeout.
1352 */
1353 qla24xx_pause_risc(reg, ha);
1354
1355 /* Host/Risc registers. */
1356 iter_reg = fw->host_risc_reg;
1357 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1358 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1359
1360 /* PCIe registers. */
1361 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1362 RD_REG_DWORD(&reg->iobase_addr);
1363 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1364 dmp_reg = &reg->iobase_c4;
1365 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1366 dmp_reg++;
1367 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1368 dmp_reg++;
1369 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1370 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1371
1372 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1373 RD_REG_DWORD(&reg->iobase_window);
1374
1375 /* Host interface registers. */
1376 dmp_reg = &reg->flash_addr;
1377 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1378 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1379
1380 /* Disable interrupts. */
1381 WRT_REG_DWORD(&reg->ictrl, 0);
1382 RD_REG_DWORD(&reg->ictrl);
1383
1384 /* Shadow registers. */
1385 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1386 RD_REG_DWORD(&reg->iobase_addr);
1387 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1388 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1389
1390 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1391 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1392
1393 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1394 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1395
1396 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1397 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1398
1399 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1400 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1401
1402 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1403 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1404
1405 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1406 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1407
1408 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1409 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1410
1411 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1412 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1413
1414 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1415 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1416
1417 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1418 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1419
1420 /* RISC I/O register. */
1421 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1422 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1423
1424 /* Mailbox registers. */
1425 mbx_reg = &reg->mailbox0;
1426 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1427 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1428
1429 /* Transfer sequence registers. */
1430 iter_reg = fw->xseq_gp_reg;
1431 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1432 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1433 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1434 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1435 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1436 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1437 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1438 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1439
1440 iter_reg = fw->xseq_0_reg;
1441 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1442 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1443 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1444
1445 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1446
1447 /* Receive sequence registers. */
1448 iter_reg = fw->rseq_gp_reg;
1449 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1450 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1451 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1452 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1453 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1454 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1455 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1456 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1457
1458 iter_reg = fw->rseq_0_reg;
1459 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1460 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1461
1462 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1463 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1464
1465 /* Auxiliary sequence registers. */
1466 iter_reg = fw->aseq_gp_reg;
1467 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1468 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1469 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1470 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1471 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1472 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1473 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1474 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1475
1476 iter_reg = fw->aseq_0_reg;
1477 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1478 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1479
1480 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1481 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1482
1483 /* Command DMA registers. */
1484 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1485
1486 /* Queues. */
1487 iter_reg = fw->req0_dma_reg;
1488 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1489 dmp_reg = &reg->iobase_q;
1490 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1491 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1492
1493 iter_reg = fw->resp0_dma_reg;
1494 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1495 dmp_reg = &reg->iobase_q;
1496 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1497 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1498
1499 iter_reg = fw->req1_dma_reg;
1500 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1501 dmp_reg = &reg->iobase_q;
1502 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1503 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1504
1505 /* Transmit DMA registers. */
1506 iter_reg = fw->xmt0_dma_reg;
1507 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1508 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1509
1510 iter_reg = fw->xmt1_dma_reg;
1511 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1512 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1513
1514 iter_reg = fw->xmt2_dma_reg;
1515 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1516 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1517
1518 iter_reg = fw->xmt3_dma_reg;
1519 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1520 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1521
1522 iter_reg = fw->xmt4_dma_reg;
1523 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1524 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1525
1526 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1527
1528 /* Receive DMA registers. */
1529 iter_reg = fw->rcvt0_data_dma_reg;
1530 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1531 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1532
1533 iter_reg = fw->rcvt1_data_dma_reg;
1534 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1535 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1536
1537 /* RISC registers. */
1538 iter_reg = fw->risc_gp_reg;
1539 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1540 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1541 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1542 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1543 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1544 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1545 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1546 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1547
1548 /* Local memory controller registers. */
1549 iter_reg = fw->lmc_reg;
1550 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1551 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1552 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1553 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1554 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1555 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1556 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1557 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1558
1559 /* Fibre Protocol Module registers. */
1560 iter_reg = fw->fpm_hdw_reg;
1561 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1562 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1563 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1564 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1565 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1566 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1567 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1568 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1569 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1570 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1571 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1572 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1573
1574 /* Frame Buffer registers. */
1575 iter_reg = fw->fb_hdw_reg;
1576 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1577 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1578 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1579 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1580 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1581 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1582 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1583 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1586 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1587 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1588
1589 /* Multi queue registers */
1590 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1591 &last_chain);
1592
1593 rval = qla24xx_soft_reset(ha);
1594 if (rval != QLA_SUCCESS)
1595 goto qla25xx_fw_dump_failed_0;
1596
1597 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1598 &nxt);
1599 if (rval != QLA_SUCCESS)
1600 goto qla25xx_fw_dump_failed_0;
1601
1602 nxt = qla2xxx_copy_queues(ha, nxt);
1603
1604 qla24xx_copy_eft(ha, nxt);
1605
1606 /* Chain entries -- started with MQ. */
1607 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1608 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1609 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1610 if (last_chain) {
1611 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1612 *last_chain |= htonl(DUMP_CHAIN_LAST);
1613 }
1614
1615 /* Adjust valid length. */
1616 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1617
1618 qla25xx_fw_dump_failed_0:
1619 qla2xxx_dump_post_process(base_vha, rval);
1620
1621 qla25xx_fw_dump_failed:
1622 #ifndef __CHECKER__
1623 if (!hardware_locked)
1624 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1625 #else
1626 ;
1627 #endif
1628 }
1629
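/**
 * qla81xx_fw_dump() - Dumps binary data from the 81xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 */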
1630 void
1631 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1632 {
1633 int rval;
1634 uint32_t cnt;
1635 struct qla_hw_data *ha = vha->hw;
1636 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1637 uint32_t __iomem *dmp_reg;
1638 uint32_t *iter_reg;
1639 uint16_t __iomem *mbx_reg;
1640 unsigned long flags;
1641 struct qla81xx_fw_dump *fw;
1642 void *nxt, *nxt_chain;
1643 uint32_t *last_chain = NULL;
1644 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1645
1646 flags = 0;
1647 ha->fw_dump_cap_flags = 0;
1648
1649 #ifndef __CHECKER__
1650 if (!hardware_locked)
1651 spin_lock_irqsave(&ha->hardware_lock, flags);
1652 #endif
1653
1654 if (!ha->fw_dump) {
1655 ql_log(ql_log_warn, vha, 0xd00a,
1656 "No buffer available for dump.\n");
1657 goto qla81xx_fw_dump_failed;
1658 }
1659
1660 if (ha->fw_dumped) {
1661 ql_log(ql_log_warn, vha, 0xd00b,
1662 "Firmware has been previously dumped (%p) "
1663 "-- ignoring request.\n",
1664 ha->fw_dump);
1665 goto qla81xx_fw_dump_failed;
1666 }
1667 fw = &ha->fw_dump->isp.isp81;
1668 qla2xxx_prep_dump(ha, ha->fw_dump);
1669
1670 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1671
1672 /*
1673 * Pause RISC. No need to track timeout, as resetting the chip
1674 * is the right approach in case of a pause timeout.
1675 */
1676 qla24xx_pause_risc(reg, ha);
1677
1678 /* Host/Risc registers. */
1679 iter_reg = fw->host_risc_reg;
1680 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1681 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1682
1683 /* PCIe registers. */
1684 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1685 RD_REG_DWORD(&reg->iobase_addr);
1686 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1687 dmp_reg = &reg->iobase_c4;
1688 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1689 dmp_reg++;
1690 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1691 dmp_reg++;
1692 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1693 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1694
1695 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1696 RD_REG_DWORD(&reg->iobase_window);
1697
1698 /* Host interface registers. */
1699 dmp_reg = &reg->flash_addr;
1700 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1701 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1702
1703 /* Disable interrupts. */
1704 WRT_REG_DWORD(&reg->ictrl, 0);
1705 RD_REG_DWORD(&reg->ictrl);
1706
1707 /* Shadow registers. */
1708 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1709 RD_REG_DWORD(&reg->iobase_addr);
1710 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1711 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1712
1713 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1714 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1715
1716 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1717 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1718
1719 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1720 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1721
1722 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1723 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1724
1725 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1726 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1727
1728 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1729 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1730
1731 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1732 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1733
1734 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1735 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1736
1737 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1738 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1739
1740 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1741 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1742
1743 /* RISC I/O register. */
1744 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1745 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1746
1747 /* Mailbox registers. */
1748 mbx_reg = &reg->mailbox0;
1749 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1750 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1751
1752 /* Transfer sequence registers. */
1753 iter_reg = fw->xseq_gp_reg;
1754 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1755 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1756 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1757 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1758 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1759 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1760 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1761 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1762
1763 iter_reg = fw->xseq_0_reg;
1764 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1765 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1766 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1767
1768 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1769
1770 /* Receive sequence registers. */
1771 iter_reg = fw->rseq_gp_reg;
1772 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1773 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1774 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1775 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1776 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1777 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1778 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1779 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1780
1781 iter_reg = fw->rseq_0_reg;
1782 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1783 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1784
1785 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1786 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1787
1788 /* Auxiliary sequence registers. */
1789 iter_reg = fw->aseq_gp_reg;
1790 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1791 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1792 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1793 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1794 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1795 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1796 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1797 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1798
1799 iter_reg = fw->aseq_0_reg;
1800 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1801 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1802
1803 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1804 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1805
1806 /* Command DMA registers. */
1807 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1808
1809 /* Queues. */
1810 iter_reg = fw->req0_dma_reg;
1811 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1812 dmp_reg = &reg->iobase_q;
1813 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1814 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1815
1816 iter_reg = fw->resp0_dma_reg;
1817 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1818 dmp_reg = &reg->iobase_q;
1819 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1820 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1821
1822 iter_reg = fw->req1_dma_reg;
1823 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1824 dmp_reg = &reg->iobase_q;
1825 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1826 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1827
1828 /* Transmit DMA registers. */
1829 iter_reg = fw->xmt0_dma_reg;
1830 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1831 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1832
1833 iter_reg = fw->xmt1_dma_reg;
1834 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1835 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1836
1837 iter_reg = fw->xmt2_dma_reg;
1838 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1839 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1840
1841 iter_reg = fw->xmt3_dma_reg;
1842 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1843 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1844
1845 iter_reg = fw->xmt4_dma_reg;
1846 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1847 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1848
1849 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1850
1851 /* Receive DMA registers. */
1852 iter_reg = fw->rcvt0_data_dma_reg;
1853 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1854 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1855
1856 iter_reg = fw->rcvt1_data_dma_reg;
1857 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1858 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1859
1860 /* RISC registers. */
1861 iter_reg = fw->risc_gp_reg;
1862 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1863 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1864 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1865 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1866 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1867 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1868 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1869 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1870
1871 /* Local memory controller registers. */
1872 iter_reg = fw->lmc_reg;
1873 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1874 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1875 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1876 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1877 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1878 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1879 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1880 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1881
1882 /* Fibre Protocol Module registers. */
1883 iter_reg = fw->fpm_hdw_reg;
1884 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1885 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1886 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1887 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1888 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1889 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1890 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1891 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1892 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1893 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1894 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1895 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1896 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1897 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1898
1899 /* Frame Buffer registers. */
1900 iter_reg = fw->fb_hdw_reg;
1901 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1902 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1903 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1904 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1905 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1906 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1907 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1913 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1914
1915 /* Multi queue registers */
1916 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1917 &last_chain);
1918
1919 rval = qla24xx_soft_reset(ha);
1920 if (rval != QLA_SUCCESS)
1921 goto qla81xx_fw_dump_failed_0;
1922
1923 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1924 &nxt);
1925 if (rval != QLA_SUCCESS)
1926 goto qla81xx_fw_dump_failed_0;
1927
1928 nxt = qla2xxx_copy_queues(ha, nxt);
1929
1930 qla24xx_copy_eft(ha, nxt);
1931
1932 /* Chain entries -- started with MQ. */
1933 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1934 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1935 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1936 if (last_chain) {
1937 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1938 *last_chain |= htonl(DUMP_CHAIN_LAST);
1939 }
1940
1941 /* Adjust valid length. */
1942 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1943
1944 qla81xx_fw_dump_failed_0:
1945 qla2xxx_dump_post_process(base_vha, rval);
1946
1947 qla81xx_fw_dump_failed:
1948 #ifndef __CHECKER__
1949 if (!hardware_locked)
1950 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1951 #else
1952 ;
1953 #endif
1954 }
1955
1956 void
1957 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1958 {
1959 int rval;
1960 uint32_t cnt;
1961 struct qla_hw_data *ha = vha->hw;
1962 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1963 uint32_t __iomem *dmp_reg;
1964 uint32_t *iter_reg;
1965 uint16_t __iomem *mbx_reg;
1966 unsigned long flags;
1967 struct qla83xx_fw_dump *fw;
1968 void *nxt, *nxt_chain;
1969 uint32_t *last_chain = NULL;
1970 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1971
1972 flags = 0;
1973 ha->fw_dump_cap_flags = 0;
1974
1975 #ifndef __CHECKER__
1976 if (!hardware_locked)
1977 spin_lock_irqsave(&ha->hardware_lock, flags);
1978 #endif
1979
1980 if (!ha->fw_dump) {
1981 ql_log(ql_log_warn, vha, 0xd00c,
1982 "No buffer available for dump!!!\n");
1983 goto qla83xx_fw_dump_failed;
1984 }
1985
1986 if (ha->fw_dumped) {
1987 ql_log(ql_log_warn, vha, 0xd00d,
1988 "Firmware has been previously dumped (%p) -- ignoring "
1989 "request...\n", ha->fw_dump);
1990 goto qla83xx_fw_dump_failed;
1991 }
1992 fw = &ha->fw_dump->isp.isp83;
1993 qla2xxx_prep_dump(ha, ha->fw_dump);
1994
1995 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1996
1997 /*
1998 * Pause RISC. No need to track timeout, as resetting the chip
1999 * is the right approach in case of a pause timeout
2000 */
2001 qla24xx_pause_risc(reg, ha);
2002
2003 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2004 dmp_reg = &reg->iobase_window;
2005 RD_REG_DWORD(dmp_reg);
2006 WRT_REG_DWORD(dmp_reg, 0);
2007
2008 dmp_reg = &reg->unused_4_1[0];
2009 RD_REG_DWORD(dmp_reg);
2010 WRT_REG_DWORD(dmp_reg, 0);
2011
2012 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2013 dmp_reg = &reg->unused_4_1[2];
2014 RD_REG_DWORD(dmp_reg);
2015 WRT_REG_DWORD(dmp_reg, 0);
2016
2017 /* select PCR and disable ecc checking and correction */
2018 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2019 RD_REG_DWORD(&reg->iobase_addr);
2020 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2021
2022 /* Host/Risc registers. */
2023 iter_reg = fw->host_risc_reg;
2024 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2025 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2026 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2027
2028 /* PCIe registers. */
2029 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2030 RD_REG_DWORD(&reg->iobase_addr);
2031 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2032 dmp_reg = &reg->iobase_c4;
2033 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2034 dmp_reg++;
2035 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2036 dmp_reg++;
2037 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2038 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2039
2040 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2041 RD_REG_DWORD(&reg->iobase_window);
2042
2043 /* Host interface registers. */
2044 dmp_reg = &reg->flash_addr;
2045 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2046 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2047
2048 /* Disable interrupts. */
2049 WRT_REG_DWORD(&reg->ictrl, 0);
2050 RD_REG_DWORD(&reg->ictrl);
2051
2052 /* Shadow registers. */
2053 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2054 RD_REG_DWORD(&reg->iobase_addr);
2055 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2056 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2057
2058 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2059 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2060
2061 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2062 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2063
2064 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2065 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2066
2067 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2068 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2069
2070 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2071 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2072
2073 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2074 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2075
2076 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2077 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2078
2079 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2080 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2081
2082 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2083 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2084
2085 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2086 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2087
2088 /* RISC I/O register. */
2089 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2090 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2091
2092 /* Mailbox registers. */
2093 mbx_reg = &reg->mailbox0;
2094 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2095 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2096
2097 /* Transfer sequence registers. */
2098 iter_reg = fw->xseq_gp_reg;
2099 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2100 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2101 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2102 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2103 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2104 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2105 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2106 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2107 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2108 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2109 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2110 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2111 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2112 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2113 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2114 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2115
2116 iter_reg = fw->xseq_0_reg;
2117 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2118 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2119 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2120
2121 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2122
2123 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2124
2125 /* Receive sequence registers. */
2126 iter_reg = fw->rseq_gp_reg;
2127 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2128 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2129 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2130 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2131 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2132 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2133 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2134 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2135 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2136 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2137 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2138 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2139 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2140 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2141 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2142 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2143
2144 iter_reg = fw->rseq_0_reg;
2145 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2146 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2147
2148 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2149 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2150 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2151
2152 /* Auxiliary sequence registers. */
2153 iter_reg = fw->aseq_gp_reg;
2154 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2161 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2162 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2163 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2164 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2165 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2166 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2167 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2168 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2169 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2170
2171 iter_reg = fw->aseq_0_reg;
2172 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2173 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2174
2175 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2176 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2177 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2178
2179 /* Command DMA registers. */
2180 iter_reg = fw->cmd_dma_reg;
2181 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2182 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2183 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2184 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2185
2186 /* Queues. */
2187 iter_reg = fw->req0_dma_reg;
2188 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2189 dmp_reg = &reg->iobase_q;
2190 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2191 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2192
2193 iter_reg = fw->resp0_dma_reg;
2194 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2195 dmp_reg = &reg->iobase_q;
2196 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2197 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2198
2199 iter_reg = fw->req1_dma_reg;
2200 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2201 dmp_reg = &reg->iobase_q;
2202 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2203 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2204
2205 /* Transmit DMA registers. */
2206 iter_reg = fw->xmt0_dma_reg;
2207 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2208 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2209
2210 iter_reg = fw->xmt1_dma_reg;
2211 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2212 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2213
2214 iter_reg = fw->xmt2_dma_reg;
2215 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2216 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2217
2218 iter_reg = fw->xmt3_dma_reg;
2219 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2220 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2221
2222 iter_reg = fw->xmt4_dma_reg;
2223 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2224 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2225
2226 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2227
2228 /* Receive DMA registers. */
2229 iter_reg = fw->rcvt0_data_dma_reg;
2230 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2231 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2232
2233 iter_reg = fw->rcvt1_data_dma_reg;
2234 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2235 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2236
2237 /* RISC registers. */
2238 iter_reg = fw->risc_gp_reg;
2239 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2240 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2241 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2242 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2243 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2244 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2245 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2246 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2247
2248 /* Local memory controller registers. */
2249 iter_reg = fw->lmc_reg;
2250 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2251 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2252 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2253 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2254 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2255 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2256 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2257 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2258
2259 /* Fibre Protocol Module registers. */
2260 iter_reg = fw->fpm_hdw_reg;
2261 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2262 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2263 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2264 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2265 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2266 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2267 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2268 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2269 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2270 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2271 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2272 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2273 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2274 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2275 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2276 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2277
2278 /* RQ0 Array registers. */
2279 iter_reg = fw->rq0_array_reg;
2280 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2281 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2282 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2283 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2284 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2285 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2286 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2287 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2288 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2289 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2290 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2293 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2294 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2295 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2296
2297 /* RQ1 Array registers. */
2298 iter_reg = fw->rq1_array_reg;
2299 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2305 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2306 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2307 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2308 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2309 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2310 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2314 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2315
2316 /* RP0 Array registers. */
2317 iter_reg = fw->rp0_array_reg;
2318 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2324 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2325 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2326 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2327 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2328 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2329 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2333 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2334
2335 /* RP1 Array registers. */
2336 iter_reg = fw->rp1_array_reg;
2337 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2343 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2344 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2345 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2346 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2347 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2348 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2352 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2353
2354 iter_reg = fw->at0_array_reg;
2355 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2357 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2358 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2359 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2360 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2361 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2362 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2363
2364 /* I/O Queue Control registers. */
2365 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2366
2367 /* Frame Buffer registers. */
2368 iter_reg = fw->fb_hdw_reg;
2369 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2382 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2383 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2384 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2386 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2387 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2390 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2391 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2392 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2393 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2394 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2395 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2396
2397 /* Multi queue registers */
2398 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2399 &last_chain);
2400
2401 rval = qla24xx_soft_reset(ha);
2402 if (rval != QLA_SUCCESS) {
2403 ql_log(ql_log_warn, vha, 0xd00e,
2404 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2405 rval = QLA_SUCCESS;
2406
2407 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2408
2409 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2410 RD_REG_DWORD(&reg->hccr);
2411
2412 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2413 RD_REG_DWORD(&reg->hccr);
2414
2415 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2416 RD_REG_DWORD(&reg->hccr);
2417
2418 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2419 udelay(5);
2420
2421 if (!cnt) {
2422 nxt = fw->code_ram;
2423 nxt += sizeof(fw->code_ram);
2424 nxt += (ha->fw_memory_size - 0x100000 + 1);
2425 goto copy_queue;
2426 } else {
2427 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2428 ql_log(ql_log_warn, vha, 0xd010,
2429 "bigger hammer success?\n");
2430 }
2431 }
2432
2433 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2434 &nxt);
2435 if (rval != QLA_SUCCESS)
2436 goto qla83xx_fw_dump_failed_0;
2437
2438 copy_queue:
2439 nxt = qla2xxx_copy_queues(ha, nxt);
2440
2441 qla24xx_copy_eft(ha, nxt);
2442
2443 /* Chain entries -- started with MQ. */
2444 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2445 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2446 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2447 if (last_chain) {
2448 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2449 *last_chain |= htonl(DUMP_CHAIN_LAST);
2450 }
2451
2452 /* Adjust valid length. */
2453 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2454
2455 qla83xx_fw_dump_failed_0:
2456 qla2xxx_dump_post_process(base_vha, rval);
2457
2458 qla83xx_fw_dump_failed:
2459 #ifndef __CHECKER__
2460 if (!hardware_locked)
2461 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2462 #else
2463 ;
2464 #endif
2465 }
2466
2467 /****************************************************************************/
2468 /* Driver Debug Functions. */
2469 /****************************************************************************/
2470
2471 static inline int
2472 ql_mask_match(uint32_t level)
2473 {
2474 if (ql2xextended_error_logging == 1)
2475 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2476 return (level & ql2xextended_error_logging) == level;
2477 }
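
/*
 * Worked example (the mask values below are illustrative placeholders,
 * not the real ql_dbg_* bits from qla_dbg.h): with
 * ql2xextended_error_logging == 0x01800000, a message at level
 * 0x00800000 is emitted because (0x00800000 & 0x01800000) == 0x00800000,
 * while a message at level 0x00c00000 is suppressed because the
 * 0x00400000 bit is not enabled in the mask.
 */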
2478
2479 /*
2480 * This function formats and logs debug information. It is to be used
2481 * when a vha is available, i.e. after host allocation. The formatted
2482 * message is written to the system log.
2483 * parameters:
2484 * level: The debug level of the message. The message appears in the
2485 * system log only if the corresponding bit is enabled in
2486 * ql2xextended_error_logging.
2487 * vha: Pointer to the scsi_qla_host_t.
2488 * id: Unique message identifier; it identifies the part of the code
2489 * from which the message originated.
2490 * msg: The message to be displayed.
2491 */
2492 void
2493 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2494 {
2495 va_list va;
2496 struct va_format vaf;
2497
2498 if (!ql_mask_match(level))
2499 return;
2500
2501 va_start(va, fmt);
2502
2503 vaf.fmt = fmt;
2504 vaf.va = &va;
2505
2506 if (vha != NULL) {
2507 const struct pci_dev *pdev = vha->hw->pdev;
2508 /* <module-name> <pci-name> <msg-id>:<host> Message */
2509 pr_warn("%s [%s]-%04x:%ld: %pV",
2510 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2511 vha->host_no, &vaf);
2512 } else {
2513 pr_warn("%s [%s]-%04x: : %pV",
2514 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2515 }
2516
2517 va_end(va);
2518
2519 }
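
/*
 * Usage sketch (the message id 0x00ff and the variables below are
 * placeholders, not ids reserved in the table at the top of this file):
 *
 *	ql_dbg(ql_dbg_init, vha, 0x00ff,
 *	    "Firmware ready, state %x.\n", state);
 *
 * The message is emitted only when the ql_dbg_init bit is set in the
 * ql2xextended_error_logging module parameter.
 */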
2520
2521 /*
2522 * This function formats and logs debug information. It is to be used
2523 * when a vha is not yet available but the PCI device is, i.e. before
2524 * host allocation. The formatted message is written to the
2525 * system log.
2526 * parameters:
2527 * level: The debug level of the message. The message appears in the
2528 * system log only if the corresponding bit is enabled in
2529 * ql2xextended_error_logging.
2530 * pdev: Pointer to the struct pci_dev.
2531 * id: Unique message identifier; it identifies the part of the code
2532 * from which the message originated.
2533 * msg: The message to be displayed.
2534 */
2535 void
2536 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2537 const char *fmt, ...)
2538 {
2539 va_list va;
2540 struct va_format vaf;
2541
2542 if (pdev == NULL)
2543 return;
2544 if (!ql_mask_match(level))
2545 return;
2546
2547 va_start(va, fmt);
2548
2549 vaf.fmt = fmt;
2550 vaf.va = &va;
2551
2552 /* <module-name> <dev-name>:<msg-id> Message */
2553 pr_warn("%s [%s]-%04x: : %pV",
2554 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2555
2556 va_end(va);
2557 }
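
/*
 * Usage sketch for the pre-host-allocation path (the id 0x00ff and the
 * chip_id variable are placeholders):
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x00ff,
 *	    "Probing adapter, chip id %x.\n", chip_id);
 */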
2558
2559 /*
2560 * This function formats and logs log messages. It is to be used when
2561 * a vha is available. The formatted message is written to the system
2562 * log regardless of ql2xextended_error_logging; only ql_errlev
2563 * determines whether the message is emitted.
2564 * parameters:
2565 * level: The log level of the message (ql_log_fatal, ql_log_warn,
2566 * ql_log_info, ...), compared against ql_errlev.
2567 * vha: Pointer to the scsi_qla_host_t.
2568 * id: Unique message identifier; it identifies the part of the code
2569 * from which the message originated.
2570 * msg: The message to be displayed.
2571 */
2572 void
2573 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2574 {
2575 va_list va;
2576 struct va_format vaf;
2577 char pbuf[128];
2578
2579 if (level > ql_errlev)
2580 return;
2581
2582 if (vha != NULL) {
2583 const struct pci_dev *pdev = vha->hw->pdev;
2584 /* <module-name> <pci-name> <msg-id>:<host> Message */
2585 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2586 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2587 } else {
2588 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2589 QL_MSGHDR, "0000:00:00.0", id);
2590 }
2591 pbuf[sizeof(pbuf) - 1] = 0;
2592
2593 va_start(va, fmt);
2594
2595 vaf.fmt = fmt;
2596 vaf.va = &va;
2597
2598 switch (level) {
2599 case ql_log_fatal: /* FATAL LOG */
2600 pr_crit("%s%pV", pbuf, &vaf);
2601 break;
2602 case ql_log_warn:
2603 pr_err("%s%pV", pbuf, &vaf);
2604 break;
2605 case ql_log_info:
2606 pr_warn("%s%pV", pbuf, &vaf);
2607 break;
2608 default:
2609 pr_info("%s%pV", pbuf, &vaf);
2610 break;
2611 }
2612
2613 va_end(va);
2614 }
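
/*
 * Usage sketch (the id 0x00ff is a placeholder): unlike ql_dbg(), the
 * message below is emitted whenever ql_log_warn <= ql_errlev, independent
 * of ql2xextended_error_logging:
 *
 *	ql_log(ql_log_warn, vha, 0x00ff,
 *	    "Unable to allocate memory for the response queue.\n");
 */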
2615
2616 /*
2617 * This function formats and logs log messages. It is to be used when
2618 * a vha is not yet available but the PCI device is, i.e. before host
2619 * allocation. The formatted message is written to the system log
2620 * regardless of ql2xextended_error_logging; only ql_errlev determines
2621 * whether the message is emitted.
2622 * parameters:
2623 * level: The log level of the message (ql_log_fatal, ql_log_warn,
2624 * ql_log_info, ...), compared against ql_errlev.
2625 * pdev: Pointer to the struct pci_dev.
2626 * id: Unique message identifier; it identifies the part of the code
2627 * from which the message originated.
2628 * msg: The message to be displayed.
2629 */
2630 void
2631 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2632 const char *fmt, ...)
2633 {
2634 va_list va;
2635 struct va_format vaf;
2636 char pbuf[128];
2637
2638 if (pdev == NULL)
2639 return;
2640 if (level > ql_errlev)
2641 return;
2642
2643 /* <module-name> <dev-name>:<msg-id> Message */
2644 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2645 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2646 pbuf[sizeof(pbuf) - 1] = 0;
2647
2648 va_start(va, fmt);
2649
2650 vaf.fmt = fmt;
2651 vaf.va = &va;
2652
2653 switch (level) {
2654 case ql_log_fatal: /* FATAL LOG */
2655 pr_crit("%s%pV", pbuf, &vaf);
2656 break;
2657 case ql_log_warn:
2658 pr_err("%s%pV", pbuf, &vaf);
2659 break;
2660 case ql_log_info:
2661 pr_warn("%s%pV", pbuf, &vaf);
2662 break;
2663 default:
2664 pr_info("%s%pV", pbuf, &vaf);
2665 break;
2666 }
2667
2668 va_end(va);
2669 }
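
/*
 * Usage sketch (the id 0x00ff is a placeholder):
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x00ff,
 *	    "Unable to map PCI register space, aborting probe.\n");
 */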
2670
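/*
 * Dump the first six mailbox registers to the debug log at the given
 * level and message id. The mailbox base is chosen according to the
 * adapter family (ISP82xx, FWI-2 capable, or legacy ISP2xxx).
 */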
2671 void
2672 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2673 {
2674 int i;
2675 struct qla_hw_data *ha = vha->hw;
2676 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2677 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2678 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2679 uint16_t __iomem *mbx_reg;
2680
2681 if (!ql_mask_match(level))
2682 return;
2683
2684 if (IS_P3P_TYPE(ha))
2685 mbx_reg = &reg82->mailbox_in[0];
2686 else if (IS_FWI2_CAPABLE(ha))
2687 mbx_reg = &reg24->mailbox0;
2688 else
2689 mbx_reg = MAILBOX_REG(ha, reg, 0);
2690
2691 ql_dbg(level, vha, id, "Mailbox registers:\n");
2692 for (i = 0; i < 6; i++)
2693 ql_dbg(level, vha, id,
2694 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
2695 }
2696
2697
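/*
 * Hex-dump 'size' bytes of buffer 'b' to the debug log, 16 bytes per
 * row, when the given debug level is enabled. Usage sketch (the id
 * 0x00ff is a placeholder and 'icb' stands for whatever structure is
 * being dumped):
 *
 *	ql_dump_buffer(ql_dbg_buffer, vha, 0x00ff,
 *	    (uint8_t *)icb, sizeof(*icb));
 */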
2698 void
2699 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2700 uint8_t *b, uint32_t size)
2701 {
2702 uint32_t cnt;
2703 uint8_t c;
2704
2705 if (!ql_mask_match(level))
2706 return;
2707
2708 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
2709 "9 Ah Bh Ch Dh Eh Fh\n");
2710 ql_dbg(level, vha, id, "----------------------------------"
2711 "----------------------------\n");
2712
2713 ql_dbg(level, vha, id, " ");
2714 for (cnt = 0; cnt < size;) {
2715 c = *b++;
2716 printk("%02x", (uint32_t) c);
2717 cnt++;
2718 if (!(cnt % 16))
2719 printk("\n");
2720 else
2721 printk(" ");
2722 }
2723 if (cnt % 16)
2724 ql_dbg(level, vha, id, "\n");
2725 }