[SCSI] qla2xxx: Add ISP81XX support.
drivers/scsi/qla2xxx/qla_isr.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <scsi/scsi_tcq.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
30irqreturn_t
7d12e780 31qla2100_intr_handler(int irq, void *dev_id)
1da177e4 32{
e315cd28
AC
33 scsi_qla_host_t *vha;
34 struct qla_hw_data *ha;
3d71644c 35 struct device_reg_2xxx __iomem *reg;
1da177e4 36 int status;
1da177e4 37 unsigned long iter;
14e660e6 38 uint16_t hccr;
9a853f71 39 uint16_t mb[4];
e315cd28 40 struct rsp_que *rsp;
1da177e4 41
e315cd28
AC
42 rsp = (struct rsp_que *) dev_id;
43 if (!rsp) {
1da177e4 44 printk(KERN_INFO
e315cd28 45 "%s(): NULL response queue pointer\n", __func__);
1da177e4
LT
46 return (IRQ_NONE);
47 }
48
e315cd28 49 ha = rsp->hw;
3d71644c 50 reg = &ha->iobase->isp;
1da177e4
LT
51 status = 0;
52
c6952483 53 spin_lock(&ha->hardware_lock);
e315cd28 54 vha = qla2x00_get_rsp_host(rsp);
1da177e4 55 for (iter = 50; iter--; ) {
14e660e6
SJ
56 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) {
58 if (pci_channel_offline(ha->pdev))
59 break;
60
			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared. Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
66 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67 RD_REG_WORD(&reg->hccr);
68
e315cd28
AC
69 ha->isp_ops->fw_dump(vha, 1);
70 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
71 break;
72 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
73 break;
74
75 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77 RD_REG_WORD(&reg->hccr);
78
79 /* Get mailbox data. */
9a853f71
AV
80 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
81 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 82 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 83 status |= MBX_INTERRUPT;
9a853f71
AV
84 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 88 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
89 } else {
90 /*EMPTY*/
91 DEBUG2(printk("scsi(%ld): Unrecognized "
9a853f71 92 "interrupt type (%d).\n",
e315cd28 93 vha->host_no, mb[0]));
1da177e4
LT
94 }
95 /* Release mailbox registers. */
96 WRT_REG_WORD(&reg->semaphore, 0);
97 RD_REG_WORD(&reg->semaphore);
98 } else {
73208dfd 99 qla2x00_process_response_queue(rsp);
1da177e4
LT
100
101 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102 RD_REG_WORD(&reg->hccr);
103 }
104 }
c6952483 105 spin_unlock(&ha->hardware_lock);
1da177e4 106
1da177e4
LT
107 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 109 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 110 complete(&ha->mbx_intr_comp);
1da177e4
LT
111 }
112
1da177e4
LT
113 return (IRQ_HANDLED);
114}
115
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
125irqreturn_t
7d12e780 126qla2300_intr_handler(int irq, void *dev_id)
1da177e4 127{
e315cd28 128 scsi_qla_host_t *vha;
3d71644c 129 struct device_reg_2xxx __iomem *reg;
1da177e4 130 int status;
1da177e4
LT
131 unsigned long iter;
132 uint32_t stat;
1da177e4 133 uint16_t hccr;
9a853f71 134 uint16_t mb[4];
e315cd28
AC
135 struct rsp_que *rsp;
136 struct qla_hw_data *ha;
1da177e4 137
e315cd28
AC
138 rsp = (struct rsp_que *) dev_id;
139 if (!rsp) {
1da177e4 140 printk(KERN_INFO
e315cd28 141 "%s(): NULL response queue pointer\n", __func__);
1da177e4
LT
142 return (IRQ_NONE);
143 }
144
e315cd28 145 ha = rsp->hw;
3d71644c 146 reg = &ha->iobase->isp;
1da177e4
LT
147 status = 0;
148
c6952483 149 spin_lock(&ha->hardware_lock);
e315cd28 150 vha = qla2x00_get_rsp_host(rsp);
1da177e4
LT
151 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) {
14e660e6
SJ
154 if (pci_channel_offline(ha->pdev))
155 break;
156
1da177e4
LT
157 hccr = RD_REG_WORD(&reg->hccr);
158 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
07f31805
AV
159 qla_printk(KERN_INFO, ha, "Parity error -- "
160 "HCCR=%x, Dumping firmware!\n", hccr);
1da177e4 161 else
07f31805
AV
162 qla_printk(KERN_INFO, ha, "RISC paused -- "
163 "HCCR=%x, Dumping firmware!\n", hccr);
1da177e4
LT
164
			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared. Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171 RD_REG_WORD(&reg->hccr);
07f31805 172
e315cd28
AC
173 ha->isp_ops->fw_dump(vha, 1);
174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
175 break;
176 } else if ((stat & HSR_RISC_INT) == 0)
177 break;
178
1da177e4 179 switch (stat & 0xff) {
1da177e4
LT
180 case 0x1:
181 case 0x2:
182 case 0x10:
183 case 0x11:
e315cd28 184 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
185 status |= MBX_INTERRUPT;
186
187 /* Release mailbox registers. */
188 WRT_REG_WORD(&reg->semaphore, 0);
189 break;
190 case 0x12:
9a853f71
AV
191 mb[0] = MSW(stat);
192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 195 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
196 break;
197 case 0x13:
73208dfd 198 qla2x00_process_response_queue(rsp);
1da177e4
LT
199 break;
200 case 0x15:
9a853f71
AV
201 mb[0] = MBA_CMPLT_1_16BIT;
202 mb[1] = MSW(stat);
73208dfd 203 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
204 break;
205 case 0x16:
9a853f71
AV
206 mb[0] = MBA_SCSI_COMPLETION;
207 mb[1] = MSW(stat);
208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 209 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
210 break;
211 default:
212 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
9a853f71 213 "(%d).\n",
e315cd28 214 vha->host_no, stat & 0xff));
1da177e4
LT
215 break;
216 }
217 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218 RD_REG_WORD_RELAXED(&reg->hccr);
219 }
c6952483 220 spin_unlock(&ha->hardware_lock);
1da177e4 221
1da177e4
LT
222 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 224 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 225 complete(&ha->mbx_intr_comp);
1da177e4
LT
226 }
227
1da177e4
LT
228 return (IRQ_HANDLED);
229}
230
231/**
232 * qla2x00_mbx_completion() - Process mailbox command completions.
233 * @ha: SCSI driver HA context
234 * @mb0: Mailbox0 register
235 */
236static void
e315cd28 237qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
238{
239 uint16_t cnt;
240 uint16_t __iomem *wptr;
e315cd28 241 struct qla_hw_data *ha = vha->hw;
3d71644c 242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
243
244 /* Load return mailbox registers. */
245 ha->flags.mbox_int = 1;
246 ha->mailbox_out[0] = mb0;
247 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
249 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 250 if (IS_QLA2200(ha) && cnt == 8)
1da177e4
LT
251 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252 if (cnt == 4 || cnt == 5)
253 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254 else
255 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 256
1da177e4
LT
257 wptr++;
258 }
259
260 if (ha->mcp) {
261 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
e315cd28 262 __func__, vha->host_no, ha->mcp->mb[0]));
1da177e4
LT
263 } else {
264 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
e315cd28 265 __func__, vha->host_no));
1da177e4
LT
266 }
267}
268
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
2c3dfe3f 274void
73208dfd 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 276{
9a853f71 277#define LS_UNKNOWN 2
3a03eb79 278 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
1da177e4 279 char *link_speed;
1da177e4
LT
280 uint16_t handle_cnt;
281 uint16_t cnt;
282 uint32_t handles[5];
e315cd28 283 struct qla_hw_data *ha = vha->hw;
3d71644c 284 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
285 uint32_t rscn_entry, host_pid;
286 uint8_t rscn_queue_index;
4d4df193 287 unsigned long flags;
1da177e4
LT
288
289 /* Setup to process RIO completion. */
290 handle_cnt = 0;
3a03eb79
AV
291 if (IS_QLA81XX(ha))
292 goto skip_rio;
1da177e4
LT
293 switch (mb[0]) {
294 case MBA_SCSI_COMPLETION:
9a853f71 295 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
296 handle_cnt = 1;
297 break;
298 case MBA_CMPLT_1_16BIT:
9a853f71 299 handles[0] = mb[1];
1da177e4
LT
300 handle_cnt = 1;
301 mb[0] = MBA_SCSI_COMPLETION;
302 break;
303 case MBA_CMPLT_2_16BIT:
9a853f71
AV
304 handles[0] = mb[1];
305 handles[1] = mb[2];
1da177e4
LT
306 handle_cnt = 2;
307 mb[0] = MBA_SCSI_COMPLETION;
308 break;
309 case MBA_CMPLT_3_16BIT:
9a853f71
AV
310 handles[0] = mb[1];
311 handles[1] = mb[2];
312 handles[2] = mb[3];
1da177e4
LT
313 handle_cnt = 3;
314 mb[0] = MBA_SCSI_COMPLETION;
315 break;
316 case MBA_CMPLT_4_16BIT:
9a853f71
AV
317 handles[0] = mb[1];
318 handles[1] = mb[2];
319 handles[2] = mb[3];
1da177e4
LT
320 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
321 handle_cnt = 4;
322 mb[0] = MBA_SCSI_COMPLETION;
323 break;
324 case MBA_CMPLT_5_16BIT:
9a853f71
AV
325 handles[0] = mb[1];
326 handles[1] = mb[2];
327 handles[2] = mb[3];
1da177e4
LT
328 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
329 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
330 handle_cnt = 5;
331 mb[0] = MBA_SCSI_COMPLETION;
332 break;
333 case MBA_CMPLT_2_32BIT:
9a853f71 334 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
335 handles[1] = le32_to_cpu(
336 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
337 RD_MAILBOX_REG(ha, reg, 6));
338 handle_cnt = 2;
339 mb[0] = MBA_SCSI_COMPLETION;
340 break;
341 default:
342 break;
343 }
3a03eb79 344skip_rio:
1da177e4
LT
345 switch (mb[0]) {
346 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 347 if (!vha->flags.online)
1da177e4
LT
348 break;
349
350 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
351 qla2x00_process_completed_request(vha, rsp->req,
352 handles[cnt]);
1da177e4
LT
353 break;
354
355 case MBA_RESET: /* Reset */
e315cd28
AC
356 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
357 vha->host_no));
1da177e4 358
e315cd28 359 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
360 break;
361
362 case MBA_SYSTEM_ERR: /* System Error */
1da177e4
LT
363 qla_printk(KERN_INFO, ha,
364 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
365 mb[1], mb[2], mb[3]);
366
e315cd28 367 ha->isp_ops->fw_dump(vha, 1);
1da177e4 368
e428924c 369 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
370 if (mb[1] == 0 && mb[2] == 0) {
371 qla_printk(KERN_ERR, ha,
372 "Unrecoverable Hardware Error: adapter "
373 "marked OFFLINE!\n");
e315cd28 374 vha->flags.online = 0;
9a853f71 375 } else
e315cd28 376 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71 377 } else if (mb[1] == 0) {
1da177e4
LT
378 qla_printk(KERN_INFO, ha,
379 "Unrecoverable Hardware Error: adapter marked "
380 "OFFLINE!\n");
e315cd28 381 vha->flags.online = 0;
1da177e4 382 } else
e315cd28 383 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
384 break;
385
386 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
387 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
e315cd28 388 vha->host_no));
1da177e4
LT
389 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
390
e315cd28 391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
392 break;
393
394 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
395 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
e315cd28 396 vha->host_no));
1da177e4
LT
397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398
e315cd28 399 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
400 break;
401
402 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
403 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
e315cd28 404 vha->host_no));
1da177e4
LT
405 break;
406
407 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
e315cd28 408 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
1da177e4 409 mb[1]));
cc3ef7bc 410 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
1da177e4 411
e315cd28
AC
412 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
413 atomic_set(&vha->loop_state, LOOP_DOWN);
414 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
415 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
416 }
417
e315cd28
AC
418 if (vha->vp_idx) {
419 atomic_set(&vha->vp_state, VP_FAILED);
420 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
421 }
422
e315cd28
AC
423 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
424 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 425
e315cd28
AC
426 vha->flags.management_server_logged_in = 0;
427 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
428 break;
429
430 case MBA_LOOP_UP: /* Loop Up Event */
1da177e4
LT
431 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
432 link_speed = link_speeds[0];
d8b45213 433 ha->link_data_rate = PORT_SPEED_1GB;
1da177e4 434 } else {
9a853f71 435 link_speed = link_speeds[LS_UNKNOWN];
1da177e4
LT
436 if (mb[1] < 5)
437 link_speed = link_speeds[mb[1]];
3a03eb79
AV
438 else if (mb[1] == 0x13)
439 link_speed = link_speeds[5];
1da177e4
LT
440 ha->link_data_rate = mb[1];
441 }
442
443 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
e315cd28 444 vha->host_no, link_speed));
1da177e4
LT
445 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
446 link_speed);
447
e315cd28
AC
448 vha->flags.management_server_logged_in = 0;
449 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
450 break;
451
452 case MBA_LOOP_DOWN: /* Loop Down Event */
4d4df193 453 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
e315cd28 454 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
4d4df193
HK
455 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
456 mb[1], mb[2], mb[3]);
1da177e4 457
e315cd28
AC
458 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
459 atomic_set(&vha->loop_state, LOOP_DOWN);
460 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
461 vha->device_flags |= DFLG_NO_CABLE;
462 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
463 }
464
e315cd28
AC
465 if (vha->vp_idx) {
466 atomic_set(&vha->vp_state, VP_FAILED);
467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
468 }
469
e315cd28 470 vha->flags.management_server_logged_in = 0;
d8b45213 471 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 472 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
473 break;
474
475 case MBA_LIP_RESET: /* LIP reset occurred */
1da177e4 476 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
e315cd28 477 vha->host_no, mb[1]));
1da177e4 478 qla_printk(KERN_INFO, ha,
cc3ef7bc 479 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 480
e315cd28
AC
481 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
482 atomic_set(&vha->loop_state, LOOP_DOWN);
483 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
484 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
485 }
486
e315cd28
AC
487 if (vha->vp_idx) {
488 atomic_set(&vha->vp_state, VP_FAILED);
489 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
490 }
491
e315cd28 492 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
493
494 ha->operating_mode = LOOP;
e315cd28
AC
495 vha->flags.management_server_logged_in = 0;
496 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
497 break;
498
3a03eb79 499 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
500 case MBA_POINT_TO_POINT: /* Point-to-Point */
501 if (IS_QLA2100(ha))
502 break;
503
3a03eb79
AV
504 if (IS_QLA81XX(ha))
505 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
506 "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
507 else
508 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
509 "received.\n", vha->host_no));
1da177e4
LT
510
511 /*
512 * Until there's a transition from loop down to loop up, treat
513 * this as loop down only.
514 */
e315cd28
AC
515 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
516 atomic_set(&vha->loop_state, LOOP_DOWN);
517 if (!atomic_read(&vha->loop_down_timer))
518 atomic_set(&vha->loop_down_timer,
1da177e4 519 LOOP_DOWN_TIME);
e315cd28 520 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
521 }
522
e315cd28
AC
523 if (vha->vp_idx) {
524 atomic_set(&vha->vp_state, VP_FAILED);
525 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
526 }
527
e315cd28
AC
528 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
529 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
530
531 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
532 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
533
534 ha->flags.gpsc_supported = 1;
e315cd28 535 vha->flags.management_server_logged_in = 0;
1da177e4
LT
536 break;
537
538 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
539 if (IS_QLA2100(ha))
540 break;
541
1da177e4
LT
542 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
543 "received.\n",
e315cd28 544 vha->host_no));
1da177e4
LT
545 qla_printk(KERN_INFO, ha,
546 "Configuration change detected: value=%x.\n", mb[1]);
547
e315cd28
AC
548 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
549 atomic_set(&vha->loop_state, LOOP_DOWN);
550 if (!atomic_read(&vha->loop_down_timer))
551 atomic_set(&vha->loop_down_timer,
1da177e4 552 LOOP_DOWN_TIME);
e315cd28 553 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
554 }
555
e315cd28
AC
556 if (vha->vp_idx) {
557 atomic_set(&vha->vp_state, VP_FAILED);
558 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
559 }
560
e315cd28
AC
561 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
562 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
563 break;
564
565 case MBA_PORT_UPDATE: /* Port database update */
73208dfd
AC
566 /* Only handle SCNs for our Vport index. */
567 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
568 break;
569
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and wait for RSCN to come in.
		 */
e315cd28
AC
575 atomic_set(&vha->loop_down_timer, 0);
576 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
577 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1da177e4 578 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
e315cd28 579 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
9a853f71 580 mb[2], mb[3]));
1da177e4
LT
581 break;
582 }
583
584 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
e315cd28 585 vha->host_no));
1da177e4 586 DEBUG(printk(KERN_INFO
9a853f71 587 "scsi(%ld): Port database changed %04x %04x %04x.\n",
e315cd28 588 vha->host_no, mb[1], mb[2], mb[3]));
1da177e4
LT
589
590 /*
591 * Mark all devices as missing so we will login again.
592 */
e315cd28 593 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 594
e315cd28 595 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 596
e315cd28 597 vha->flags.rscn_queue_overflow = 1;
1da177e4 598
e315cd28
AC
599 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
600 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
601 break;
602
603 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 604 /* Check if the Vport has issued a SCR */
e315cd28 605 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
606 break;
607 /* Only handle SCNs for our Vport index. */
e315cd28 608 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
3c397400 609 break;
		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    vha->host_no));
1da177e4 612 DEBUG(printk(KERN_INFO
f4a8dbc7 613 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
e315cd28 614 vha->host_no, mb[1], mb[2], mb[3]));
1da177e4 615
59d72d87 616 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
617 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
618 | vha->d_id.b.al_pa;
1da177e4
LT
619 if (rscn_entry == host_pid) {
620 DEBUG(printk(KERN_INFO
621 "scsi(%ld): Ignoring RSCN update to local host "
622 "port ID (%06x)\n",
e315cd28 623 vha->host_no, host_pid));
1da177e4
LT
624 break;
625 }
626
59d72d87
RA
627 /* Ignore reserved bits from RSCN-payload. */
628 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
e315cd28 629 rscn_queue_index = vha->rscn_in_ptr + 1;
1da177e4
LT
630 if (rscn_queue_index == MAX_RSCN_COUNT)
631 rscn_queue_index = 0;
e315cd28
AC
632 if (rscn_queue_index != vha->rscn_out_ptr) {
633 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
634 vha->rscn_in_ptr = rscn_queue_index;
1da177e4 635 } else {
e315cd28 636 vha->flags.rscn_queue_overflow = 1;
1da177e4
LT
637 }
638
e315cd28
AC
639 atomic_set(&vha->loop_state, LOOP_UPDATE);
640 atomic_set(&vha->loop_down_timer, 0);
641 vha->flags.management_server_logged_in = 0;
1da177e4 642
e315cd28
AC
643 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
644 set_bit(RSCN_UPDATE, &vha->dpc_flags);
645 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
646 break;
647
648 /* case MBA_RIO_RESPONSE: */
649 case MBA_ZIO_RESPONSE:
3fd67cdf 650 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
e315cd28 651 vha->host_no));
1da177e4 652
e428924c 653 if (IS_FWI2_CAPABLE(ha))
73208dfd 654 qla24xx_process_response_queue(rsp);
4fdfefe5 655 else
73208dfd 656 qla2x00_process_response_queue(rsp);
1da177e4 657 break;
9a853f71
AV
658
659 case MBA_DISCARD_RND_FRAME:
660 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
e315cd28 661 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
9a853f71 662 break;
45ebeb56
AV
663
664 case MBA_TRACE_NOTIFICATION:
665 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
e315cd28 666 vha->host_no, mb[1], mb[2]));
45ebeb56 667 break;
4d4df193
HK
668
669 case MBA_ISP84XX_ALERT:
670 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
e315cd28 671 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
4d4df193
HK
672
673 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
674 switch (mb[1]) {
675 case A84_PANIC_RECOVERY:
676 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
677 "%04x %04x\n", mb[2], mb[3]);
678 break;
679 case A84_OP_LOGIN_COMPLETE:
680 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
681 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
682 "firmware version %x\n", ha->cs84xx->op_fw_version));
683 break;
684 case A84_DIAG_LOGIN_COMPLETE:
685 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
686 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
687 "diagnostic firmware version %x\n",
688 ha->cs84xx->diag_fw_version));
689 break;
690 case A84_GOLD_LOGIN_COMPLETE:
691 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
692 ha->cs84xx->fw_update = 1;
693 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
694 "firmware version %x\n",
695 ha->cs84xx->gold_fw_version));
696 break;
697 default:
698 qla_printk(KERN_ERR, ha,
699 "Alert 84xx: Invalid Alert %04x %04x %04x\n",
700 mb[1], mb[2], mb[3]);
701 }
702 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
703 break;
3a03eb79
AV
704 case MBA_DCBX_START:
705 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
706 vha->host_no, mb[1], mb[2], mb[3]));
707 break;
708 case MBA_DCBX_PARAM_UPDATE:
709 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
710 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
711 break;
712 case MBA_FCF_CONF_ERR:
713 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
714 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
715 break;
	case MBA_IDC_COMPLETE:
		DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
		    "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2],
		    mb[3]));
		break;
	case MBA_IDC_NOTIFY:
		DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
		    "Request Notification -- %04x %04x %04x\n", vha->host_no,
		    mb[1], mb[2], mb[3]));
		/**** Mailbox registers 4 - 7 valid!!! */
		break;
	case MBA_IDC_TIME_EXT:
		DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
		    "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1],
		    mb[2], mb[3]));
		/**** Mailbox registers 4 - 7 valid!!! */
		break;
1da177e4 733 }
2c3dfe3f 734
e315cd28 735 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 736 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
737}
738
df7baa50
AV
739static void
740qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
741{
742 fc_port_t *fcport = data;
73208dfd
AC
743 struct scsi_qla_host *vha = fcport->vha;
744 struct qla_hw_data *ha = vha->hw;
745 struct req_que *req = NULL;
746
747 req = ha->req_q_map[vha->req_ques[0]];
748 if (!req)
749 return;
750 if (req->max_q_depth <= sdev->queue_depth)
df7baa50
AV
751 return;
752
753 if (sdev->ordered_tags)
754 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
755 sdev->queue_depth + 1);
756 else
757 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
758 sdev->queue_depth + 1);
759
760 fcport->last_ramp_up = jiffies;
761
e315cd28 762 DEBUG2(qla_printk(KERN_INFO, ha,
df7baa50 763 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
e315cd28 764 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
df7baa50
AV
765 sdev->queue_depth));
766}
767
768static void
769qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
770{
771 fc_port_t *fcport = data;
772
773 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
774 return;
775
e315cd28 776 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
df7baa50 777 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
e315cd28 778 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
df7baa50
AV
779 sdev->queue_depth));
780}
781
782static inline void
73208dfd
AC
783qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
784 srb_t *sp)
df7baa50
AV
785{
786 fc_port_t *fcport;
787 struct scsi_device *sdev;
788
789 sdev = sp->cmd->device;
73208dfd 790 if (sdev->queue_depth >= req->max_q_depth)
df7baa50
AV
791 return;
792
793 fcport = sp->fcport;
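	/*
	 * Rate-limit ramp-up: wait at least ql2xqfullrampup seconds after
	 * the last ramp-up and the last QUEUE FULL before increasing the
	 * queue depth again.
	 */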
794 if (time_before(jiffies,
795 fcport->last_ramp_up + ql2xqfullrampup * HZ))
796 return;
797 if (time_before(jiffies,
798 fcport->last_queue_full + ql2xqfullrampup * HZ))
799 return;
800
df7baa50
AV
801 starget_for_each_device(sdev->sdev_target, fcport,
802 qla2x00_adjust_sdev_qdepth_up);
df7baa50
AV
803}
804
1da177e4
LT
805/**
806 * qla2x00_process_completed_request() - Process a Fast Post response.
807 * @ha: SCSI driver HA context
808 * @index: SRB index
809 */
810static void
73208dfd
AC
811qla2x00_process_completed_request(struct scsi_qla_host *vha,
812 struct req_que *req, uint32_t index)
1da177e4
LT
813{
814 srb_t *sp;
e315cd28 815 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
816
817 /* Validate handle. */
818 if (index >= MAX_OUTSTANDING_COMMANDS) {
819 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
e315cd28 820 vha->host_no, index));
1da177e4
LT
821 qla_printk(KERN_WARNING, ha,
822 "Invalid SCSI completion handle %d.\n", index);
823
e315cd28 824 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
825 return;
826 }
827
e315cd28 828 sp = req->outstanding_cmds[index];
1da177e4
LT
829 if (sp) {
830 /* Free outstanding command slot. */
e315cd28 831 req->outstanding_cmds[index] = NULL;
1da177e4 832
1da177e4
LT
833 CMD_COMPL_STATUS(sp->cmd) = 0L;
834 CMD_SCSI_STATUS(sp->cmd) = 0L;
835
836 /* Save ISP completion status */
837 sp->cmd->result = DID_OK << 16;
df7baa50 838
73208dfd
AC
839 qla2x00_ramp_up_queue_depth(vha, req, sp);
840 qla2x00_sp_compl(ha, sp);
1da177e4
LT
841 } else {
842 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
e315cd28 843 vha->host_no));
1da177e4
LT
844 qla_printk(KERN_WARNING, ha,
845 "Invalid ISP SCSI completion handle\n");
846
e315cd28 847 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
848 }
849}
850
851/**
852 * qla2x00_process_response_queue() - Process response queue entries.
853 * @ha: SCSI driver HA context
854 */
855void
73208dfd 856qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 857{
73208dfd
AC
858 struct scsi_qla_host *vha;
859 struct qla_hw_data *ha = rsp->hw;
3d71644c 860 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
861 sts_entry_t *pkt;
862 uint16_t handle_cnt;
863 uint16_t cnt;
73208dfd
AC
864
865 vha = qla2x00_get_rsp_host(rsp);
1da177e4 866
e315cd28 867 if (!vha->flags.online)
1da177e4
LT
868 return;
869
e315cd28
AC
870 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
871 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 872
e315cd28
AC
873 rsp->ring_index++;
874 if (rsp->ring_index == rsp->length) {
875 rsp->ring_index = 0;
876 rsp->ring_ptr = rsp->ring;
1da177e4 877 } else {
e315cd28 878 rsp->ring_ptr++;
1da177e4
LT
879 }
880
881 if (pkt->entry_status != 0) {
882 DEBUG3(printk(KERN_INFO
e315cd28 883 "scsi(%ld): Process error entry.\n", vha->host_no));
1da177e4 884
73208dfd 885 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
886 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
887 wmb();
888 continue;
889 }
890
891 switch (pkt->entry_type) {
892 case STATUS_TYPE:
73208dfd 893 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
894 break;
895 case STATUS_TYPE_21:
896 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
897 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 898 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
899 ((sts21_entry_t *)pkt)->handle[cnt]);
900 }
901 break;
902 case STATUS_TYPE_22:
903 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
904 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 905 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
906 ((sts22_entry_t *)pkt)->handle[cnt]);
907 }
908 break;
909 case STATUS_CONT_TYPE:
e315cd28 910 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1da177e4 911 break;
1da177e4
LT
912 default:
913 /* Type Not Supported. */
914 DEBUG4(printk(KERN_WARNING
915 "scsi(%ld): Received unknown response pkt type %x "
916 "entry status=%x.\n",
e315cd28 917 vha->host_no, pkt->entry_type, pkt->entry_status));
1da177e4
LT
918 break;
919 }
920 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
921 wmb();
922 }
923
924 /* Adjust ring index */
e315cd28 925 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
926}
927
4733fcb1
AV
928static inline void
929qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
930{
931 struct scsi_cmnd *cp = sp->cmd;
932
933 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
934 sense_len = SCSI_SENSE_BUFFERSIZE;
935
936 CMD_ACTUAL_SNSLEN(cp) = sense_len;
937 sp->request_sense_length = sense_len;
938 sp->request_sense_ptr = cp->sense_buffer;
939 if (sp->request_sense_length > 32)
940 sense_len = 32;
941
942 memcpy(cp->sense_buffer, sense_data, sense_len);
943
944 sp->request_sense_ptr += sense_len;
945 sp->request_sense_length -= sense_len;
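	/*
	 * Sense data that did not fit in this IOCB is copied in later by
	 * qla2x00_status_cont_entry() via the saved status_srb pointer.
	 */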
946 if (sp->request_sense_length != 0)
e315cd28 947 sp->fcport->vha->status_srb = sp;
4733fcb1
AV
948
949 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
e315cd28 950 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
19851f13
AV
951 cp->device->channel, cp->device->id, cp->device->lun, cp,
952 cp->serial_number));
4733fcb1
AV
953 if (sense_len)
954 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
955 CMD_ACTUAL_SNSLEN(cp)));
956}
957
1da177e4
LT
958/**
959 * qla2x00_status_entry() - Process a Status IOCB entry.
960 * @ha: SCSI driver HA context
961 * @pkt: Entry pointer
962 */
963static void
73208dfd 964qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 965{
1da177e4 966 srb_t *sp;
1da177e4
LT
967 fc_port_t *fcport;
968 struct scsi_cmnd *cp;
9a853f71
AV
969 sts_entry_t *sts;
970 struct sts_entry_24xx *sts24;
1da177e4
LT
971 uint16_t comp_status;
972 uint16_t scsi_status;
973 uint8_t lscsi_status;
974 int32_t resid;
ed17c71b 975 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
9a853f71 976 uint8_t *rsp_info, *sense_data;
e315cd28 977 struct qla_hw_data *ha = vha->hw;
73208dfd 978 struct req_que *req = rsp->req;
9a853f71
AV
979
980 sts = (sts_entry_t *) pkt;
981 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 982 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
983 comp_status = le16_to_cpu(sts24->comp_status);
984 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
985 } else {
986 comp_status = le16_to_cpu(sts->comp_status);
987 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
988 }
1da177e4
LT
989
990 /* Fast path completion. */
9a853f71 991 if (comp_status == CS_COMPLETE && scsi_status == 0) {
73208dfd 992 qla2x00_process_completed_request(vha, req, sts->handle);
1da177e4
LT
993
994 return;
995 }
996
997 /* Validate handle. */
9a853f71 998 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
e315cd28
AC
999 sp = req->outstanding_cmds[sts->handle];
1000 req->outstanding_cmds[sts->handle] = NULL;
1da177e4
LT
1001 } else
1002 sp = NULL;
1003
1004 if (sp == NULL) {
1005 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
e315cd28 1006 vha->host_no));
1da177e4
LT
1007 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
1008
e315cd28
AC
1009 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1010 qla2xxx_wake_dpc(vha);
1da177e4
LT
1011 return;
1012 }
1013 cp = sp->cmd;
1014 if (cp == NULL) {
1015 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
e315cd28 1016 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
1da177e4
LT
1017 qla_printk(KERN_WARNING, ha,
1018 "Command is NULL: already returned to OS (sp=%p)\n", sp);
1019
1020 return;
1021 }
1022
9a853f71
AV
1023 lscsi_status = scsi_status & STATUS_MASK;
1024 CMD_ENTRY_STATUS(cp) = sts->entry_status;
1da177e4
LT
1025 CMD_COMPL_STATUS(cp) = comp_status;
1026 CMD_SCSI_STATUS(cp) = scsi_status;
1027
bdf79621 1028 fcport = sp->fcport;
1da177e4 1029
ed17c71b 1030 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
e428924c 1031 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1032 sense_len = le32_to_cpu(sts24->sense_len);
1033 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1034 resid_len = le32_to_cpu(sts24->rsp_residual_count);
ed17c71b 1035 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1036 rsp_info = sts24->data;
1037 sense_data = sts24->data;
1038 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1039 } else {
1040 sense_len = le16_to_cpu(sts->req_sense_length);
1041 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1042 resid_len = le32_to_cpu(sts->residual_length);
1043 rsp_info = sts->rsp_info;
1044 sense_data = sts->req_sense_data;
1045 }
1046
1da177e4
LT
1047 /* Check for any FCP transport errors. */
1048 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1049 /* Sense data lies beyond any FCP RESPONSE data. */
e428924c 1050 if (IS_FWI2_CAPABLE(ha))
9a853f71
AV
1051 sense_data += rsp_info_len;
1052 if (rsp_info_len > 3 && rsp_info[3]) {
1da177e4
LT
1053 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1054 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
e315cd28 1055 "retrying command\n", vha->host_no,
9a853f71
AV
1056 cp->device->channel, cp->device->id,
1057 cp->device->lun, rsp_info_len, rsp_info[0],
1058 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1059 rsp_info[5], rsp_info[6], rsp_info[7]));
1da177e4
LT
1060
1061 cp->result = DID_BUS_BUSY << 16;
73208dfd 1062 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1063 return;
1064 }
1065 }
1066
3e8ce320
AV
1067 /* Check for overrun. */
1068 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1069 scsi_status & SS_RESIDUAL_OVER)
1070 comp_status = CS_DATA_OVERRUN;
1071
1da177e4
LT
1072 /*
1073 * Based on Host and scsi status generate status code for Linux
1074 */
1075 switch (comp_status) {
1076 case CS_COMPLETE:
df7baa50 1077 case CS_QUEUE_FULL:
1da177e4
LT
1078 if (scsi_status == 0) {
1079 cp->result = DID_OK << 16;
1080 break;
1081 }
1082 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 1083 resid = resid_len;
385d70b4 1084 scsi_set_resid(cp, resid);
1da177e4 1085 CMD_RESID_LEN(cp) = resid;
0da69df1
AV
1086
1087 if (!lscsi_status &&
385d70b4 1088 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1
AV
1089 cp->underflow)) {
1090 qla_printk(KERN_INFO, ha,
385d70b4
FT
1091 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1092 "detected (%x of %x bytes)...returning "
e315cd28 1093 "error status.\n", vha->host_no,
385d70b4
FT
1094 cp->device->channel, cp->device->id,
1095 cp->device->lun, resid,
1096 scsi_bufflen(cp));
0da69df1
AV
1097
1098 cp->result = DID_ERROR << 16;
1099 break;
1100 }
1da177e4 1101 }
1da177e4
LT
1102 cp->result = DID_OK << 16 | lscsi_status;
1103
df7baa50
AV
1104 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1105 DEBUG2(printk(KERN_INFO
1106 "scsi(%ld): QUEUE FULL status detected "
e315cd28 1107 "0x%x-0x%x.\n", vha->host_no, comp_status,
df7baa50
AV
1108 scsi_status));
1109
1110 /* Adjust queue depth for all luns on the port. */
1111 fcport->last_queue_full = jiffies;
df7baa50
AV
1112 starget_for_each_device(cp->device->sdev_target,
1113 fcport, qla2x00_adjust_sdev_qdepth_down);
df7baa50
AV
1114 break;
1115 }
1da177e4
LT
1116 if (lscsi_status != SS_CHECK_CONDITION)
1117 break;
1118
b80ca4f7 1119 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1120 if (!(scsi_status & SS_SENSE_LEN_VALID))
1121 break;
1122
4733fcb1 1123 qla2x00_handle_sense(sp, sense_data, sense_len);
1da177e4
LT
1124 break;
1125
1126 case CS_DATA_UNDERRUN:
9a853f71 1127 resid = resid_len;
ed17c71b 1128 /* Use F/W calculated residual length. */
6acf8190 1129 if (IS_FWI2_CAPABLE(ha)) {
2d136938
AV
1130 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1131 lscsi_status = 0;
1132 } else if (resid != fw_resid_len) {
6acf8190
AV
1133 scsi_status &= ~SS_RESIDUAL_UNDER;
1134 lscsi_status = 0;
1135 }
ed17c71b 1136 resid = fw_resid_len;
6acf8190 1137 }
ed17c71b 1138
1da177e4 1139 if (scsi_status & SS_RESIDUAL_UNDER) {
385d70b4 1140 scsi_set_resid(cp, resid);
1da177e4 1141 CMD_RESID_LEN(cp) = resid;
e038a1be 1142 } else {
1143 DEBUG2(printk(KERN_INFO
1144 "scsi(%ld:%d:%d) UNDERRUN status detected "
ed17c71b 1145 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
e315cd28 1146 "os_underflow=0x%x\n", vha->host_no,
ed17c71b
RA
1147 cp->device->id, cp->device->lun, comp_status,
1148 scsi_status, resid_len, resid, cp->cmnd[0],
1149 cp->underflow));
e038a1be 1150
1da177e4
LT
1151 }
1152
		/*
		 * Check to see if SCSI Status is non-zero. If so report SCSI
		 * Status.
		 */
1157 if (lscsi_status != 0) {
1da177e4
LT
1158 cp->result = DID_OK << 16 | lscsi_status;
1159
ffec28a3
AV
1160 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1161 DEBUG2(printk(KERN_INFO
1162 "scsi(%ld): QUEUE FULL status detected "
e315cd28 1163 "0x%x-0x%x.\n", vha->host_no, comp_status,
ffec28a3
AV
1164 scsi_status));
1165
1166 /*
1167 * Adjust queue depth for all luns on the
1168 * port.
1169 */
1170 fcport->last_queue_full = jiffies;
1171 starget_for_each_device(
1172 cp->device->sdev_target, fcport,
1173 qla2x00_adjust_sdev_qdepth_down);
1174 break;
1175 }
1da177e4
LT
1176 if (lscsi_status != SS_CHECK_CONDITION)
1177 break;
1178
b80ca4f7 1179 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1180 if (!(scsi_status & SS_SENSE_LEN_VALID))
1181 break;
1182
4733fcb1 1183 qla2x00_handle_sense(sp, sense_data, sense_len);
1da177e4
LT
1184 } else {
1185 /*
1186 * If RISC reports underrun and target does not report
1187 * it then we must have a lost frame, so tell upper
1188 * layer to retry it by reporting a bus busy.
1189 */
1190 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1191 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
385d70b4 1192 "frame(s) detected (%x of %x bytes)..."
e315cd28
AC
1193 "retrying command.\n",
1194 vha->host_no, cp->device->channel,
1195 cp->device->id, cp->device->lun, resid,
1196 scsi_bufflen(cp)));
1da177e4
LT
1197
1198 cp->result = DID_BUS_BUSY << 16;
1da177e4
LT
1199 break;
1200 }
1201
1202 /* Handle mid-layer underflow */
385d70b4 1203 if ((unsigned)(scsi_bufflen(cp) - resid) <
1da177e4
LT
1204 cp->underflow) {
1205 qla_printk(KERN_INFO, ha,
385d70b4
FT
1206 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1207 "detected (%x of %x bytes)...returning "
e315cd28 1208 "error status.\n", vha->host_no,
385d70b4
FT
1209 cp->device->channel, cp->device->id,
1210 cp->device->lun, resid,
1211 scsi_bufflen(cp));
1da177e4
LT
1212
1213 cp->result = DID_ERROR << 16;
1214 break;
1215 }
1216
1217 /* Everybody online, looking good... */
1218 cp->result = DID_OK << 16;
1219 }
1220 break;
1221
1222 case CS_DATA_OVERRUN:
1223 DEBUG2(printk(KERN_INFO
1224 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
e315cd28 1225 vha->host_no, cp->device->id, cp->device->lun, comp_status,
9a853f71 1226 scsi_status));
1da177e4
LT
1227 DEBUG2(printk(KERN_INFO
1228 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1229 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1230 cp->cmnd[4], cp->cmnd[5]));
1231 DEBUG2(printk(KERN_INFO
1232 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1233 "status!\n",
385d70b4 1234 cp->serial_number, scsi_bufflen(cp), resid_len));
1da177e4
LT
1235
1236 cp->result = DID_ERROR << 16;
1237 break;
1238
1239 case CS_PORT_LOGGED_OUT:
1240 case CS_PORT_CONFIG_CHG:
1241 case CS_PORT_BUSY:
1242 case CS_INCOMPLETE:
1243 case CS_PORT_UNAVAILABLE:
1244 /*
1245 * If the port is in Target Down state, return all IOs for this
1246 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1247 * retry_queue.
1248 */
1da177e4
LT
1249 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1250 "pid=%ld, compl status=0x%x, port state=0x%x\n",
e315cd28 1251 vha->host_no, cp->device->id, cp->device->lun,
9a853f71 1252 cp->serial_number, comp_status,
1da177e4
LT
1253 atomic_read(&fcport->state)));
1254
056a4483
MC
1255 /*
1256 * We are going to have the fc class block the rport
1257 * while we try to recover so instruct the mid layer
1258 * to requeue until the class decides how to handle this.
1259 */
1260 cp->result = DID_TRANSPORT_DISRUPTED << 16;
a7a28504 1261 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 1262 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1263 break;
1264
1265 case CS_RESET:
1266 DEBUG2(printk(KERN_INFO
1267 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
e315cd28 1268 vha->host_no, comp_status, scsi_status));
1da177e4 1269
f4f051eb 1270 cp->result = DID_RESET << 16;
1da177e4
LT
1271 break;
1272
1273 case CS_ABORTED:
fa2a1ce5 1274 /*
1da177e4
LT
1275 * hv2.19.12 - DID_ABORT does not retry the request if we
1276 * aborted this request then abort otherwise it must be a
1277 * reset.
1278 */
1279 DEBUG2(printk(KERN_INFO
1280 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
e315cd28 1281 vha->host_no, comp_status, scsi_status));
1da177e4
LT
1282
1283 cp->result = DID_RESET << 16;
1284 break;
1285
1286 case CS_TIMEOUT:
056a4483
MC
1287 /*
1288 * We are going to have the fc class block the rport
1289 * while we try to recover so instruct the mid layer
1290 * to requeue until the class decides how to handle this.
1291 */
1292 cp->result = DID_TRANSPORT_DISRUPTED << 16;
9a853f71 1293
e428924c 1294 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1295 DEBUG2(printk(KERN_INFO
1296 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
e315cd28 1297 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
9a853f71
AV
1298 cp->device->id, cp->device->lun, comp_status,
1299 scsi_status));
1300 break;
1301 }
1da177e4
LT
1302 DEBUG2(printk(KERN_INFO
1303 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
e315cd28 1304 "sflags=%x.\n", vha->host_no, cp->device->channel,
9a853f71
AV
1305 cp->device->id, cp->device->lun, comp_status, scsi_status,
1306 le16_to_cpu(sts->status_flags)));
1da177e4 1307
9a853f71
AV
1308 /* Check to see if logout occurred. */
1309 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
e315cd28 1310 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1311 break;
1312
1da177e4
LT
1313 default:
1314 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
e315cd28 1315 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1da177e4
LT
1316 qla_printk(KERN_INFO, ha,
1317 "Unknown status detected 0x%x-0x%x.\n",
1318 comp_status, scsi_status);
1319
1320 cp->result = DID_ERROR << 16;
1321 break;
1322 }
1323
1324 /* Place command on done queue. */
e315cd28 1325 if (vha->status_srb == NULL)
73208dfd 1326 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1327}
1328
1329/**
1330 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1331 * @ha: SCSI driver HA context
1332 * @pkt: Entry pointer
1333 *
1334 * Extended sense data.
1335 */
1336static void
e315cd28 1337qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1da177e4
LT
1338{
1339 uint8_t sense_sz = 0;
e315cd28
AC
1340 struct qla_hw_data *ha = vha->hw;
1341 srb_t *sp = vha->status_srb;
1da177e4
LT
1342 struct scsi_cmnd *cp;
1343
1344 if (sp != NULL && sp->request_sense_length != 0) {
1345 cp = sp->cmd;
1346 if (cp == NULL) {
1347 DEBUG2(printk("%s(): Cmd already returned back to OS "
75bc4190 1348 "sp=%p.\n", __func__, sp));
1da177e4
LT
1349 qla_printk(KERN_INFO, ha,
1350 "cmd is NULL: already returned to OS (sp=%p)\n",
fa2a1ce5 1351 sp);
1da177e4 1352
e315cd28 1353 vha->status_srb = NULL;
1da177e4
LT
1354 return;
1355 }
1356
1357 if (sp->request_sense_length > sizeof(pkt->data)) {
1358 sense_sz = sizeof(pkt->data);
1359 } else {
1360 sense_sz = sp->request_sense_length;
1361 }
1362
1363 /* Move sense data. */
e428924c 1364 if (IS_FWI2_CAPABLE(ha))
9a853f71 1365 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1da177e4
LT
1366 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1367 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1368
1369 sp->request_sense_ptr += sense_sz;
1370 sp->request_sense_length -= sense_sz;
1371
1372 /* Place command on done queue. */
1373 if (sp->request_sense_length == 0) {
e315cd28 1374 vha->status_srb = NULL;
73208dfd 1375 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1376 }
1377 }
1378}
1379
1380/**
1381 * qla2x00_error_entry() - Process an error entry.
1382 * @ha: SCSI driver HA context
1383 * @pkt: Entry pointer
1384 */
1385static void
73208dfd 1386qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
1387{
1388 srb_t *sp;
e315cd28 1389 struct qla_hw_data *ha = vha->hw;
73208dfd 1390 struct req_que *req = rsp->req;
1da177e4
LT
1391#if defined(QL_DEBUG_LEVEL_2)
1392 if (pkt->entry_status & RF_INV_E_ORDER)
1393 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1394 else if (pkt->entry_status & RF_INV_E_COUNT)
1395 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1396 else if (pkt->entry_status & RF_INV_E_PARAM)
fa2a1ce5 1397 qla_printk(KERN_ERR, ha,
1da177e4
LT
1398 "%s: Invalid Entry Parameter\n", __func__);
1399 else if (pkt->entry_status & RF_INV_E_TYPE)
1400 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1401 else if (pkt->entry_status & RF_BUSY)
1402 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1403 else
1404 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1405#endif
1406
1407 /* Validate handle. */
1408 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
e315cd28 1409 sp = req->outstanding_cmds[pkt->handle];
1da177e4
LT
1410 else
1411 sp = NULL;
1412
1413 if (sp) {
1414 /* Free outstanding command slot. */
e315cd28 1415 req->outstanding_cmds[pkt->handle] = NULL;
354d6b21 1416
1da177e4
LT
1417 /* Bad payload or header */
1418 if (pkt->entry_status &
1419 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1420 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1421 sp->cmd->result = DID_ERROR << 16;
1422 } else if (pkt->entry_status & RF_BUSY) {
1423 sp->cmd->result = DID_BUS_BUSY << 16;
1424 } else {
1425 sp->cmd->result = DID_ERROR << 16;
1426 }
73208dfd 1427 qla2x00_sp_compl(ha, sp);
1da177e4 1428
9a853f71
AV
1429 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1430 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1da177e4 1431 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
e315cd28 1432 vha->host_no));
1da177e4
LT
1433 qla_printk(KERN_WARNING, ha,
1434 "Error entry - invalid handle\n");
1435
e315cd28
AC
1436 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1437 qla2xxx_wake_dpc(vha);
1da177e4
LT
1438 }
1439}
1440
9a853f71
AV
1441/**
1442 * qla24xx_mbx_completion() - Process mailbox command completions.
1443 * @ha: SCSI driver HA context
1444 * @mb0: Mailbox0 register
1445 */
1446static void
e315cd28 1447qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
1448{
1449 uint16_t cnt;
1450 uint16_t __iomem *wptr;
e315cd28 1451 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
1452 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1453
1454 /* Load return mailbox registers. */
1455 ha->flags.mbox_int = 1;
1456 ha->mailbox_out[0] = mb0;
1457 wptr = (uint16_t __iomem *)&reg->mailbox1;
1458
1459 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1460 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1461 wptr++;
1462 }
1463
1464 if (ha->mcp) {
1465 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
e315cd28 1466 __func__, vha->host_no, ha->mcp->mb[0]));
9a853f71
AV
1467 } else {
1468 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
e315cd28 1469 __func__, vha->host_no));
9a853f71
AV
1470 }
1471}
1472
1473/**
1474 * qla24xx_process_response_queue() - Process response queue entries.
1475 * @ha: SCSI driver HA context
1476 */
1477void
73208dfd 1478qla24xx_process_response_queue(struct rsp_que *rsp)
9a853f71 1479{
73208dfd 1480 struct qla_hw_data *ha = rsp->hw;
9a853f71 1481 struct sts_entry_24xx *pkt;
73208dfd
AC
1482 struct scsi_qla_host *vha;
1483
1484 vha = qla2x00_get_rsp_host(rsp);
9a853f71 1485
e315cd28 1486 if (!vha->flags.online)
9a853f71
AV
1487 return;
1488
e315cd28
AC
1489 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1490 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 1491
e315cd28
AC
1492 rsp->ring_index++;
1493 if (rsp->ring_index == rsp->length) {
1494 rsp->ring_index = 0;
1495 rsp->ring_ptr = rsp->ring;
9a853f71 1496 } else {
e315cd28 1497 rsp->ring_ptr++;
9a853f71
AV
1498 }
1499
1500 if (pkt->entry_status != 0) {
1501 DEBUG3(printk(KERN_INFO
e315cd28 1502 "scsi(%ld): Process error entry.\n", vha->host_no));
9a853f71 1503
73208dfd 1504 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
9a853f71
AV
1505 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1506 wmb();
1507 continue;
1508 }
1509
1510 switch (pkt->entry_type) {
1511 case STATUS_TYPE:
73208dfd 1512 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
1513 break;
1514 case STATUS_CONT_TYPE:
e315cd28 1515 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
9a853f71 1516 break;
2c3dfe3f 1517 case VP_RPT_ID_IOCB_TYPE:
e315cd28 1518 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
1519 (struct vp_rpt_id_entry_24xx *)pkt);
1520 break;
9a853f71
AV
1521 default:
1522 /* Type Not Supported. */
1523 DEBUG4(printk(KERN_WARNING
1524 "scsi(%ld): Received unknown response pkt type %x "
1525 "entry status=%x.\n",
e315cd28 1526 vha->host_no, pkt->entry_type, pkt->entry_status));
9a853f71
AV
1527 break;
1528 }
1529 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1530 wmb();
1531 }
1532
1533 /* Adjust ring index */
17d98630 1534 ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
9a853f71
AV
1535}
1536
05236a05 1537static void
e315cd28 1538qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
1539{
1540 int rval;
1541 uint32_t cnt;
e315cd28 1542 struct qla_hw_data *ha = vha->hw;
05236a05
AV
1543 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1544
3a03eb79 1545 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
05236a05
AV
1546 return;
1547
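	/*
	 * Select RISC register window 0x7C00 and poll until the window
	 * select (BIT_0) is reflected back in the iobase_window register.
	 */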
1548 rval = QLA_SUCCESS;
1549 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1550 RD_REG_DWORD(&reg->iobase_addr);
1551 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1552 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1553 rval == QLA_SUCCESS; cnt--) {
1554 if (cnt) {
1555 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1556 udelay(10);
1557 } else
1558 rval = QLA_FUNCTION_TIMEOUT;
1559 }
1560 if (rval == QLA_SUCCESS)
1561 goto next_test;
1562
1563 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1564 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1565 rval == QLA_SUCCESS; cnt--) {
1566 if (cnt) {
1567 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1568 udelay(10);
1569 } else
1570 rval = QLA_FUNCTION_TIMEOUT;
1571 }
1572 if (rval != QLA_SUCCESS)
1573 goto done;
1574
1575next_test:
1576 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1577 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1578
1579done:
1580 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1581 RD_REG_DWORD(&reg->iobase_window);
1582}
1583
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
1593irqreturn_t
7d12e780 1594qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 1595{
e315cd28
AC
1596 scsi_qla_host_t *vha;
1597 struct qla_hw_data *ha;
9a853f71
AV
1598 struct device_reg_24xx __iomem *reg;
1599 int status;
9a853f71
AV
1600 unsigned long iter;
1601 uint32_t stat;
1602 uint32_t hccr;
1603 uint16_t mb[4];
e315cd28 1604 struct rsp_que *rsp;
9a853f71 1605
e315cd28
AC
1606 rsp = (struct rsp_que *) dev_id;
1607 if (!rsp) {
9a853f71 1608 printk(KERN_INFO
e315cd28 1609 "%s(): NULL response queue pointer\n", __func__);
9a853f71
AV
1610 return IRQ_NONE;
1611 }
1612
e315cd28 1613 ha = rsp->hw;
9a853f71
AV
1614 reg = &ha->iobase->isp24;
1615 status = 0;
1616
c6952483 1617 spin_lock(&ha->hardware_lock);
e315cd28 1618 vha = qla2x00_get_rsp_host(rsp);
9a853f71
AV
1619 for (iter = 50; iter--; ) {
1620 stat = RD_REG_DWORD(&reg->host_status);
1621 if (stat & HSRX_RISC_PAUSED) {
14e660e6
SJ
1622 if (pci_channel_offline(ha->pdev))
1623 break;
1624
9a853f71
AV
1625 hccr = RD_REG_DWORD(&reg->hccr);
1626
1627 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1628 "Dumping firmware!\n", hccr);
05236a05 1629
e315cd28 1630 qla2xxx_check_risc_status(vha);
05236a05 1631
e315cd28
AC
1632 ha->isp_ops->fw_dump(vha, 1);
1633 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
1634 break;
1635 } else if ((stat & HSRX_RISC_INT) == 0)
1636 break;
1637
1638 switch (stat & 0xff) {
1639 case 0x1:
1640 case 0x2:
1641 case 0x10:
1642 case 0x11:
e315cd28 1643 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
1644 status |= MBX_INTERRUPT;
1645
1646 break;
1647 case 0x12:
1648 mb[0] = MSW(stat);
1649 mb[1] = RD_REG_WORD(&reg->mailbox1);
1650 mb[2] = RD_REG_WORD(&reg->mailbox2);
1651 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 1652 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
1653 break;
1654 case 0x13:
73208dfd
AC
1655 case 0x14:
1656 qla24xx_process_response_queue(rsp);
9a853f71
AV
1657 break;
1658 default:
1659 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1660 "(%d).\n",
e315cd28 1661 vha->host_no, stat & 0xff));
9a853f71
AV
1662 break;
1663 }
1664 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1665 RD_REG_DWORD_RELAXED(&reg->hccr);
1666 }
c6952483 1667 spin_unlock(&ha->hardware_lock);
9a853f71
AV
1668
1669 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1670 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 1671 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 1672 complete(&ha->mbx_intr_comp);
9a853f71
AV
1673 }
1674
1675 return IRQ_HANDLED;
1676}
1677
a8488abe
AV
1678static irqreturn_t
1679qla24xx_msix_rsp_q(int irq, void *dev_id)
1680{
e315cd28
AC
1681 struct qla_hw_data *ha;
1682 struct rsp_que *rsp;
a8488abe 1683 struct device_reg_24xx __iomem *reg;
a8488abe 1684
e315cd28
AC
1685 rsp = (struct rsp_que *) dev_id;
1686 if (!rsp) {
1687 printk(KERN_INFO
1688 "%s(): NULL response queue pointer\n", __func__);
1689 return IRQ_NONE;
1690 }
1691 ha = rsp->hw;
a8488abe
AV
1692 reg = &ha->iobase->isp24;
1693
0e973a24 1694 spin_lock_irq(&ha->hardware_lock);
a8488abe 1695
73208dfd 1696 qla24xx_process_response_queue(rsp);
a8488abe 1697 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
a8488abe 1698
0e973a24 1699 spin_unlock_irq(&ha->hardware_lock);
a8488abe
AV
1700
1701 return IRQ_HANDLED;
1702}
1703
73208dfd
AC
1704static irqreturn_t
1705qla25xx_msix_rsp_q(int irq, void *dev_id)
1706{
1707 struct qla_hw_data *ha;
1708 struct rsp_que *rsp;
1709 struct device_reg_24xx __iomem *reg;
1710 uint16_t msix_disabled_hccr = 0;
1711
1712 rsp = (struct rsp_que *) dev_id;
1713 if (!rsp) {
1714 printk(KERN_INFO
1715 "%s(): NULL response queue pointer\n", __func__);
1716 return IRQ_NONE;
1717 }
1718 ha = rsp->hw;
1719 reg = &ha->iobase->isp24;
1720
1721 spin_lock_irq(&ha->hardware_lock);
1722
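	/*
	 * rsp->options carries per-queue flags: BIT_22 (base queue) or
	 * BIT_6 (additional queues) set means the interrupt handshake is
	 * disabled, so the RISC interrupt is not cleared via HCCR below.
	 */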
1723 msix_disabled_hccr = rsp->options;
1724 if (!rsp->id)
1725 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1726 else
17d98630 1727 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
73208dfd
AC
1728
1729 qla24xx_process_response_queue(rsp);
1730
1731 if (!msix_disabled_hccr)
1732 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1733
1734 spin_unlock_irq(&ha->hardware_lock);
1735
1736 return IRQ_HANDLED;
1737}
1738
a8488abe
AV
1739static irqreturn_t
1740qla24xx_msix_default(int irq, void *dev_id)
1741{
e315cd28
AC
1742 scsi_qla_host_t *vha;
1743 struct qla_hw_data *ha;
1744 struct rsp_que *rsp;
a8488abe
AV
1745 struct device_reg_24xx __iomem *reg;
1746 int status;
a8488abe
AV
1747 uint32_t stat;
1748 uint32_t hccr;
1749 uint16_t mb[4];
1750
e315cd28
AC
1751 rsp = (struct rsp_que *) dev_id;
1752 if (!rsp) {
1753 DEBUG(printk(
1754 "%s(): NULL response queue pointer\n", __func__));
1755 return IRQ_NONE;
1756 }
1757 ha = rsp->hw;
a8488abe
AV
1758 reg = &ha->iobase->isp24;
1759 status = 0;
1760
0e973a24 1761 spin_lock_irq(&ha->hardware_lock);
e315cd28 1762 vha = qla2x00_get_rsp_host(rsp);
87f27015 1763 do {
a8488abe
AV
1764 stat = RD_REG_DWORD(&reg->host_status);
1765 if (stat & HSRX_RISC_PAUSED) {
14e660e6
SJ
1766 if (pci_channel_offline(ha->pdev))
1767 break;
1768
a8488abe
AV
1769 hccr = RD_REG_DWORD(&reg->hccr);
1770
1771 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1772 "Dumping firmware!\n", hccr);
05236a05 1773
e315cd28 1774 qla2xxx_check_risc_status(vha);
05236a05 1775
e315cd28
AC
1776 ha->isp_ops->fw_dump(vha, 1);
1777 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
1778 break;
1779 } else if ((stat & HSRX_RISC_INT) == 0)
1780 break;
1781
1782 switch (stat & 0xff) {
1783 case 0x1:
1784 case 0x2:
1785 case 0x10:
1786 case 0x11:
e315cd28 1787 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
1788 status |= MBX_INTERRUPT;
1789
1790 break;
1791 case 0x12:
1792 mb[0] = MSW(stat);
1793 mb[1] = RD_REG_WORD(&reg->mailbox1);
1794 mb[2] = RD_REG_WORD(&reg->mailbox2);
1795 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 1796 qla2x00_async_event(vha, rsp, mb);
a8488abe
AV
1797 break;
1798 case 0x13:
73208dfd
AC
1799 case 0x14:
1800 qla24xx_process_response_queue(rsp);
a8488abe
AV
1801 break;
1802 default:
1803 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1804 "(%d).\n",
e315cd28 1805 vha->host_no, stat & 0xff));
a8488abe
AV
1806 break;
1807 }
1808 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 1809 } while (0);
0e973a24 1810 spin_unlock_irq(&ha->hardware_lock);
a8488abe
AV
1811
1812 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1813 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 1814 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 1815 complete(&ha->mbx_intr_comp);
a8488abe
AV
1816 }
1817
1818 return IRQ_HANDLED;
1819}
1820
1821/* Interrupt handling helpers. */
1822
1823struct qla_init_msix_entry {
1824 uint16_t entry;
1825 uint16_t index;
1826 const char *name;
476834c2 1827 irq_handler_t handler;
a8488abe
AV
1828};
1829
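/*
 * MSI-X vector layout: vector 0 (base_queue) services mailbox completions
 * and asynchronous events; vector 1 services response-queue updates, using
 * multi_rsp_queue when multiqueue is enabled and base_rsp_queue otherwise.
 */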
73208dfd
AC
1830static struct qla_init_msix_entry base_queue = {
1831 .entry = 0,
1832 .index = 0,
1833 .name = "qla2xxx (default)",
1834 .handler = qla24xx_msix_default,
1835};
1836
1837static struct qla_init_msix_entry base_rsp_queue = {
1838 .entry = 1,
1839 .index = 1,
1840 .name = "qla2xxx (rsp_q)",
1841 .handler = qla24xx_msix_rsp_q,
1842};
a8488abe 1843
73208dfd
AC
1844static struct qla_init_msix_entry multi_rsp_queue = {
1845 .entry = 1,
1846 .index = 1,
1847 .name = "qla2xxx (multi_q)",
1848 .handler = qla25xx_msix_rsp_q,
a8488abe
AV
1849};
1850
1851static void
e315cd28 1852qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
1853{
1854 int i;
1855 struct qla_msix_entry *qentry;
1856
73208dfd
AC
1857 for (i = 0; i < ha->msix_count; i++) {
1858 qentry = &ha->msix_entries[i];
a8488abe 1859 if (qentry->have_irq)
73208dfd 1860 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
1861 }
1862 pci_disable_msix(ha->pdev);
73208dfd
AC
1863 kfree(ha->msix_entries);
1864 ha->msix_entries = NULL;
1865 ha->flags.msix_enabled = 0;
a8488abe
AV
1866}
1867
1868static int
73208dfd 1869qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
1870{
1871 int i, ret;
73208dfd 1872 struct msix_entry *entries;
a8488abe 1873 struct qla_msix_entry *qentry;
73208dfd
AC
1874 struct qla_init_msix_entry *msix_queue;
1875
1876 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1877 GFP_KERNEL);
1878 if (!entries)
1879 return -ENOMEM;
a8488abe 1880
73208dfd
AC
1881 for (i = 0; i < ha->msix_count; i++)
1882 entries[i].entry = i;
a8488abe 1883
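	/*
	 * A positive return from pci_enable_msix() is the number of vectors
	 * that could have been allocated; the failure path below retries
	 * with that reduced count before giving up.
	 */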
73208dfd 1884 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe
AV
1885 if (ret) {
1886 qla_printk(KERN_WARNING, ha,
73208dfd
AC
1887 "MSI-X: Failed to enable support -- %d/%d\n"
1888 " Retry with %d vectors\n", ha->msix_count, ret, ret);
1889 ha->msix_count = ret;
1890 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1891 if (ret) {
1892 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1893 " support, giving up -- %d/%d\n",
1894 ha->msix_count, ret);
1895 goto msix_out;
1896 }
1897 ha->max_queues = ha->msix_count - 1;
1898 }
1899 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1900 ha->msix_count, GFP_KERNEL);
1901 if (!ha->msix_entries) {
1902 ret = -ENOMEM;
a8488abe
AV
1903 goto msix_out;
1904 }
1905 ha->flags.msix_enabled = 1;
1906
73208dfd
AC
1907 for (i = 0; i < ha->msix_count; i++) {
1908 qentry = &ha->msix_entries[i];
1909 qentry->vector = entries[i].vector;
1910 qentry->entry = entries[i].entry;
a8488abe 1911 qentry->have_irq = 0;
73208dfd 1912 qentry->rsp = NULL;
a8488abe
AV
1913 }
1914
73208dfd
AC
1915 /* Enable MSI-X for AENs for queue 0 */
1916 qentry = &ha->msix_entries[0];
1917 ret = request_irq(qentry->vector, base_queue.handler, 0,
1918 base_queue.name, rsp);
1919 if (ret) {
1920 qla_printk(KERN_WARNING, ha,
1921 "MSI-X: Unable to register handler -- %x/%d.\n",
1922 qentry->vector, ret);
1923 qla24xx_disable_msix(ha);
1924 goto msix_out;
1925 }
1926 qentry->have_irq = 1;
1927 qentry->rsp = rsp;
1928
1929 /* Enable MSI-X vector for response queue update for queue 0 */
1930 if (ha->max_queues > 1 && ha->mqiobase) {
1931 ha->mqenable = 1;
1932 msix_queue = &multi_rsp_queue;
1933 qla_printk(KERN_INFO, ha,
1934 "MQ enabled, Number of Queue Resources: %d \n",
1935 ha->max_queues);
1936 } else {
1937 ha->mqenable = 0;
1938 msix_queue = &base_rsp_queue;
1939 }
1940
1941 qentry = &ha->msix_entries[1];
1942 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1943 msix_queue->name, rsp);
1944 if (ret) {
1945 qla_printk(KERN_WARNING, ha,
1946 "MSI-X: Unable to register handler -- %x/%d.\n",
1947 qentry->vector, ret);
1948 qla24xx_disable_msix(ha);
1949 ha->mqenable = 0;
1950 goto msix_out;
1951 }
1952 qentry->have_irq = 1;
1953 qentry->rsp = rsp;
1954
a8488abe 1955msix_out:
73208dfd 1956 kfree(entries);
a8488abe
AV
1957 return ret;
1958}
1959
1960int
73208dfd 1961qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
1962{
1963 int ret;
963b0fdd 1964 device_reg_t __iomem *reg = ha->iobase;
a8488abe
AV
1965
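	/*
	 * Interrupt setup order: try MSI-X on capable ISPs, fall back to
	 * MSI where supported, and finally to a shared INTa line.
	 */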
1966 /* If possible, enable MSI-X. */
3a03eb79
AV
1967 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
1968 !IS_QLA8432(ha) && !IS_QLA8001(ha))
a8488abe
AV
1969 goto skip_msix;
1970
e315cd28
AC
1971 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1972 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
a8488abe 1973 DEBUG2(qla_printk(KERN_WARNING, ha,
e315cd28
AC
1974 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1975 ha->pdev->revision, ha->fw_attributes));
a8488abe
AV
1976
1977 goto skip_msix;
1978 }
1979
da7429f9
AV
1980 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1981 (ha->pdev->subsystem_device == 0x7040 ||
1982 ha->pdev->subsystem_device == 0x7041 ||
1983 ha->pdev->subsystem_device == 0x1705)) {
1984 DEBUG2(qla_printk(KERN_WARNING, ha,
1985 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1986 ha->pdev->subsystem_vendor,
1987 ha->pdev->subsystem_device));
1988
1989 goto skip_msi;
1990 }
1991
73208dfd 1992 ret = qla24xx_enable_msix(ha, rsp);
a8488abe
AV
1993 if (!ret) {
1994 DEBUG2(qla_printk(KERN_INFO, ha,
1995 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1996 ha->fw_attributes));
963b0fdd 1997 goto clear_risc_ints;
a8488abe
AV
1998 }
1999 qla_printk(KERN_WARNING, ha,
2000 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
2001skip_msix:
cbedb601 2002
3a03eb79
AV
2003 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2004 !IS_QLA8001(ha))
cbedb601
AV
2005 goto skip_msi;
2006
2007 ret = pci_enable_msi(ha->pdev);
2008 if (!ret) {
2009 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2010 ha->flags.msi_enabled = 1;
2011 }
2012skip_msi:
2013
fd34f556 2014 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
e315cd28 2015 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2016 if (ret) {
a8488abe
AV
2017 qla_printk(KERN_WARNING, ha,
2018 "Failed to reserve interrupt %d already in use.\n",
2019 ha->pdev->irq);
963b0fdd
AV
2020 goto fail;
2021 }
2022 ha->flags.inta_enabled = 1;
963b0fdd
AV
2023clear_risc_ints:
2024
3a03eb79
AV
2025 /*
2026 * FIXME: Noted that 8014s were being dropped during NK testing.
2027 * Timing deltas during MSI-X/INTa transitions?
2028 */
2029 if (IS_QLA81XX(ha))
2030 goto fail;
c6952483 2031 spin_lock_irq(&ha->hardware_lock);
963b0fdd
AV
2032 if (IS_FWI2_CAPABLE(ha)) {
2033 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2034 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2035 } else {
2036 WRT_REG_WORD(&reg->isp.semaphore, 0);
2037 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2038 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
a8488abe 2039 }
c6952483 2040 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2041
963b0fdd 2042fail:
a8488abe
AV
2043 return ret;
2044}
2045
2046void
e315cd28 2047qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2048{
e315cd28 2049 struct qla_hw_data *ha = vha->hw;
73208dfd 2050 struct rsp_que *rsp = ha->rsp_q_map[0];
a8488abe
AV
2051
2052 if (ha->flags.msix_enabled)
2053 qla24xx_disable_msix(ha);
cbedb601 2054 else if (ha->flags.inta_enabled) {
e315cd28 2055 free_irq(ha->pdev->irq, rsp);
cbedb601
AV
2056 pci_disable_msi(ha->pdev);
2057 }
a8488abe 2058}
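/*
 * Illustrative pairing only (the callers live elsewhere in the driver):
 * the PCI probe path requests interrupts once the base response queue
 * exists, and teardown releases them, e.g.
 *
 *	ret = qla2x00_request_irqs(ha, rsp);
 *	if (ret)
 *		goto probe_failed;
 *	...
 *	qla2x00_free_irqs(vha);
 */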
e315cd28
AC
2059
2060static struct scsi_qla_host *
2061qla2x00_get_rsp_host(struct rsp_que *rsp)
2062{
2063 srb_t *sp;
2064 struct qla_hw_data *ha = rsp->hw;
2065 struct scsi_qla_host *vha = NULL;
73208dfd
AC
2066 struct sts_entry_24xx *pkt;
2067 struct req_que *req;
2068
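	/*
	 * For a non-base response queue, use the handle in the status entry
	 * to look up the outstanding command and take the vha from its
	 * fcport; otherwise (or on a miss) fall back to the base host.
	 */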
2069 if (rsp->id) {
2070 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2071 req = rsp->req;
2072 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2073 sp = req->outstanding_cmds[pkt->handle];
2074 if (sp)
444786d7 2075 vha = sp->fcport->vha;
73208dfd 2076 }
e315cd28
AC
2077 }
2078 if (!vha)
73208dfd 2079 /* handle it in base queue */
e315cd28
AC
2080 vha = pci_get_drvdata(ha->pdev);
2081
2082 return vha;
2083}
73208dfd
AC
2084
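/* Request the MSI-X vector assigned to an additional (multiqueue) rsp queue. */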
2085int qla25xx_request_irq(struct rsp_que *rsp)
2086{
2087 struct qla_hw_data *ha = rsp->hw;
2088 struct qla_init_msix_entry *intr = &multi_rsp_queue;
2089 struct qla_msix_entry *msix = rsp->msix;
2090 int ret;
2091
2092 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2093 if (ret) {
2094 qla_printk(KERN_WARNING, ha,
2095 "MSI-X: Unable to register handler -- %x/%d.\n",
2096 msix->vector, ret);
2097 return ret;
2098 }
2099 msix->have_irq = 1;
2100 msix->rsp = rsp;
2101 return ret;
2102}
2103
17d98630
AC
2104void
2105qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2106{
2107 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2108 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2109}
2110
2111void
2112qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2113{
2114 device_reg_t __iomem *reg = (void *) ha->iobase;
2115 WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2116}
2117