qla2xxx: Remove restriction on starting remote device discovery on port update.
[deliverable/linux.git] / drivers / scsi / qla2xxx / qla_isr.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsi_bsg_fc.h>
14 #include <scsi/scsi_eh.h>
15
16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
20 sts_entry_t *);
21
22 /**
23 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
24 * @irq: interrupt number
25 * @dev_id: SCSI driver HA context
26 *
27 * Called by system whenever the host adapter generates an interrupt.
28 *
29 * Returns handled flag.
30 */
31 irqreturn_t
32 qla2100_intr_handler(int irq, void *dev_id)
33 {
34 scsi_qla_host_t *vha;
35 struct qla_hw_data *ha;
36 struct device_reg_2xxx __iomem *reg;
37 int status;
38 unsigned long iter;
39 uint16_t hccr;
40 uint16_t mb[4];
41 struct rsp_que *rsp;
42 unsigned long flags;
43
44 rsp = (struct rsp_que *) dev_id;
45 if (!rsp) {
46 ql_log(ql_log_info, NULL, 0x505d,
47 "%s: NULL response queue pointer.\n", __func__);
48 return (IRQ_NONE);
49 }
50
51 ha = rsp->hw;
52 reg = &ha->iobase->isp;
53 status = 0;
54
55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 vha = pci_get_drvdata(ha->pdev);
57 for (iter = 50; iter--; ) {
58 hccr = RD_REG_WORD(&reg->hccr);
59 /* Check for PCI disconnection */
60 if (hccr == 0xffff) {
61 /*
62 * Schedule this on the default system workqueue so that
63 * all the adapter workqueues and the DPC thread can be
64 * shut down cleanly.
65 */
66 schedule_work(&ha->board_disable);
67 break;
68 }
69 if (hccr & HCCR_RISC_PAUSE) {
70 if (pci_channel_offline(ha->pdev))
71 break;
72
73 /*
74 * Issue a "HARD" reset in order for the RISC interrupt
75 * bit to be cleared. Schedule a big hammer to get
76 * out of the RISC PAUSED state.
77 */
78 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
79 RD_REG_WORD(&reg->hccr);
80
81 ha->isp_ops->fw_dump(vha, 1);
82 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
83 break;
84 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
85 break;
86
87 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
88 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
89 RD_REG_WORD(&reg->hccr);
90
91 /* Get mailbox data. */
92 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
93 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
94 qla2x00_mbx_completion(vha, mb[0]);
95 status |= MBX_INTERRUPT;
96 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
97 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
98 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
99 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
100 qla2x00_async_event(vha, rsp, mb);
101 } else {
102 /*EMPTY*/
103 ql_dbg(ql_dbg_async, vha, 0x5025,
104 "Unrecognized interrupt type (%d).\n",
105 mb[0]);
106 }
107 /* Release mailbox registers. */
108 WRT_REG_WORD(&reg->semaphore, 0);
109 RD_REG_WORD(&reg->semaphore);
110 } else {
111 qla2x00_process_response_queue(rsp);
112
113 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
114 RD_REG_WORD(&reg->hccr);
115 }
116 }
117 qla2x00_handle_mbx_completion(ha, status);
118 spin_unlock_irqrestore(&ha->hardware_lock, flags);
119
120 return (IRQ_HANDLED);
121 }
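/*
 * Registration sketch (illustrative, not part of this file): interrupt
 * handlers here receive the per-response-queue pointer as the dev_id
 * cookie, so a setup path along these lines is assumed:
 *
 *	ret = request_irq(ha->pdev->irq, qla2100_intr_handler,
 *	    IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
 *
 * which is why dev_id is cast back to struct rsp_que above.
 */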
122
123 bool
124 qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
125 {
126 /* Check for PCI disconnection */
127 if (reg == 0xffffffff) {
128 /*
129 * Schedule this on the default system workqueue so that all the
130 * adapter workqueues and the DPC thread can be shut down
131 * cleanly.
132 */
133 schedule_work(&vha->hw->board_disable);
134 return true;
135 } else
136 return false;
137 }
138
139 /**
140 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
141 * @irq: interrupt number
142 * @dev_id: SCSI driver HA context
143 *
144 * Called by system whenever the host adapter generates an interrupt.
145 *
146 * Returns handled flag.
147 */
148 irqreturn_t
149 qla2300_intr_handler(int irq, void *dev_id)
150 {
151 scsi_qla_host_t *vha;
152 struct device_reg_2xxx __iomem *reg;
153 int status;
154 unsigned long iter;
155 uint32_t stat;
156 uint16_t hccr;
157 uint16_t mb[4];
158 struct rsp_que *rsp;
159 struct qla_hw_data *ha;
160 unsigned long flags;
161
162 rsp = (struct rsp_que *) dev_id;
163 if (!rsp) {
164 ql_log(ql_log_info, NULL, 0x5058,
165 "%s: NULL response queue pointer.\n", __func__);
166 return (IRQ_NONE);
167 }
168
169 ha = rsp->hw;
170 reg = &ha->iobase->isp;
171 status = 0;
172
173 spin_lock_irqsave(&ha->hardware_lock, flags);
174 vha = pci_get_drvdata(ha->pdev);
175 for (iter = 50; iter--; ) {
176 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
177 if (qla2x00_check_reg_for_disconnect(vha, stat))
178 break;
179 if (stat & HSR_RISC_PAUSED) {
180 if (unlikely(pci_channel_offline(ha->pdev)))
181 break;
182
183 hccr = RD_REG_WORD(&reg->hccr);
184
185 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
186 ql_log(ql_log_warn, vha, 0x5026,
187 "Parity error -- HCCR=%x, Dumping "
188 "firmware.\n", hccr);
189 else
190 ql_log(ql_log_warn, vha, 0x5027,
191 "RISC paused -- HCCR=%x, Dumping "
192 "firmware.\n", hccr);
193
194 /*
195 * Issue a "HARD" reset in order for the RISC
196 * interrupt bit to be cleared. Schedule a big
197 * hammer to get out of the RISC PAUSED state.
198 */
199 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
200 RD_REG_WORD(&reg->hccr);
201
202 ha->isp_ops->fw_dump(vha, 1);
203 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
204 break;
205 } else if ((stat & HSR_RISC_INT) == 0)
206 break;
207
208 switch (stat & 0xff) {
209 case 0x1:
210 case 0x2:
211 case 0x10:
212 case 0x11:
213 qla2x00_mbx_completion(vha, MSW(stat));
214 status |= MBX_INTERRUPT;
215
216 /* Release mailbox registers. */
217 WRT_REG_WORD(&reg->semaphore, 0);
218 break;
219 case 0x12:
220 mb[0] = MSW(stat);
221 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
222 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
223 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
224 qla2x00_async_event(vha, rsp, mb);
225 break;
226 case 0x13:
227 qla2x00_process_response_queue(rsp);
228 break;
229 case 0x15:
230 mb[0] = MBA_CMPLT_1_16BIT;
231 mb[1] = MSW(stat);
232 qla2x00_async_event(vha, rsp, mb);
233 break;
234 case 0x16:
235 mb[0] = MBA_SCSI_COMPLETION;
236 mb[1] = MSW(stat);
237 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
238 qla2x00_async_event(vha, rsp, mb);
239 break;
240 default:
241 ql_dbg(ql_dbg_async, vha, 0x5028,
242 "Unrecognized interrupt type (%d).\n", stat & 0xff);
243 break;
244 }
245 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
246 RD_REG_WORD_RELAXED(&reg->hccr);
247 }
248 qla2x00_handle_mbx_completion(ha, status);
249 spin_unlock_irqrestore(&ha->hardware_lock, flags);
250
251 return (IRQ_HANDLED);
252 }
253
254 /**
255 * qla2x00_mbx_completion() - Process mailbox command completions.
256 * @vha: SCSI driver HA context
257 * @mb0: Mailbox0 register
258 */
259 static void
260 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
261 {
262 uint16_t cnt;
263 uint32_t mboxes;
264 uint16_t __iomem *wptr;
265 struct qla_hw_data *ha = vha->hw;
266 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
267
268 /* Read all mbox registers? */
269 mboxes = (1 << ha->mbx_count) - 1;
270 if (!ha->mcp)
271 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
272 else
273 mboxes = ha->mcp->in_mb;
274
275 /* Load return mailbox registers. */
276 ha->flags.mbox_int = 1;
277 ha->mailbox_out[0] = mb0;
278 mboxes >>= 1;
279 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
280
281 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
282 if (IS_QLA2200(ha) && cnt == 8)
283 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
284 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
285 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
286 else if (mboxes & BIT_0)
287 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
288
289 wptr++;
290 mboxes >>= 1;
291 }
292 }
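/*
 * Worked example (illustrative): for a mailbox command that requested
 * in_mb = MBX_0|MBX_1|MBX_3 (mask 0b1011), the loop above shifts the
 * mask once per register, so mailbox_out[1] and mailbox_out[3] are read
 * back while mailbox_out[2] is skipped because its bit is clear.
 */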
293
294 static void
295 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
296 {
297 static char *event[] =
298 { "Complete", "Request Notification", "Time Extension" };
299 int rval;
300 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
301 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
302 uint16_t __iomem *wptr;
303 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
304
305 /* Seed data -- mailbox1 -> mailbox7. */
306 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
307 wptr = (uint16_t __iomem *)&reg24->mailbox1;
308 else if (IS_QLA8044(vha->hw))
309 wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
310 else
311 return;
312
313 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
314 mb[cnt] = RD_REG_WORD(wptr);
315
316 ql_dbg(ql_dbg_async, vha, 0x5021,
317 "Inter-Driver Communication %s -- "
318 "%04x %04x %04x %04x %04x %04x %04x.\n",
319 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
320 mb[4], mb[5], mb[6]);
321 switch (aen) {
322 /* Handle IDC Error completion case. */
323 case MBA_IDC_COMPLETE:
324 if (mb[1] >> 15) {
325 vha->hw->flags.idc_compl_status = 1;
326 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
327 complete(&vha->hw->dcbx_comp);
328 }
329 break;
330
331 case MBA_IDC_NOTIFY:
332 /* Acknowledgement needed? [Notify && non-zero timeout]. */
333 timeout = (descr >> 8) & 0xf;
334 ql_dbg(ql_dbg_async, vha, 0x5022,
335 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
336 vha->host_no, event[aen & 0xff], timeout);
337
338 if (!timeout)
339 return;
340 rval = qla2x00_post_idc_ack_work(vha, mb);
341 if (rval != QLA_SUCCESS)
342 ql_log(ql_log_warn, vha, 0x5023,
343 "IDC failed to post ACK.\n");
344 break;
345 case MBA_IDC_TIME_EXT:
346 vha->hw->idc_extend_tmo = descr;
347 ql_dbg(ql_dbg_async, vha, 0x5087,
348 "%lu Inter-Driver Communication %s -- "
349 "Extend timeout by=%d.\n",
350 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
351 break;
352 }
353 }
354
355 #define LS_UNKNOWN 2
356 const char *
357 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
358 {
359 static const char *const link_speeds[] = {
360 "1", "2", "?", "4", "8", "16", "32", "10"
361 };
362 #define QLA_LAST_SPEED 7
363
364 if (IS_QLA2100(ha) || IS_QLA2200(ha))
365 return link_speeds[0];
366 else if (speed == 0x13)
367 return link_speeds[QLA_LAST_SPEED];
368 else if (speed < QLA_LAST_SPEED)
369 return link_speeds[speed];
370 else
371 return link_speeds[LS_UNKNOWN];
372 }
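/*
 * Example (illustrative): firmware speed code 0x00 maps to "1" Gbps,
 * 0x03 to "4" Gbps and 0x05 to "16" Gbps, while CNA parts report the
 * special code 0x13, which maps to "10" Gbps.
 */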
373
374 static void
375 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
376 {
377 struct qla_hw_data *ha = vha->hw;
378
379 /*
380 * 8200 AEN Interpretation:
381 * mb[0] = AEN code
382 * mb[1] = AEN Reason code
383 * mb[2] = LSW of Peg-Halt Status-1 Register
384 * mb[6] = MSW of Peg-Halt Status-1 Register
385 * mb[3] = LSW of Peg-Halt Status-2 register
386 * mb[7] = MSW of Peg-Halt Status-2 register
387 * mb[4] = IDC Device-State Register value
388 * mb[5] = IDC Driver-Presence Register value
389 */
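/*
 * Example (illustrative): each 32-bit register is split across two
 * 16-bit mailboxes, so the full Peg-Halt Status-1 value is
 * (mb[6] << 16) | mb[2] and Status-2 is (mb[7] << 16) | mb[3].
 */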
390 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
391 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
392 mb[0], mb[1], mb[2], mb[6]);
393 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
394 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
395 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
396
397 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
398 IDC_HEARTBEAT_FAILURE)) {
399 ha->flags.nic_core_hung = 1;
400 ql_log(ql_log_warn, vha, 0x5060,
401 "83XX: F/W Error Reported: Check if reset required.\n");
402
403 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
404 uint32_t protocol_engine_id, fw_err_code, err_level;
405
406 /*
407 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
408 * - PEG-Halt Status-1 Register:
409 * (LSW = mb[2], MSW = mb[6])
410 * Bits 0-7 = protocol-engine ID
411 * Bits 8-28 = f/w error code
412 * Bits 29-31 = Error-level
413 * Error-level 0x1 = Non-Fatal error
414 * Error-level 0x2 = Recoverable Fatal error
415 * Error-level 0x4 = UnRecoverable Fatal error
416 * - PEG-Halt Status-2 Register:
417 * (LSW = mb[3], MSW = mb[7])
418 */
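/*
 * Worked example (illustrative values): with mb[2] = 0x0805 and
 * mb[6] = 0x4001 the decode below yields protocol_engine_id = 0x05,
 * fw_err_code = (0x08 | (0x0001 << 8)) = 0x0108 and
 * err_level = (0x4000 >> 13) = 0x2, i.e. a Recoverable Fatal error
 * that schedules QLA83XX_NIC_CORE_RESET.
 */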
419 protocol_engine_id = (mb[2] & 0xff);
420 fw_err_code = (((mb[2] & 0xff00) >> 8) |
421 ((mb[6] & 0x1fff) << 8));
422 err_level = ((mb[6] & 0xe000) >> 13);
423 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
424 "Register: protocol_engine_id=0x%x "
425 "fw_err_code=0x%x err_level=0x%x.\n",
426 protocol_engine_id, fw_err_code, err_level);
427 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
428 "Register: 0x%x%x.\n", mb[7], mb[3]);
429 if (err_level == ERR_LEVEL_NON_FATAL) {
430 ql_log(ql_log_warn, vha, 0x5063,
431 "Not a fatal error, f/w has recovered "
432 "iteself.\n");
433 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
434 ql_log(ql_log_fatal, vha, 0x5064,
435 "Recoverable Fatal error: Chip reset "
436 "required.\n");
437 qla83xx_schedule_work(vha,
438 QLA83XX_NIC_CORE_RESET);
439 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
440 ql_log(ql_log_fatal, vha, 0x5065,
441 "Unrecoverable Fatal error: Set FAILED "
442 "state, reboot required.\n");
443 qla83xx_schedule_work(vha,
444 QLA83XX_NIC_CORE_UNRECOVERABLE);
445 }
446 }
447
448 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
449 uint16_t peg_fw_state, nw_interface_link_up;
450 uint16_t nw_interface_signal_detect, sfp_status;
451 uint16_t htbt_counter, htbt_monitor_enable;
452 uint16_t sfp_additional_info, sfp_multirate;
453 uint16_t sfp_tx_fault, link_speed, dcbx_status;
454
455 /*
456 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
457 * - PEG-to-FC Status Register:
458 * (LSW = mb[2], MSW = mb[6])
459 * Bits 0-7 = Peg-Firmware state
460 * Bit 8 = N/W Interface Link-up
461 * Bit 9 = N/W Interface signal detected
462 * Bits 10-11 = SFP Status
463 * SFP Status 0x0 = SFP+ transceiver not expected
464 * SFP Status 0x1 = SFP+ transceiver not present
465 * SFP Status 0x2 = SFP+ transceiver invalid
466 * SFP Status 0x3 = SFP+ transceiver present and
467 * valid
468 * Bits 12-14 = Heartbeat Counter
469 * Bit 15 = Heartbeat Monitor Enable
470 * Bits 16-17 = SFP Additional Info
471 * SFP info 0x0 = Unrecognized transceiver for
472 * Ethernet
473 * SFP info 0x1 = SFP+ brand validation failed
474 * SFP info 0x2 = SFP+ speed validation failed
475 * SFP info 0x3 = SFP+ access error
476 * Bit 18 = SFP Multirate
477 * Bit 19 = SFP Tx Fault
478 * Bits 20-22 = Link Speed
479 * Bits 23-27 = Reserved
480 * Bits 28-30 = DCBX Status
481 * DCBX Status 0x0 = DCBX Disabled
482 * DCBX Status 0x1 = DCBX Enabled
483 * DCBX Status 0x2 = DCBX Exchange error
484 * Bit 31 = Reserved
485 */
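/*
 * Example (illustrative): mb[2] = 0x0c01 would decode below to
 * peg_fw_state = 0x01, nw_interface_link_up = 0,
 * nw_interface_signal_detect = 0 and sfp_status = 0x3 (SFP+
 * transceiver present and valid).
 */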
486 peg_fw_state = (mb[2] & 0x00ff);
487 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
488 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
489 sfp_status = ((mb[2] & 0x0c00) >> 10);
490 htbt_counter = ((mb[2] & 0x7000) >> 12);
491 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
492 sfp_additional_info = (mb[6] & 0x0003);
493 sfp_multirate = ((mb[6] & 0x0004) >> 2);
494 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
495 link_speed = ((mb[6] & 0x0070) >> 4);
496 dcbx_status = ((mb[6] & 0x7000) >> 12);
497
498 ql_log(ql_log_warn, vha, 0x5066,
499 "Peg-to-Fc Status Register:\n"
500 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
501 "nw_interface_signal_detect=0x%x"
502 "\nsfp_statis=0x%x.\n ", peg_fw_state,
503 nw_interface_link_up, nw_interface_signal_detect,
504 sfp_status);
505 ql_log(ql_log_warn, vha, 0x5067,
506 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
507 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
508 htbt_counter, htbt_monitor_enable,
509 sfp_additional_info, sfp_multirate);
510 ql_log(ql_log_warn, vha, 0x5068,
511 "sfp_tx_fault=0x%x, link_state=0x%x, "
512 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
513 dcbx_status);
514
515 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
516 }
517
518 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
519 ql_log(ql_log_warn, vha, 0x5069,
520 "Heartbeat Failure encountered, chip reset "
521 "required.\n");
522
523 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
524 }
525 }
526
527 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
528 ql_log(ql_log_info, vha, 0x506a,
529 "IDC Device-State changed = 0x%x.\n", mb[4]);
530 if (ha->flags.nic_core_reset_owner)
531 return;
532 qla83xx_schedule_work(vha, MBA_IDC_AEN);
533 }
534 }
535
536 int
537 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
538 {
539 struct qla_hw_data *ha = vha->hw;
540 scsi_qla_host_t *vp;
541 uint32_t vp_did;
542 unsigned long flags;
543 int ret = 0;
544
545 if (!ha->num_vhosts)
546 return ret;
547
548 spin_lock_irqsave(&ha->vport_slock, flags);
549 list_for_each_entry(vp, &ha->vp_list, list) {
550 vp_did = vp->d_id.b24;
551 if (vp_did == rscn_entry) {
552 ret = 1;
553 break;
554 }
555 }
556 spin_unlock_irqrestore(&ha->vport_slock, flags);
557
558 return ret;
559 }
560
561 /**
562 * qla2x00_async_event() - Process asynchronous events.
563 * @vha: SCSI driver HA context
564 * @mb: Mailbox registers (0 - 3)
565 */
566 void
567 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
568 {
569 uint16_t handle_cnt;
570 uint16_t cnt, mbx;
571 uint32_t handles[5];
572 struct qla_hw_data *ha = vha->hw;
573 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
574 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
575 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
576 uint32_t rscn_entry, host_pid;
577 unsigned long flags;
578
579 /* Setup to process RIO completion. */
580 handle_cnt = 0;
581 if (IS_CNA_CAPABLE(ha))
582 goto skip_rio;
583 switch (mb[0]) {
584 case MBA_SCSI_COMPLETION:
585 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
586 handle_cnt = 1;
587 break;
588 case MBA_CMPLT_1_16BIT:
589 handles[0] = mb[1];
590 handle_cnt = 1;
591 mb[0] = MBA_SCSI_COMPLETION;
592 break;
593 case MBA_CMPLT_2_16BIT:
594 handles[0] = mb[1];
595 handles[1] = mb[2];
596 handle_cnt = 2;
597 mb[0] = MBA_SCSI_COMPLETION;
598 break;
599 case MBA_CMPLT_3_16BIT:
600 handles[0] = mb[1];
601 handles[1] = mb[2];
602 handles[2] = mb[3];
603 handle_cnt = 3;
604 mb[0] = MBA_SCSI_COMPLETION;
605 break;
606 case MBA_CMPLT_4_16BIT:
607 handles[0] = mb[1];
608 handles[1] = mb[2];
609 handles[2] = mb[3];
610 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
611 handle_cnt = 4;
612 mb[0] = MBA_SCSI_COMPLETION;
613 break;
614 case MBA_CMPLT_5_16BIT:
615 handles[0] = mb[1];
616 handles[1] = mb[2];
617 handles[2] = mb[3];
618 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
619 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
620 handle_cnt = 5;
621 mb[0] = MBA_SCSI_COMPLETION;
622 break;
623 case MBA_CMPLT_2_32BIT:
624 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
625 handles[1] = le32_to_cpu(
626 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
627 RD_MAILBOX_REG(ha, reg, 6));
628 handle_cnt = 2;
629 mb[0] = MBA_SCSI_COMPLETION;
630 break;
631 default:
632 break;
633 }
634 skip_rio:
635 switch (mb[0]) {
636 case MBA_SCSI_COMPLETION: /* Fast Post */
637 if (!vha->flags.online)
638 break;
639
640 for (cnt = 0; cnt < handle_cnt; cnt++)
641 qla2x00_process_completed_request(vha, rsp->req,
642 handles[cnt]);
643 break;
644
645 case MBA_RESET: /* Reset */
646 ql_dbg(ql_dbg_async, vha, 0x5002,
647 "Asynchronous RESET.\n");
648
649 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
650 break;
651
652 case MBA_SYSTEM_ERR: /* System Error */
653 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
654 RD_REG_WORD(&reg24->mailbox7) : 0;
655 ql_log(ql_log_warn, vha, 0x5003,
656 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
657 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
658
659 ha->isp_ops->fw_dump(vha, 1);
660
661 if (IS_FWI2_CAPABLE(ha)) {
662 if (mb[1] == 0 && mb[2] == 0) {
663 ql_log(ql_log_fatal, vha, 0x5004,
664 "Unrecoverable Hardware Error: adapter "
665 "marked OFFLINE!\n");
666 vha->flags.online = 0;
667 vha->device_flags |= DFLG_DEV_FAILED;
668 } else {
669 /* Check to see if MPI timeout occurred */
670 if ((mbx & MBX_3) && (ha->port_no == 0))
671 set_bit(MPI_RESET_NEEDED,
672 &vha->dpc_flags);
673
674 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
675 }
676 } else if (mb[1] == 0) {
677 ql_log(ql_log_fatal, vha, 0x5005,
678 "Unrecoverable Hardware Error: adapter marked "
679 "OFFLINE!\n");
680 vha->flags.online = 0;
681 vha->device_flags |= DFLG_DEV_FAILED;
682 } else
683 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
684 break;
685
686 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
687 ql_log(ql_log_warn, vha, 0x5006,
688 "ISP Request Transfer Error (%x).\n", mb[1]);
689
690 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
691 break;
692
693 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
694 ql_log(ql_log_warn, vha, 0x5007,
695 "ISP Response Transfer Error.\n");
696
697 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
698 break;
699
700 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
701 ql_dbg(ql_dbg_async, vha, 0x5008,
702 "Asynchronous WAKEUP_THRES.\n");
703
704 break;
705 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
706 ql_dbg(ql_dbg_async, vha, 0x5009,
707 "LIP occurred (%x).\n", mb[1]);
708
709 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
710 atomic_set(&vha->loop_state, LOOP_DOWN);
711 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
712 qla2x00_mark_all_devices_lost(vha, 1);
713 }
714
715 if (vha->vp_idx) {
716 atomic_set(&vha->vp_state, VP_FAILED);
717 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
718 }
719
720 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
721 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
722
723 vha->flags.management_server_logged_in = 0;
724 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
725 break;
726
727 case MBA_LOOP_UP: /* Loop Up Event */
728 if (IS_QLA2100(ha) || IS_QLA2200(ha))
729 ha->link_data_rate = PORT_SPEED_1GB;
730 else
731 ha->link_data_rate = mb[1];
732
733 ql_log(ql_log_info, vha, 0x500a,
734 "LOOP UP detected (%s Gbps).\n",
735 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
736
737 vha->flags.management_server_logged_in = 0;
738 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
739 break;
740
741 case MBA_LOOP_DOWN: /* Loop Down Event */
742 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
743 ? RD_REG_WORD(&reg24->mailbox4) : 0;
744 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
745 : mbx;
746 ql_log(ql_log_info, vha, 0x500b,
747 "LOOP DOWN detected (%x %x %x %x).\n",
748 mb[1], mb[2], mb[3], mbx);
749
750 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
751 atomic_set(&vha->loop_state, LOOP_DOWN);
752 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
753 vha->device_flags |= DFLG_NO_CABLE;
754 qla2x00_mark_all_devices_lost(vha, 1);
755 }
756
757 if (vha->vp_idx) {
758 atomic_set(&vha->vp_state, VP_FAILED);
759 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
760 }
761
762 vha->flags.management_server_logged_in = 0;
763 ha->link_data_rate = PORT_SPEED_UNKNOWN;
764 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
765 break;
766
767 case MBA_LIP_RESET: /* LIP reset occurred */
768 ql_dbg(ql_dbg_async, vha, 0x500c,
769 "LIP reset occurred (%x).\n", mb[1]);
770
771 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
772 atomic_set(&vha->loop_state, LOOP_DOWN);
773 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
774 qla2x00_mark_all_devices_lost(vha, 1);
775 }
776
777 if (vha->vp_idx) {
778 atomic_set(&vha->vp_state, VP_FAILED);
779 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
780 }
781
782 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
783
784 ha->operating_mode = LOOP;
785 vha->flags.management_server_logged_in = 0;
786 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
787 break;
788
789 /* case MBA_DCBX_COMPLETE: */
790 case MBA_POINT_TO_POINT: /* Point-to-Point */
791 if (IS_QLA2100(ha))
792 break;
793
794 if (IS_CNA_CAPABLE(ha)) {
795 ql_dbg(ql_dbg_async, vha, 0x500d,
796 "DCBX Completed -- %04x %04x %04x.\n",
797 mb[1], mb[2], mb[3]);
798 if (ha->notify_dcbx_comp && !vha->vp_idx)
799 complete(&ha->dcbx_comp);
800
801 } else
802 ql_dbg(ql_dbg_async, vha, 0x500e,
803 "Asynchronous P2P MODE received.\n");
804
805 /*
806 * Until there's a transition from loop down to loop up, treat
807 * this as loop down only.
808 */
809 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
810 atomic_set(&vha->loop_state, LOOP_DOWN);
811 if (!atomic_read(&vha->loop_down_timer))
812 atomic_set(&vha->loop_down_timer,
813 LOOP_DOWN_TIME);
814 qla2x00_mark_all_devices_lost(vha, 1);
815 }
816
817 if (vha->vp_idx) {
818 atomic_set(&vha->vp_state, VP_FAILED);
819 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
820 }
821
822 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
823 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
824
825 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
826 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
827
828 ha->flags.gpsc_supported = 1;
829 vha->flags.management_server_logged_in = 0;
830 break;
831
832 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
833 if (IS_QLA2100(ha))
834 break;
835
836 ql_dbg(ql_dbg_async, vha, 0x500f,
837 "Configuration change detected: value=%x.\n", mb[1]);
838
839 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
840 atomic_set(&vha->loop_state, LOOP_DOWN);
841 if (!atomic_read(&vha->loop_down_timer))
842 atomic_set(&vha->loop_down_timer,
843 LOOP_DOWN_TIME);
844 qla2x00_mark_all_devices_lost(vha, 1);
845 }
846
847 if (vha->vp_idx) {
848 atomic_set(&vha->vp_state, VP_FAILED);
849 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
850 }
851
852 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
853 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
854 break;
855
856 case MBA_PORT_UPDATE: /* Port database update */
857 /*
858 * Handle only global and vn-port update events
859 *
860 * Relevant inputs:
861 * mb[1] = N_Port handle of changed port
862 * OR 0xffff for global event
863 * mb[2] = New login state
864 * 7 = Port logged out
865 * mb[3] = LSB is vp_idx, 0xff = all vps
866 *
867 * Skip processing if:
868 * Event is global, vp_idx is NOT all vps,
869 * vp_idx does not match
870 * Event is not global, vp_idx does not match
871 */
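/*
 * Example (illustrative): on a vport with vp_idx = 2, a global event
 * (mb[1] == 0xffff) addressed to all vps ((mb[3] & 0xff) == 0xff) is
 * processed; the same global event addressed to vp_idx 5 is skipped,
 * as is any non-global event whose mb[3] vp_idx is not 2.
 */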
872 if (IS_QLA2XXX_MIDTYPE(ha) &&
873 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
874 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
875 break;
876
877 /* Global event -- port logout or port unavailable. */
878 if (mb[1] == 0xffff && mb[2] == 0x7) {
879 ql_dbg(ql_dbg_async, vha, 0x5010,
880 "Port unavailable %04x %04x %04x.\n",
881 mb[1], mb[2], mb[3]);
882 ql_log(ql_log_warn, vha, 0x505e,
883 "Link is offline.\n");
884
885 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
886 atomic_set(&vha->loop_state, LOOP_DOWN);
887 atomic_set(&vha->loop_down_timer,
888 LOOP_DOWN_TIME);
889 vha->device_flags |= DFLG_NO_CABLE;
890 qla2x00_mark_all_devices_lost(vha, 1);
891 }
892
893 if (vha->vp_idx) {
894 atomic_set(&vha->vp_state, VP_FAILED);
895 fc_vport_set_state(vha->fc_vport,
896 FC_VPORT_FAILED);
897 qla2x00_mark_all_devices_lost(vha, 1);
898 }
899
900 vha->flags.management_server_logged_in = 0;
901 ha->link_data_rate = PORT_SPEED_UNKNOWN;
902 break;
903 }
904
905 /*
906 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
907 * event etc. earlier indicating loop is down) then process
908 * it. Otherwise, ignore it and wait for the RSCN to come in.
909 */
910 atomic_set(&vha->loop_down_timer, 0);
911 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
912 atomic_read(&vha->loop_state) != LOOP_DEAD) {
913 ql_dbg(ql_dbg_async, vha, 0x5011,
914 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
915 mb[1], mb[2], mb[3]);
916
917 qlt_async_event(mb[0], vha, mb);
918 break;
919 }
920
921 ql_dbg(ql_dbg_async, vha, 0x5012,
922 "Port database changed %04x %04x %04x.\n",
923 mb[1], mb[2], mb[3]);
924
925 /*
926 * Mark all devices as missing so we will log in again.
927 */
928 atomic_set(&vha->loop_state, LOOP_UP);
929
930 qla2x00_mark_all_devices_lost(vha, 1);
931
932 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
933 set_bit(SCR_PENDING, &vha->dpc_flags);
934
935 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
936 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
937
938 qlt_async_event(mb[0], vha, mb);
939 break;
940
941 case MBA_RSCN_UPDATE: /* State Change Registration */
942 /* Check if the Vport has issued a SCR */
943 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
944 break;
945 /* Only handle SCNs for our Vport index. */
946 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
947 break;
948
949 ql_dbg(ql_dbg_async, vha, 0x5013,
950 "RSCN database changed -- %04x %04x %04x.\n",
951 mb[1], mb[2], mb[3]);
952
953 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
954 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
955 | vha->d_id.b.al_pa;
956 if (rscn_entry == host_pid) {
957 ql_dbg(ql_dbg_async, vha, 0x5014,
958 "Ignoring RSCN update to local host "
959 "port ID (%06x).\n", host_pid);
960 break;
961 }
962
963 /* Ignore reserved bits from RSCN-payload. */
964 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
965
966 /* Skip RSCNs for virtual ports on the same physical port */
967 if (qla2x00_is_a_vp_did(vha, rscn_entry))
968 break;
969
970 atomic_set(&vha->loop_down_timer, 0);
971 vha->flags.management_server_logged_in = 0;
972
973 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
974 set_bit(RSCN_UPDATE, &vha->dpc_flags);
975 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
976 break;
977
978 /* case MBA_RIO_RESPONSE: */
979 case MBA_ZIO_RESPONSE:
980 ql_dbg(ql_dbg_async, vha, 0x5015,
981 "[R|Z]IO update completion.\n");
982
983 if (IS_FWI2_CAPABLE(ha))
984 qla24xx_process_response_queue(vha, rsp);
985 else
986 qla2x00_process_response_queue(rsp);
987 break;
988
989 case MBA_DISCARD_RND_FRAME:
990 ql_dbg(ql_dbg_async, vha, 0x5016,
991 "Discard RND Frame -- %04x %04x %04x.\n",
992 mb[1], mb[2], mb[3]);
993 break;
994
995 case MBA_TRACE_NOTIFICATION:
996 ql_dbg(ql_dbg_async, vha, 0x5017,
997 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
998 break;
999
1000 case MBA_ISP84XX_ALERT:
1001 ql_dbg(ql_dbg_async, vha, 0x5018,
1002 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1003 mb[1], mb[2], mb[3]);
1004
1005 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1006 switch (mb[1]) {
1007 case A84_PANIC_RECOVERY:
1008 ql_log(ql_log_info, vha, 0x5019,
1009 "Alert 84XX: panic recovery %04x %04x.\n",
1010 mb[2], mb[3]);
1011 break;
1012 case A84_OP_LOGIN_COMPLETE:
1013 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1014 ql_log(ql_log_info, vha, 0x501a,
1015 "Alert 84XX: firmware version %x.\n",
1016 ha->cs84xx->op_fw_version);
1017 break;
1018 case A84_DIAG_LOGIN_COMPLETE:
1019 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1020 ql_log(ql_log_info, vha, 0x501b,
1021 "Alert 84XX: diagnostic firmware version %x.\n",
1022 ha->cs84xx->diag_fw_version);
1023 break;
1024 case A84_GOLD_LOGIN_COMPLETE:
1025 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1026 ha->cs84xx->fw_update = 1;
1027 ql_log(ql_log_info, vha, 0x501c,
1028 "Alert 84XX: gold firmware version %x.\n",
1029 ha->cs84xx->gold_fw_version);
1030 break;
1031 default:
1032 ql_log(ql_log_warn, vha, 0x501d,
1033 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1034 mb[1], mb[2], mb[3]);
1035 }
1036 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1037 break;
1038 case MBA_DCBX_START:
1039 ql_dbg(ql_dbg_async, vha, 0x501e,
1040 "DCBX Started -- %04x %04x %04x.\n",
1041 mb[1], mb[2], mb[3]);
1042 break;
1043 case MBA_DCBX_PARAM_UPDATE:
1044 ql_dbg(ql_dbg_async, vha, 0x501f,
1045 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1046 mb[1], mb[2], mb[3]);
1047 break;
1048 case MBA_FCF_CONF_ERR:
1049 ql_dbg(ql_dbg_async, vha, 0x5020,
1050 "FCF Configuration Error -- %04x %04x %04x.\n",
1051 mb[1], mb[2], mb[3]);
1052 break;
1053 case MBA_IDC_NOTIFY:
1054 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1055 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1056 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1057 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1058 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1059 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1060 /*
1061 * Extend loop down timer since port is active.
1062 */
1063 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1064 atomic_set(&vha->loop_down_timer,
1065 LOOP_DOWN_TIME);
1066 qla2xxx_wake_dpc(vha);
1067 }
1068 }
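/* Fallthru */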
1069 case MBA_IDC_COMPLETE:
1070 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1071 complete(&ha->lb_portup_comp);
1072 /* Fallthru */
1073 case MBA_IDC_TIME_EXT:
1074 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1075 IS_QLA8044(ha))
1076 qla81xx_idc_event(vha, mb[0], mb[1]);
1077 break;
1078
1079 case MBA_IDC_AEN:
1080 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1081 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1082 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1083 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1084 qla83xx_handle_8200_aen(vha, mb);
1085 break;
1086
1087 default:
1088 ql_dbg(ql_dbg_async, vha, 0x5057,
1089 "Unknown AEN:%04x %04x %04x %04x\n",
1090 mb[0], mb[1], mb[2], mb[3]);
1091 }
1092
1093 qlt_async_event(mb[0], vha, mb);
1094
1095 if (!vha->vp_idx && ha->num_vhosts)
1096 qla2x00_alert_all_vps(rsp, mb);
1097 }
1098
1099 /**
1100 * qla2x00_process_completed_request() - Process a Fast Post response.
1101 * @vha: SCSI driver HA context
1102 * @index: SRB index
1103 */
1104 void
1105 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1106 struct req_que *req, uint32_t index)
1107 {
1108 srb_t *sp;
1109 struct qla_hw_data *ha = vha->hw;
1110
1111 /* Validate handle. */
1112 if (index >= req->num_outstanding_cmds) {
1113 ql_log(ql_log_warn, vha, 0x3014,
1114 "Invalid SCSI command index (%x).\n", index);
1115
1116 if (IS_P3P_TYPE(ha))
1117 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1118 else
1119 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1120 return;
1121 }
1122
1123 sp = req->outstanding_cmds[index];
1124 if (sp) {
1125 /* Free outstanding command slot. */
1126 req->outstanding_cmds[index] = NULL;
1127
1128 /* Save ISP completion status */
1129 sp->done(ha, sp, DID_OK << 16);
1130 } else {
1131 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1132
1133 if (IS_P3P_TYPE(ha))
1134 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1135 else
1136 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1137 }
1138 }
1139
1140 srb_t *
1141 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1142 struct req_que *req, void *iocb)
1143 {
1144 struct qla_hw_data *ha = vha->hw;
1145 sts_entry_t *pkt = iocb;
1146 srb_t *sp = NULL;
1147 uint16_t index;
1148
1149 index = LSW(pkt->handle);
1150 if (index >= req->num_outstanding_cmds) {
1151 ql_log(ql_log_warn, vha, 0x5031,
1152 "Invalid command index (%x).\n", index);
1153 if (IS_P3P_TYPE(ha))
1154 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1155 else
1156 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1157 goto done;
1158 }
1159 sp = req->outstanding_cmds[index];
1160 if (!sp) {
1161 ql_log(ql_log_warn, vha, 0x5032,
1162 "Invalid completion handle (%x) -- timed-out.\n", index);
1163 return sp;
1164 }
1165 if (sp->handle != index) {
1166 ql_log(ql_log_warn, vha, 0x5033,
1167 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1168 return NULL;
1169 }
1170
1171 req->outstanding_cmds[index] = NULL;
1172
1173 done:
1174 return sp;
1175 }
1176
1177 static void
1178 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1179 struct mbx_entry *mbx)
1180 {
1181 const char func[] = "MBX-IOCB";
1182 const char *type;
1183 fc_port_t *fcport;
1184 srb_t *sp;
1185 struct srb_iocb *lio;
1186 uint16_t *data;
1187 uint16_t status;
1188
1189 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1190 if (!sp)
1191 return;
1192
1193 lio = &sp->u.iocb_cmd;
1194 type = sp->name;
1195 fcport = sp->fcport;
1196 data = lio->u.logio.data;
1197
1198 data[0] = MBS_COMMAND_ERROR;
1199 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1200 QLA_LOGIO_LOGIN_RETRIED : 0;
1201 if (mbx->entry_status) {
1202 ql_dbg(ql_dbg_async, vha, 0x5043,
1203 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1204 "entry-status=%x status=%x state-flag=%x "
1205 "status-flags=%x.\n", type, sp->handle,
1206 fcport->d_id.b.domain, fcport->d_id.b.area,
1207 fcport->d_id.b.al_pa, mbx->entry_status,
1208 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1209 le16_to_cpu(mbx->status_flags));
1210
1211 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1212 (uint8_t *)mbx, sizeof(*mbx));
1213
1214 goto logio_done;
1215 }
1216
1217 status = le16_to_cpu(mbx->status);
1218 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1219 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1220 status = 0;
1221 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1222 ql_dbg(ql_dbg_async, vha, 0x5045,
1223 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1224 type, sp->handle, fcport->d_id.b.domain,
1225 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1226 le16_to_cpu(mbx->mb1));
1227
1228 data[0] = MBS_COMMAND_COMPLETE;
1229 if (sp->type == SRB_LOGIN_CMD) {
1230 fcport->port_type = FCT_TARGET;
1231 if (le16_to_cpu(mbx->mb1) & BIT_0)
1232 fcport->port_type = FCT_INITIATOR;
1233 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1234 fcport->flags |= FCF_FCP2_DEVICE;
1235 }
1236 goto logio_done;
1237 }
1238
1239 data[0] = le16_to_cpu(mbx->mb0);
1240 switch (data[0]) {
1241 case MBS_PORT_ID_USED:
1242 data[1] = le16_to_cpu(mbx->mb1);
1243 break;
1244 case MBS_LOOP_ID_USED:
1245 break;
1246 default:
1247 data[0] = MBS_COMMAND_ERROR;
1248 break;
1249 }
1250
1251 ql_log(ql_log_warn, vha, 0x5046,
1252 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1253 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1254 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1255 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1256 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1257 le16_to_cpu(mbx->mb7));
1258
1259 logio_done:
1260 sp->done(vha, sp, 0);
1261 }
1262
1263 static void
1264 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1265 sts_entry_t *pkt, int iocb_type)
1266 {
1267 const char func[] = "CT_IOCB";
1268 const char *type;
1269 srb_t *sp;
1270 struct fc_bsg_job *bsg_job;
1271 uint16_t comp_status;
1272 int res;
1273
1274 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1275 if (!sp)
1276 return;
1277
1278 bsg_job = sp->u.bsg_job;
1279
1280 type = "ct pass-through";
1281
1282 comp_status = le16_to_cpu(pkt->comp_status);
1283
1284 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1285 * fc payload to the caller
1286 */
1287 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1288 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1289
1290 if (comp_status != CS_COMPLETE) {
1291 if (comp_status == CS_DATA_UNDERRUN) {
1292 res = DID_OK << 16;
1293 bsg_job->reply->reply_payload_rcv_len =
1294 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1295
1296 ql_log(ql_log_warn, vha, 0x5048,
1297 "CT pass-through-%s error "
1298 "comp_status-status=0x%x total_byte = 0x%x.\n",
1299 type, comp_status,
1300 bsg_job->reply->reply_payload_rcv_len);
1301 } else {
1302 ql_log(ql_log_warn, vha, 0x5049,
1303 "CT pass-through-%s error "
1304 "comp_status-status=0x%x.\n", type, comp_status);
1305 res = DID_ERROR << 16;
1306 bsg_job->reply->reply_payload_rcv_len = 0;
1307 }
1308 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1309 (uint8_t *)pkt, sizeof(*pkt));
1310 } else {
1311 res = DID_OK << 16;
1312 bsg_job->reply->reply_payload_rcv_len =
1313 bsg_job->reply_payload.payload_len;
1314 bsg_job->reply_len = 0;
1315 }
1316
1317 sp->done(vha, sp, res);
1318 }
1319
1320 static void
1321 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1322 struct sts_entry_24xx *pkt, int iocb_type)
1323 {
1324 const char func[] = "ELS_CT_IOCB";
1325 const char *type;
1326 srb_t *sp;
1327 struct fc_bsg_job *bsg_job;
1328 uint16_t comp_status;
1329 uint32_t fw_status[3];
1330 uint8_t* fw_sts_ptr;
1331 int res;
1332
1333 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1334 if (!sp)
1335 return;
1336 bsg_job = sp->u.bsg_job;
1337
1338 type = NULL;
1339 switch (sp->type) {
1340 case SRB_ELS_CMD_RPT:
1341 case SRB_ELS_CMD_HST:
1342 type = "els";
1343 break;
1344 case SRB_CT_CMD:
1345 type = "ct pass-through";
1346 break;
1347 default:
1348 ql_dbg(ql_dbg_user, vha, 0x503e,
1349 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1350 return;
1351 }
1352
1353 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1354 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1355 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1356
1357 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1358 * fc payload to the caller
1359 */
1360 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1361 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1362
1363 if (comp_status != CS_COMPLETE) {
1364 if (comp_status == CS_DATA_UNDERRUN) {
1365 res = DID_OK << 16;
1366 bsg_job->reply->reply_payload_rcv_len =
1367 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1368
1369 ql_dbg(ql_dbg_user, vha, 0x503f,
1370 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1371 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1372 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1373 le16_to_cpu(((struct els_sts_entry_24xx *)
1374 pkt)->total_byte_count));
1375 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1376 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1377 }
1378 else {
1379 ql_dbg(ql_dbg_user, vha, 0x5040,
1380 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1381 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1382 type, sp->handle, comp_status,
1383 le16_to_cpu(((struct els_sts_entry_24xx *)
1384 pkt)->error_subcode_1),
1385 le16_to_cpu(((struct els_sts_entry_24xx *)
1386 pkt)->error_subcode_2));
1387 res = DID_ERROR << 16;
1388 bsg_job->reply->reply_payload_rcv_len = 0;
1389 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1390 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1391 }
1392 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1393 (uint8_t *)pkt, sizeof(*pkt));
1394 }
1395 else {
1396 res = DID_OK << 16;
1397 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1398 bsg_job->reply_len = 0;
1399 }
1400
1401 sp->done(vha, sp, res);
1402 }
1403
1404 static void
1405 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1406 struct logio_entry_24xx *logio)
1407 {
1408 const char func[] = "LOGIO-IOCB";
1409 const char *type;
1410 fc_port_t *fcport;
1411 srb_t *sp;
1412 struct srb_iocb *lio;
1413 uint16_t *data;
1414 uint32_t iop[2];
1415
1416 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1417 if (!sp)
1418 return;
1419
1420 lio = &sp->u.iocb_cmd;
1421 type = sp->name;
1422 fcport = sp->fcport;
1423 data = lio->u.logio.data;
1424
1425 data[0] = MBS_COMMAND_ERROR;
1426 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1427 QLA_LOGIO_LOGIN_RETRIED : 0;
1428 if (logio->entry_status) {
1429 ql_log(ql_log_warn, fcport->vha, 0x5034,
1430 "Async-%s error entry - hdl=%x"
1431 "portid=%02x%02x%02x entry-status=%x.\n",
1432 type, sp->handle, fcport->d_id.b.domain,
1433 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1434 logio->entry_status);
1435 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1436 (uint8_t *)logio, sizeof(*logio));
1437
1438 goto logio_done;
1439 }
1440
1441 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1442 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1443 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1444 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1445 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1446 le32_to_cpu(logio->io_parameter[0]));
1447
1448 data[0] = MBS_COMMAND_COMPLETE;
1449 if (sp->type != SRB_LOGIN_CMD)
1450 goto logio_done;
1451
1452 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1453 if (iop[0] & BIT_4) {
1454 fcport->port_type = FCT_TARGET;
1455 if (iop[0] & BIT_8)
1456 fcport->flags |= FCF_FCP2_DEVICE;
1457 } else if (iop[0] & BIT_5)
1458 fcport->port_type = FCT_INITIATOR;
1459
1460 if (iop[0] & BIT_7)
1461 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1462
1463 if (logio->io_parameter[7] || logio->io_parameter[8])
1464 fcport->supported_classes |= FC_COS_CLASS2;
1465 if (logio->io_parameter[9] || logio->io_parameter[10])
1466 fcport->supported_classes |= FC_COS_CLASS3;
1467
1468 goto logio_done;
1469 }
1470
1471 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1472 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1473 switch (iop[0]) {
1474 case LSC_SCODE_PORTID_USED:
1475 data[0] = MBS_PORT_ID_USED;
1476 data[1] = LSW(iop[1]);
1477 break;
1478 case LSC_SCODE_NPORT_USED:
1479 data[0] = MBS_LOOP_ID_USED;
1480 break;
1481 default:
1482 data[0] = MBS_COMMAND_ERROR;
1483 break;
1484 }
1485
1486 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1487 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1488 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1489 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1490 le16_to_cpu(logio->comp_status),
1491 le32_to_cpu(logio->io_parameter[0]),
1492 le32_to_cpu(logio->io_parameter[1]));
1493
1494 logio_done:
1495 sp->done(vha, sp, 0);
1496 }
1497
1498 static void
1499 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1500 {
1501 const char func[] = "TMF-IOCB";
1502 const char *type;
1503 fc_port_t *fcport;
1504 srb_t *sp;
1505 struct srb_iocb *iocb;
1506 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1507
1508 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1509 if (!sp)
1510 return;
1511
1512 iocb = &sp->u.iocb_cmd;
1513 type = sp->name;
1514 fcport = sp->fcport;
1515 iocb->u.tmf.data = QLA_SUCCESS;
1516
1517 if (sts->entry_status) {
1518 ql_log(ql_log_warn, fcport->vha, 0x5038,
1519 "Async-%s error - hdl=%x entry-status(%x).\n",
1520 type, sp->handle, sts->entry_status);
1521 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1522 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1523 ql_log(ql_log_warn, fcport->vha, 0x5039,
1524 "Async-%s error - hdl=%x completion status(%x).\n",
1525 type, sp->handle, sts->comp_status);
1526 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1527 } else if ((le16_to_cpu(sts->scsi_status) &
1528 SS_RESPONSE_INFO_LEN_VALID)) {
1529 if (le32_to_cpu(sts->rsp_data_len) < 4) {
1530 ql_log(ql_log_warn, fcport->vha, 0x503b,
1531 "Async-%s error - hdl=%x not enough response(%d).\n",
1532 type, sp->handle, sts->rsp_data_len);
1533 } else if (sts->data[3]) {
1534 ql_log(ql_log_warn, fcport->vha, 0x503c,
1535 "Async-%s error - hdl=%x response(%x).\n",
1536 type, sp->handle, sts->data[3]);
1537 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1538 }
1539 }
1540
1541 if (iocb->u.tmf.data != QLA_SUCCESS)
1542 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1543 (uint8_t *)sts, sizeof(*sts));
1544
1545 sp->done(vha, sp, 0);
1546 }
1547
1548 /**
1549 * qla2x00_process_response_queue() - Process response queue entries.
1550 * @rsp: response queue
1551 */
1552 void
1553 qla2x00_process_response_queue(struct rsp_que *rsp)
1554 {
1555 struct scsi_qla_host *vha;
1556 struct qla_hw_data *ha = rsp->hw;
1557 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1558 sts_entry_t *pkt;
1559 uint16_t handle_cnt;
1560 uint16_t cnt;
1561
1562 vha = pci_get_drvdata(ha->pdev);
1563
1564 if (!vha->flags.online)
1565 return;
1566
1567 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1568 pkt = (sts_entry_t *)rsp->ring_ptr;
1569
1570 rsp->ring_index++;
1571 if (rsp->ring_index == rsp->length) {
1572 rsp->ring_index = 0;
1573 rsp->ring_ptr = rsp->ring;
1574 } else {
1575 rsp->ring_ptr++;
1576 }
1577
1578 if (pkt->entry_status != 0) {
1579 qla2x00_error_entry(vha, rsp, pkt);
1580 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1581 wmb();
1582 continue;
1583 }
1584
1585 switch (pkt->entry_type) {
1586 case STATUS_TYPE:
1587 qla2x00_status_entry(vha, rsp, pkt);
1588 break;
1589 case STATUS_TYPE_21:
1590 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1591 for (cnt = 0; cnt < handle_cnt; cnt++) {
1592 qla2x00_process_completed_request(vha, rsp->req,
1593 ((sts21_entry_t *)pkt)->handle[cnt]);
1594 }
1595 break;
1596 case STATUS_TYPE_22:
1597 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1598 for (cnt = 0; cnt < handle_cnt; cnt++) {
1599 qla2x00_process_completed_request(vha, rsp->req,
1600 ((sts22_entry_t *)pkt)->handle[cnt]);
1601 }
1602 break;
1603 case STATUS_CONT_TYPE:
1604 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1605 break;
1606 case MBX_IOCB_TYPE:
1607 qla2x00_mbx_iocb_entry(vha, rsp->req,
1608 (struct mbx_entry *)pkt);
1609 break;
1610 case CT_IOCB_TYPE:
1611 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1612 break;
1613 default:
1614 /* Type Not Supported. */
1615 ql_log(ql_log_warn, vha, 0x504a,
1616 "Received unknown response pkt type %x "
1617 "entry status=%x.\n",
1618 pkt->entry_type, pkt->entry_status);
1619 break;
1620 }
1621 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1622 wmb();
1623 }
1624
1625 /* Adjust ring index */
1626 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1627 }
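/*
 * Example (illustrative): with a 256-entry ring, consuming the entry at
 * index 255 wraps ring_index to 0 and ring_ptr back to the ring base;
 * the final ring_index written to ISP_RSP_Q_OUT tells the ISP how far
 * the host has consumed the response queue.
 */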
1628
1629 static inline void
1630 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1631 uint32_t sense_len, struct rsp_que *rsp, int res)
1632 {
1633 struct scsi_qla_host *vha = sp->fcport->vha;
1634 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1635 uint32_t track_sense_len;
1636
1637 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1638 sense_len = SCSI_SENSE_BUFFERSIZE;
1639
1640 SET_CMD_SENSE_LEN(sp, sense_len);
1641 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1642 track_sense_len = sense_len;
1643
1644 if (sense_len > par_sense_len)
1645 sense_len = par_sense_len;
1646
1647 memcpy(cp->sense_buffer, sense_data, sense_len);
1648
1649 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1650 track_sense_len -= sense_len;
1651 SET_CMD_SENSE_LEN(sp, track_sense_len);
1652
1653 if (track_sense_len != 0) {
1654 rsp->status_srb = sp;
1655 cp->result = res;
1656 }
1657
1658 if (sense_len) {
1659 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1660 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
1661 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1662 cp);
1663 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1664 cp->sense_buffer, sense_len);
1665 }
1666 }
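/*
 * Note (illustrative): when more sense bytes are reported than fit in
 * this IOCB (sense_len > par_sense_len), the remainder arrives in
 * status-continuation entries; rsp->status_srb set above tells
 * qla2x00_status_cont_entry() which command to keep appending to.
 */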
1667
1668 struct scsi_dif_tuple {
1669 __be16 guard; /* Checksum */
1670 __be16 app_tag; /* APPL identifier */
1671 __be32 ref_tag; /* Target LBA or indirect LBA */
1672 };
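/*
 * Example (illustrative): for a Type 1 protected sector at LBA 0x1234,
 * the 8-byte tuple carries the data CRC in 'guard', the application tag
 * in 'app_tag' and the low 32 bits of the LBA (0x1234) in 'ref_tag',
 * all big-endian on the wire.
 */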
1673
1674 /*
1675 * Checks the guard or meta-data for the type of error
1676 * detected by the HBA. In case of errors, we set the
1677 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1678 * to indicate to the kernel that the HBA detected error.
1679 */
1680 static inline int
1681 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1682 {
1683 struct scsi_qla_host *vha = sp->fcport->vha;
1684 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1685 uint8_t *ap = &sts24->data[12];
1686 uint8_t *ep = &sts24->data[20];
1687 uint32_t e_ref_tag, a_ref_tag;
1688 uint16_t e_app_tag, a_app_tag;
1689 uint16_t e_guard, a_guard;
1690
1691 /*
1692 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1693 * would make guard field appear at offset 2
1694 */
1695 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1696 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1697 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1698 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1699 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1700 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1701
1702 ql_dbg(ql_dbg_io, vha, 0x3023,
1703 "iocb(s) %p Returned STATUS.\n", sts24);
1704
1705 ql_dbg(ql_dbg_io, vha, 0x3024,
1706 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1707 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1708 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1709 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1710 a_app_tag, e_app_tag, a_guard, e_guard);
1711
1712 /*
1713 * Ignore sector if:
1714 * For type 3: ref & app tag is all 'f's
1715 * For type 0,1,2: app tag is all 'f's
1716 */
1717 if ((a_app_tag == 0xffff) &&
1718 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1719 (a_ref_tag == 0xffffffff))) {
1720 uint32_t blocks_done, resid;
1721 sector_t lba_s = scsi_get_lba(cmd);
1722
1723 /* 2TB boundary case covered automatically with this */
1724 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
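/*
 * Worked example (illustrative): with a starting LBA of 0xffffffff
 * (after the 32-bit truncation) and an expected ref tag of 0x00000002,
 * unsigned wraparound gives blocks_done = 0x2 - 0xffffffff + 1 = 4:
 * the sectors tagged 0xffffffff, 0x0, 0x1 and 0x2 completed, so the
 * 2TB (32-bit ref-tag) boundary needs no special casing.
 */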
1725
1726 resid = scsi_bufflen(cmd) - (blocks_done *
1727 cmd->device->sector_size);
1728
1729 scsi_set_resid(cmd, resid);
1730 cmd->result = DID_OK << 16;
1731
1732 /* Update protection tag */
1733 if (scsi_prot_sg_count(cmd)) {
1734 uint32_t i, j = 0, k = 0, num_ent;
1735 struct scatterlist *sg;
1736 struct sd_dif_tuple *spt;
1737
1738 /* Patch the corresponding protection tags */
1739 scsi_for_each_prot_sg(cmd, sg,
1740 scsi_prot_sg_count(cmd), i) {
1741 num_ent = sg_dma_len(sg) / 8;
1742 if (k + num_ent < blocks_done) {
1743 k += num_ent;
1744 continue;
1745 }
1746 j = blocks_done - k - 1;
1747 k = blocks_done;
1748 break;
1749 }
1750
1751 if (k != blocks_done) {
1752 ql_log(ql_log_warn, vha, 0x302f,
1753 "unexpected tag values tag:lba=%x:%llx)\n",
1754 e_ref_tag, (unsigned long long)lba_s);
1755 return 1;
1756 }
1757
1758 spt = page_address(sg_page(sg)) + sg->offset;
1759 spt += j;
1760
1761 spt->app_tag = 0xffff;
1762 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1763 spt->ref_tag = 0xffffffff;
1764 }
1765
1766 return 0;
1767 }
1768
1769 /* check guard */
1770 if (e_guard != a_guard) {
1771 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1772 0x10, 0x1);
1773 set_driver_byte(cmd, DRIVER_SENSE);
1774 set_host_byte(cmd, DID_ABORT);
1775 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1776 return 1;
1777 }
1778
1779 /* check ref tag */
1780 if (e_ref_tag != a_ref_tag) {
1781 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1782 0x10, 0x3);
1783 set_driver_byte(cmd, DRIVER_SENSE);
1784 set_host_byte(cmd, DID_ABORT);
1785 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1786 return 1;
1787 }
1788
1789 /* check appl tag */
1790 if (e_app_tag != a_app_tag) {
1791 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1792 0x10, 0x2);
1793 set_driver_byte(cmd, DRIVER_SENSE);
1794 set_host_byte(cmd, DID_ABORT);
1795 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1796 return 1;
1797 }
1798
1799 return 1;
1800 }
1801
1802 static void
1803 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1804 struct req_que *req, uint32_t index)
1805 {
1806 struct qla_hw_data *ha = vha->hw;
1807 srb_t *sp;
1808 uint16_t comp_status;
1809 uint16_t scsi_status;
1810 uint16_t thread_id;
1811 uint32_t rval = EXT_STATUS_OK;
1812 struct fc_bsg_job *bsg_job = NULL;
1813 sts_entry_t *sts;
1814 struct sts_entry_24xx *sts24;
1815 sts = (sts_entry_t *) pkt;
1816 sts24 = (struct sts_entry_24xx *) pkt;
1817
1818 /* Validate handle. */
1819 if (index >= req->num_outstanding_cmds) {
1820 ql_log(ql_log_warn, vha, 0x70af,
1821 "Invalid SCSI completion handle 0x%x.\n", index);
1822 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1823 return;
1824 }
1825
1826 sp = req->outstanding_cmds[index];
1827 if (sp) {
1828 /* Free outstanding command slot. */
1829 req->outstanding_cmds[index] = NULL;
1830 bsg_job = sp->u.bsg_job;
1831 } else {
1832 ql_log(ql_log_warn, vha, 0x70b0,
1833 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1834 req->id, index);
1835
1836 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1837 return;
1838 }
1839
1840 if (IS_FWI2_CAPABLE(ha)) {
1841 comp_status = le16_to_cpu(sts24->comp_status);
1842 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1843 } else {
1844 comp_status = le16_to_cpu(sts->comp_status);
1845 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1846 }
1847
1848 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1849 switch (comp_status) {
1850 case CS_COMPLETE:
1851 if (scsi_status == 0) {
1852 bsg_job->reply->reply_payload_rcv_len =
1853 bsg_job->reply_payload.payload_len;
1854 vha->qla_stats.input_bytes +=
1855 bsg_job->reply->reply_payload_rcv_len;
1856 vha->qla_stats.input_requests++;
1857 rval = EXT_STATUS_OK;
1858 }
1859 goto done;
1860
1861 case CS_DATA_OVERRUN:
1862 ql_dbg(ql_dbg_user, vha, 0x70b1,
1863 "Command completed with date overrun thread_id=%d\n",
1864 thread_id);
1865 rval = EXT_STATUS_DATA_OVERRUN;
1866 break;
1867
1868 case CS_DATA_UNDERRUN:
1869 ql_dbg(ql_dbg_user, vha, 0x70b2,
1870 "Command completed with date underrun thread_id=%d\n",
1871 thread_id);
1872 rval = EXT_STATUS_DATA_UNDERRUN;
1873 break;
1874 case CS_BIDIR_RD_OVERRUN:
1875 ql_dbg(ql_dbg_user, vha, 0x70b3,
1876 "Command completed with read data overrun thread_id=%d\n",
1877 thread_id);
1878 rval = EXT_STATUS_DATA_OVERRUN;
1879 break;
1880
1881 case CS_BIDIR_RD_WR_OVERRUN:
1882 ql_dbg(ql_dbg_user, vha, 0x70b4,
1883 "Command completed with read and write data overrun "
1884 "thread_id=%d\n", thread_id);
1885 rval = EXT_STATUS_DATA_OVERRUN;
1886 break;
1887
1888 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1889 ql_dbg(ql_dbg_user, vha, 0x70b5,
1890 "Command completed with read data over and write data "
1891 "underrun thread_id=%d\n", thread_id);
1892 rval = EXT_STATUS_DATA_OVERRUN;
1893 break;
1894
1895 case CS_BIDIR_RD_UNDERRUN:
1896 ql_dbg(ql_dbg_user, vha, 0x70b6,
1897 "Command completed with read data data underrun "
1898 "thread_id=%d\n", thread_id);
1899 rval = EXT_STATUS_DATA_UNDERRUN;
1900 break;
1901
1902 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1903 ql_dbg(ql_dbg_user, vha, 0x70b7,
1904 "Command completed with read data under and write data "
1905 "overrun thread_id=%d\n", thread_id);
1906 rval = EXT_STATUS_DATA_UNDERRUN;
1907 break;
1908
1909 case CS_BIDIR_RD_WR_UNDERRUN:
1910 ql_dbg(ql_dbg_user, vha, 0x70b8,
1911 "Command completed with read and write data underrun "
1912 "thread_id=%d\n", thread_id);
1913 rval = EXT_STATUS_DATA_UNDERRUN;
1914 break;
1915
1916 case CS_BIDIR_DMA:
1917 ql_dbg(ql_dbg_user, vha, 0x70b9,
1918 "Command completed with data DMA error thread_id=%d\n",
1919 thread_id);
1920 rval = EXT_STATUS_DMA_ERR;
1921 break;
1922
1923 case CS_TIMEOUT:
1924 ql_dbg(ql_dbg_user, vha, 0x70ba,
1925 "Command completed with timeout thread_id=%d\n",
1926 thread_id);
1927 rval = EXT_STATUS_TIMEOUT;
1928 break;
1929 default:
1930 ql_dbg(ql_dbg_user, vha, 0x70bb,
1931 "Command completed with completion status=0x%x "
1932 "thread_id=%d\n", comp_status, thread_id);
1933 rval = EXT_STATUS_ERR;
1934 break;
1935 }
1936 bsg_job->reply->reply_payload_rcv_len = 0;
1937
1938 done:
1939 /* Return the vendor specific reply to API */
1940 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1941 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1942 /* Always return DID_OK; bsg will send the vendor-specific
1943  * response in this case only. */
1944 sp->done(vha, sp, (DID_OK << 16));
1945
1946 }
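/*
 * Editor's sketch (not part of the driver): the switch above reduces a
 * firmware completion status to one external status word returned via
 * the bsg vendor reply. The same mapping, table-driven; the numeric
 * codes below are hypothetical placeholders, not the firmware's values.
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>

struct cs_map {
	int cs;		/* firmware completion status	*/
	int ext;	/* status handed back to the API */
};

static const struct cs_map bidir_map[] = {
	{ 0x00, 0 },	/* complete      -> OK		*/
	{ 0x01, 1 },	/* data overrun  -> OVERRUN	*/
	{ 0x02, 2 },	/* data underrun -> UNDERRUN	*/
	{ 0x03, 3 },	/* timeout       -> TIMEOUT	*/
};

static int map_comp_status(int cs)
{
	unsigned int i;

	for (i = 0; i < sizeof(bidir_map) / sizeof(bidir_map[0]); i++)
		if (bidir_map[i].cs == cs)
			return bidir_map[i].ext;
	return -1;	/* default: generic error, as in the switch */
}

int main(void)
{
	printf("cs 0x02 -> ext %d\n", map_comp_status(0x02));
	return 0;
}
#endif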
1947
1948 /**
1949 * qla2x00_status_entry() - Process a Status IOCB entry.
1950 * @vha: SCSI driver HA context
1951 * @pkt: Entry pointer
1952 */
1953 static void
1954 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1955 {
1956 srb_t *sp;
1957 fc_port_t *fcport;
1958 struct scsi_cmnd *cp;
1959 sts_entry_t *sts;
1960 struct sts_entry_24xx *sts24;
1961 uint16_t comp_status;
1962 uint16_t scsi_status;
1963 uint16_t ox_id;
1964 uint8_t lscsi_status;
1965 int32_t resid;
1966 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1967 fw_resid_len;
1968 uint8_t *rsp_info, *sense_data;
1969 struct qla_hw_data *ha = vha->hw;
1970 uint32_t handle;
1971 uint16_t que;
1972 struct req_que *req;
1973 int logit = 1;
1974 int res = 0;
1975 uint16_t state_flags = 0;
1976
1977 sts = (sts_entry_t *) pkt;
1978 sts24 = (struct sts_entry_24xx *) pkt;
1979 if (IS_FWI2_CAPABLE(ha)) {
1980 comp_status = le16_to_cpu(sts24->comp_status);
1981 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1982 state_flags = le16_to_cpu(sts24->state_flags);
1983 } else {
1984 comp_status = le16_to_cpu(sts->comp_status);
1985 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1986 }
1987 handle = (uint32_t) LSW(sts->handle);
1988 que = MSW(sts->handle);
1989 req = ha->req_q_map[que];
1990
1991 /* Check for invalid queue pointer */
1992 if (req == NULL ||
1993 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
1994 ql_dbg(ql_dbg_io, vha, 0x3059,
1995 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
1996 "que=%u.\n", sts->handle, req, que);
1997 return;
1998 }
1999
2000 /* Validate handle. */
2001 if (handle < req->num_outstanding_cmds)
2002 sp = req->outstanding_cmds[handle];
2003 else
2004 sp = NULL;
2005
2006 if (sp == NULL) {
2007 ql_dbg(ql_dbg_io, vha, 0x3017,
2008 "Invalid status handle (0x%x).\n", sts->handle);
2009
2010 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2011 if (IS_P3P_TYPE(ha))
2012 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2013 else
2014 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2015 qla2xxx_wake_dpc(vha);
2016 }
2017 return;
2018 }
2019
2020 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2021 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2022 return;
2023 }
2024
2025 /* Task Management completion. */
2026 if (sp->type == SRB_TM_CMD) {
2027 qla24xx_tm_iocb_entry(vha, req, pkt);
2028 return;
2029 }
2030
2031 /* Fast path completion. */
2032 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2033 qla2x00_process_completed_request(vha, req, handle);
2034
2035 return;
2036 }
2037
2038 req->outstanding_cmds[handle] = NULL;
2039 cp = GET_CMD_SP(sp);
2040 if (cp == NULL) {
2041 ql_dbg(ql_dbg_io, vha, 0x3018,
2042 "Command already returned (0x%x/%p).\n",
2043 sts->handle, sp);
2044
2045 return;
2046 }
2047
2048 lscsi_status = scsi_status & STATUS_MASK;
2049
2050 fcport = sp->fcport;
2051
2052 ox_id = 0;
2053 sense_len = par_sense_len = rsp_info_len = resid_len =
2054 fw_resid_len = 0;
2055 if (IS_FWI2_CAPABLE(ha)) {
2056 if (scsi_status & SS_SENSE_LEN_VALID)
2057 sense_len = le32_to_cpu(sts24->sense_len);
2058 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2059 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2060 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2061 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2062 if (comp_status == CS_DATA_UNDERRUN)
2063 fw_resid_len = le32_to_cpu(sts24->residual_len);
2064 rsp_info = sts24->data;
2065 sense_data = sts24->data;
2066 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2067 ox_id = le16_to_cpu(sts24->ox_id);
2068 par_sense_len = sizeof(sts24->data);
2069 } else {
2070 if (scsi_status & SS_SENSE_LEN_VALID)
2071 sense_len = le16_to_cpu(sts->req_sense_length);
2072 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2073 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2074 resid_len = le32_to_cpu(sts->residual_length);
2075 rsp_info = sts->rsp_info;
2076 sense_data = sts->req_sense_data;
2077 par_sense_len = sizeof(sts->req_sense_data);
2078 }
2079
2080 /* Check for any FCP transport errors. */
2081 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2082 /* Sense data lies beyond any FCP RESPONSE data. */
2083 if (IS_FWI2_CAPABLE(ha)) {
2084 sense_data += rsp_info_len;
2085 par_sense_len -= rsp_info_len;
2086 }
2087 if (rsp_info_len > 3 && rsp_info[3]) {
2088 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2089 "FCP I/O protocol failure (0x%x/0x%x).\n",
2090 rsp_info_len, rsp_info[3]);
2091
2092 res = DID_BUS_BUSY << 16;
2093 goto out;
2094 }
2095 }
2096
2097 /* Check for overrun. */
2098 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2099 scsi_status & SS_RESIDUAL_OVER)
2100 comp_status = CS_DATA_OVERRUN;
2101
2102 /*
2103 * Based on the host and SCSI status, generate the Linux status code.
2104 */
2105 switch (comp_status) {
2106 case CS_COMPLETE:
2107 case CS_QUEUE_FULL:
2108 if (scsi_status == 0) {
2109 res = DID_OK << 16;
2110 break;
2111 }
2112 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2113 resid = resid_len;
2114 scsi_set_resid(cp, resid);
2115
2116 if (!lscsi_status &&
2117 ((unsigned)(scsi_bufflen(cp) - resid) <
2118 cp->underflow)) {
2119 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2120 "Mid-layer underflow "
2121 "detected (0x%x of 0x%x bytes).\n",
2122 resid, scsi_bufflen(cp));
2123
2124 res = DID_ERROR << 16;
2125 break;
2126 }
2127 }
2128 res = DID_OK << 16 | lscsi_status;
2129
2130 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2131 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2132 "QUEUE FULL detected.\n");
2133 break;
2134 }
2135 logit = 0;
2136 if (lscsi_status != SS_CHECK_CONDITION)
2137 break;
2138
2139 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2140 if (!(scsi_status & SS_SENSE_LEN_VALID))
2141 break;
2142
2143 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2144 rsp, res);
2145 break;
2146
2147 case CS_DATA_UNDERRUN:
2148 /* Use F/W calculated residual length. */
2149 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2150 scsi_set_resid(cp, resid);
2151 if (scsi_status & SS_RESIDUAL_UNDER) {
2152 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2153 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2154 "Dropped frame(s) detected "
2155 "(0x%x of 0x%x bytes).\n",
2156 resid, scsi_bufflen(cp));
2157
2158 res = DID_ERROR << 16 | lscsi_status;
2159 goto check_scsi_status;
2160 }
2161
2162 if (!lscsi_status &&
2163 ((unsigned)(scsi_bufflen(cp) - resid) <
2164 cp->underflow)) {
2165 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2166 "Mid-layer underflow "
2167 "detected (0x%x of 0x%x bytes).\n",
2168 resid, scsi_bufflen(cp));
2169
2170 res = DID_ERROR << 16;
2171 break;
2172 }
2173 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2174 lscsi_status != SAM_STAT_BUSY) {
2175 /*
2176 * A SCSI status of TASK SET FULL or BUSY means the task
2177 * was not completed.
2178 */
2179
2180 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2181 "Dropped frame(s) detected (0x%x "
2182 "of 0x%x bytes).\n", resid,
2183 scsi_bufflen(cp));
2184
2185 res = DID_ERROR << 16 | lscsi_status;
2186 goto check_scsi_status;
2187 } else {
2188 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2189 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2190 scsi_status, lscsi_status);
2191 }
2192
2193 res = DID_OK << 16 | lscsi_status;
2194 logit = 0;
2195
2196 check_scsi_status:
2197 /*
2198 * Check to see if SCSI Status is non zero. If so report SCSI
2199 * Status.
2200 */
2201 if (lscsi_status != 0) {
2202 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2203 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2204 "QUEUE FULL detected.\n");
2205 logit = 1;
2206 break;
2207 }
2208 if (lscsi_status != SS_CHECK_CONDITION)
2209 break;
2210
2211 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2212 if (!(scsi_status & SS_SENSE_LEN_VALID))
2213 break;
2214
2215 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2216 sense_len, rsp, res);
2217 }
2218 break;
2219
2220 case CS_PORT_LOGGED_OUT:
2221 case CS_PORT_CONFIG_CHG:
2222 case CS_PORT_BUSY:
2223 case CS_INCOMPLETE:
2224 case CS_PORT_UNAVAILABLE:
2225 case CS_TIMEOUT:
2226 case CS_RESET:
2227
2228 /*
2229 * We are going to have the fc class block the rport
2230 * while we try to recover so instruct the mid layer
2231 * to requeue until the class decides how to handle this.
2232 */
2233 res = DID_TRANSPORT_DISRUPTED << 16;
2234
2235 if (comp_status == CS_TIMEOUT) {
2236 if (IS_FWI2_CAPABLE(ha))
2237 break;
2238 else if ((le16_to_cpu(sts->status_flags) &
2239 SF_LOGOUT_SENT) == 0)
2240 break;
2241 }
2242
2243 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2244 "Port to be marked lost on fcport=%02x%02x%02x, current "
2245 "port state= %s.\n", fcport->d_id.b.domain,
2246 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2247 port_state_str[atomic_read(&fcport->state)]);
2248
2249 if (atomic_read(&fcport->state) == FCS_ONLINE)
2250 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2251 break;
2252
2253 case CS_ABORTED:
2254 res = DID_RESET << 16;
2255 break;
2256
2257 case CS_DIF_ERROR:
2258 logit = qla2x00_handle_dif_error(sp, sts24);
2259 res = cp->result;
2260 break;
2261
2262 case CS_TRANSPORT:
2263 res = DID_ERROR << 16;
2264
2265 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2266 break;
2267
2268 if (state_flags & BIT_4)
2269 scmd_printk(KERN_WARNING, cp,
2270 "Unsupported device '%s' found.\n",
2271 cp->device->vendor);
2272 break;
2273
2274 default:
2275 res = DID_ERROR << 16;
2276 break;
2277 }
2278
2279 out:
2280 if (logit)
2281 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2282 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2283 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2284 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
2285 comp_status, scsi_status, res, vha->host_no,
2286 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2287 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2288 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2289 resid_len, fw_resid_len);
2290
2291 if (rsp->status_srb == NULL)
2292 sp->done(ha, sp, res);
2293 }
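/*
 * Editor's sketch (not part of the driver): the handle decoded at the
 * top of qla2x00_status_entry() packs two values into one 32-bit word;
 * the low word indexes outstanding_cmds[] and the high word selects the
 * request queue. LSW()/MSW() are re-derived here for illustration only.
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>
#include <stdint.h>

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))

int main(void)
{
	uint32_t handle = (3u << 16) | 0x002a;	/* queue 3, slot 0x2a */

	printf("queue=%u slot=0x%x\n",
	       (unsigned int)MSW(handle), (unsigned int)LSW(handle));
	return 0;
}
#endif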
2294
2295 /**
2296 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2297 * @rsp: response queue
2298 * @pkt: Entry pointer
2299 *
2300 * Extended sense data.
2301 */
2302 static void
2303 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2304 {
2305 uint8_t sense_sz = 0;
2306 struct qla_hw_data *ha = rsp->hw;
2307 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2308 srb_t *sp = rsp->status_srb;
2309 struct scsi_cmnd *cp;
2310 uint32_t sense_len;
2311 uint8_t *sense_ptr;
2312
2313 if (!sp || !GET_CMD_SENSE_LEN(sp))
2314 return;
2315
2316 sense_len = GET_CMD_SENSE_LEN(sp);
2317 sense_ptr = GET_CMD_SENSE_PTR(sp);
2318
2319 cp = GET_CMD_SP(sp);
2320 if (cp == NULL) {
2321 ql_log(ql_log_warn, vha, 0x3025,
2322 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2323
2324 rsp->status_srb = NULL;
2325 return;
2326 }
2327
2328 if (sense_len > sizeof(pkt->data))
2329 sense_sz = sizeof(pkt->data);
2330 else
2331 sense_sz = sense_len;
2332
2333 /* Move sense data. */
2334 if (IS_FWI2_CAPABLE(ha))
2335 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2336 memcpy(sense_ptr, pkt->data, sense_sz);
2337 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2338 sense_ptr, sense_sz);
2339
2340 sense_len -= sense_sz;
2341 sense_ptr += sense_sz;
2342
2343 SET_CMD_SENSE_PTR(sp, sense_ptr);
2344 SET_CMD_SENSE_LEN(sp, sense_len);
2345
2346 /* Place command on done queue. */
2347 if (sense_len == 0) {
2348 rsp->status_srb = NULL;
2349 sp->done(ha, sp, cp->result);
2350 }
2351 }
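/*
 * Editor's sketch (not part of the driver): sense data larger than one
 * status IOCB arrives in fixed-size continuation chunks, and the
 * function above copies min(remaining, chunk) bytes per entry while
 * advancing the saved pointer and length. The chunk size below is an
 * arbitrary stand-in for sizeof(pkt->data).
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>
#include <string.h>

#define CHUNK	44	/* illustrative per-entry payload size */

static void copy_chunks(unsigned char *dst, const unsigned char *src,
			unsigned int len)
{
	while (len) {
		unsigned int sz = len > CHUNK ? CHUNK : len;

		memcpy(dst, src, sz);
		dst += sz;
		src += sz;
		len -= sz;	/* command is done when this reaches 0 */
	}
}

int main(void)
{
	unsigned char src[100], dst[100];

	memset(src, 0xab, sizeof(src));
	copy_chunks(dst, src, sizeof(src));
	printf("copied %u bytes in chunks of %u\n", 100u, (unsigned)CHUNK);
	return 0;
}
#endif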
2352
2353 /**
2354 * qla2x00_error_entry() - Process an error entry.
2355 * @vha: SCSI driver HA context
2356 * @pkt: Entry pointer
2357 */
2358 static void
2359 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2360 {
2361 srb_t *sp;
2362 struct qla_hw_data *ha = vha->hw;
2363 const char func[] = "ERROR-IOCB";
2364 uint16_t que = MSW(pkt->handle);
2365 struct req_que *req = NULL;
2366 int res = DID_ERROR << 16;
2367
2368 ql_dbg(ql_dbg_async, vha, 0x502a,
2369 "type of error status in response: 0x%x\n", pkt->entry_status);
2370
2371 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2372 goto fatal;
2373
2374 req = ha->req_q_map[que];
2375
2376 if (pkt->entry_status & RF_BUSY)
2377 res = DID_BUS_BUSY << 16;
2378
2379 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2380 if (sp) {
2381 sp->done(ha, sp, res);
2382 return;
2383 }
2384 fatal:
2385 ql_log(ql_log_warn, vha, 0x5030,
2386 "Error entry - invalid handle/queue.\n");
2387
2388 if (IS_P3P_TYPE(ha))
2389 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2390 else
2391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2392 qla2xxx_wake_dpc(vha);
2393 }
2394
2395 /**
2396 * qla24xx_mbx_completion() - Process mailbox command completions.
2397 * @vha: SCSI driver HA context
2398 * @mb0: Mailbox0 register
2399 */
2400 static void
2401 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2402 {
2403 uint16_t cnt;
2404 uint32_t mboxes;
2405 uint16_t __iomem *wptr;
2406 struct qla_hw_data *ha = vha->hw;
2407 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2408
2409 /* Read all mbox registers? */
2410 mboxes = (1 << ha->mbx_count) - 1;
2411 if (!ha->mcp)
2412 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2413 else
2414 mboxes = ha->mcp->in_mb;
2415
2416 /* Load return mailbox registers. */
2417 ha->flags.mbox_int = 1;
2418 ha->mailbox_out[0] = mb0;
2419 mboxes >>= 1;
2420 wptr = (uint16_t __iomem *)&reg->mailbox1;
2421
2422 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2423 if (mboxes & BIT_0)
2424 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2425
2426 mboxes >>= 1;
2427 wptr++;
2428 }
2429 }
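/*
 * Editor's sketch (not part of the driver): the loop above captures
 * only the mailbox registers whose bit is set in the caller's in_mb
 * mask, shifting the mask right once per register. The same walk over
 * a fake register file:
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	unsigned int regs[8] = { 0x4000, 1, 2, 3, 4, 5, 6, 7 };
	unsigned int out[8] = { 0 };
	unsigned int mboxes = 0x0b;	/* want mailboxes 0, 1 and 3 */
	int cnt;

	out[0] = regs[0];		/* mailbox0 is always captured */
	mboxes >>= 1;
	for (cnt = 1; cnt < 8; cnt++, mboxes >>= 1)
		if (mboxes & 1)
			out[cnt] = regs[cnt];

	printf("out[1]=%u out[2]=%u out[3]=%u\n", out[1], out[2], out[3]);
	return 0;
}
#endif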
2430
2431 static void
2432 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2433 struct abort_entry_24xx *pkt)
2434 {
2435 const char func[] = "ABT_IOCB";
2436 srb_t *sp;
2437 struct srb_iocb *abt;
2438
2439 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2440 if (!sp)
2441 return;
2442
2443 abt = &sp->u.iocb_cmd;
2444 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2445 sp->done(vha, sp, 0);
2446 }
2447
2448 /**
2449 * qla24xx_process_response_queue() - Process response queue entries.
2450 * @vha: SCSI driver HA context
2451 */
2452 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2453 struct rsp_que *rsp)
2454 {
2455 struct sts_entry_24xx *pkt;
2456 struct qla_hw_data *ha = vha->hw;
2457
2458 if (!vha->flags.online)
2459 return;
2460
2461 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2462 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2463
2464 rsp->ring_index++;
2465 if (rsp->ring_index == rsp->length) {
2466 rsp->ring_index = 0;
2467 rsp->ring_ptr = rsp->ring;
2468 } else {
2469 rsp->ring_ptr++;
2470 }
2471
2472 if (pkt->entry_status != 0) {
2473 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2474
2475 if (qlt_24xx_process_response_error(vha, pkt))
2476 goto process_err;
2477
2478 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2479 wmb();
2480 continue;
2481 }
2482 process_err:
2483
2484 switch (pkt->entry_type) {
2485 case STATUS_TYPE:
2486 qla2x00_status_entry(vha, rsp, pkt);
2487 break;
2488 case STATUS_CONT_TYPE:
2489 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2490 break;
2491 case VP_RPT_ID_IOCB_TYPE:
2492 qla24xx_report_id_acquisition(vha,
2493 (struct vp_rpt_id_entry_24xx *)pkt);
2494 break;
2495 case LOGINOUT_PORT_IOCB_TYPE:
2496 qla24xx_logio_entry(vha, rsp->req,
2497 (struct logio_entry_24xx *)pkt);
2498 break;
2499 case CT_IOCB_TYPE:
2500 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2501 break;
2502 case ELS_IOCB_TYPE:
2503 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2504 break;
2505 case ABTS_RECV_24XX:
2506 /* ensure that the ATIO queue is empty */
2507 qlt_24xx_process_atio_queue(vha);
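/* fall through: ABTS responses share the target-mode path below */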
2508 case ABTS_RESP_24XX:
2509 case CTIO_TYPE7:
2510 case NOTIFY_ACK_TYPE:
2511 case CTIO_CRC2:
2512 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2513 break;
2514 case MARKER_TYPE:
2515 /* Nothing to do for markers; this case only keeps them
2516  * from falling into the default (unknown type) path.
2517  */
2518 break;
2519 case ABORT_IOCB_TYPE:
2520 qla24xx_abort_iocb_entry(vha, rsp->req,
2521 (struct abort_entry_24xx *)pkt);
2522 break;
2523 default:
2524 /* Type Not Supported. */
2525 ql_dbg(ql_dbg_async, vha, 0x5042,
2526 "Received unknown response pkt type %x "
2527 "entry status=%x.\n",
2528 pkt->entry_type, pkt->entry_status);
2529 break;
2530 }
2531 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2532 wmb();
2533 }
2534
2535 /* Adjust ring index */
2536 if (IS_P3P_TYPE(ha)) {
2537 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2538 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2539 } else
2540 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2541 }
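/*
 * Editor's sketch (not part of the driver): the response ring is
 * consumed with a wrap-around index, and each entry is stamped
 * RESPONSE_PROCESSED so a later scan stops at the first unconsumed
 * slot. The wrap logic in isolation:
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	unsigned int length = 4, index = 2, i;

	for (i = 0; i < 5; i++) {
		index++;
		if (index == length)
			index = 0;	/* wrap back to ring start */
		printf("ring_index=%u\n", index);
	}
	return 0;
}
#endif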
2542
2543 static void
2544 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2545 {
2546 int rval;
2547 uint32_t cnt;
2548 struct qla_hw_data *ha = vha->hw;
2549 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2550
2551 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2552 !IS_QLA27XX(ha))
2553 return;
2554
2555 rval = QLA_SUCCESS;
2556 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2557 RD_REG_DWORD(&reg->iobase_addr);
2558 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2559 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2560 rval == QLA_SUCCESS; cnt--) {
2561 if (cnt) {
2562 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2563 udelay(10);
2564 } else
2565 rval = QLA_FUNCTION_TIMEOUT;
2566 }
2567 if (rval == QLA_SUCCESS)
2568 goto next_test;
2569
2570 rval = QLA_SUCCESS;
2571 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2572 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2573 rval == QLA_SUCCESS; cnt--) {
2574 if (cnt) {
2575 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2576 udelay(10);
2577 } else
2578 rval = QLA_FUNCTION_TIMEOUT;
2579 }
2580 if (rval != QLA_SUCCESS)
2581 goto done;
2582
2583 next_test:
2584 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2585 ql_log(ql_log_info, vha, 0x504c,
2586 "Additional code -- 0x55AA.\n");
2587
2588 done:
2589 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2590 RD_REG_DWORD(&reg->iobase_window);
2591 }
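/*
 * Editor's sketch (not part of the driver): both waits above are
 * bounded count-down polls that re-arm the window register on each try
 * and declare a timeout once the budget is spent. The shape of that
 * loop; poll_bit() and its callback are hypothetical.
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>

static int poll_bit(int (*read_bit)(void), int budget)
{
	while (budget-- > 0) {
		if (read_bit())
			return 0;	/* bit came up: success */
		/* udelay(10) would sit here in the driver */
	}
	return -1;			/* QLA_FUNCTION_TIMEOUT analogue */
}

static int never_set(void) { return 0; }

int main(void)
{
	printf("rval=%d\n", poll_bit(never_set, 100));
	return 0;
}
#endif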
2592
2593 /**
2594 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
2595 * @irq: interrupt number
2596 * @dev_id: SCSI driver HA context
2597 *
2598 * Called by system whenever the host adapter generates an interrupt.
2599 *
2600 * Returns handled flag.
2601 */
2602 irqreturn_t
2603 qla24xx_intr_handler(int irq, void *dev_id)
2604 {
2605 scsi_qla_host_t *vha;
2606 struct qla_hw_data *ha;
2607 struct device_reg_24xx __iomem *reg;
2608 int status;
2609 unsigned long iter;
2610 uint32_t stat;
2611 uint32_t hccr;
2612 uint16_t mb[8];
2613 struct rsp_que *rsp;
2614 unsigned long flags;
2615
2616 rsp = (struct rsp_que *) dev_id;
2617 if (!rsp) {
2618 ql_log(ql_log_info, NULL, 0x5059,
2619 "%s: NULL response queue pointer.\n", __func__);
2620 return IRQ_NONE;
2621 }
2622
2623 ha = rsp->hw;
2624 reg = &ha->iobase->isp24;
2625 status = 0;
2626
2627 if (unlikely(pci_channel_offline(ha->pdev)))
2628 return IRQ_HANDLED;
2629
2630 spin_lock_irqsave(&ha->hardware_lock, flags);
2631 vha = pci_get_drvdata(ha->pdev);
2632 for (iter = 50; iter--; ) {
2633 stat = RD_REG_DWORD(&reg->host_status);
2634 if (qla2x00_check_reg_for_disconnect(vha, stat))
2635 break;
2636 if (stat & HSRX_RISC_PAUSED) {
2637 if (unlikely(pci_channel_offline(ha->pdev)))
2638 break;
2639
2640 hccr = RD_REG_DWORD(&reg->hccr);
2641
2642 ql_log(ql_log_warn, vha, 0x504b,
2643 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2644 hccr);
2645
2646 qla2xxx_check_risc_status(vha);
2647
2648 ha->isp_ops->fw_dump(vha, 1);
2649 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2650 break;
2651 } else if ((stat & HSRX_RISC_INT) == 0)
2652 break;
2653
2654 switch (stat & 0xff) {
2655 case INTR_ROM_MB_SUCCESS:
2656 case INTR_ROM_MB_FAILED:
2657 case INTR_MB_SUCCESS:
2658 case INTR_MB_FAILED:
2659 qla24xx_mbx_completion(vha, MSW(stat));
2660 status |= MBX_INTERRUPT;
2661
2662 break;
2663 case INTR_ASYNC_EVENT:
2664 mb[0] = MSW(stat);
2665 mb[1] = RD_REG_WORD(&reg->mailbox1);
2666 mb[2] = RD_REG_WORD(&reg->mailbox2);
2667 mb[3] = RD_REG_WORD(&reg->mailbox3);
2668 qla2x00_async_event(vha, rsp, mb);
2669 break;
2670 case INTR_RSP_QUE_UPDATE:
2671 case INTR_RSP_QUE_UPDATE_83XX:
2672 qla24xx_process_response_queue(vha, rsp);
2673 break;
2674 case INTR_ATIO_QUE_UPDATE:
2675 qlt_24xx_process_atio_queue(vha);
2676 break;
2677 case INTR_ATIO_RSP_QUE_UPDATE:
2678 qlt_24xx_process_atio_queue(vha);
2679 qla24xx_process_response_queue(vha, rsp);
2680 break;
2681 default:
2682 ql_dbg(ql_dbg_async, vha, 0x504f,
2683 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2684 break;
2685 }
2686 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2687 RD_REG_DWORD_RELAXED(&reg->hccr);
2688 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2689 ndelay(3500);
2690 }
2691 qla2x00_handle_mbx_completion(ha, status);
2692 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2693
2694 return IRQ_HANDLED;
2695 }
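/*
 * Editor's sketch (not part of the driver): one host_status read
 * carries both the interrupt type (low byte, switched on above) and
 * mailbox0 (high 16 bits, forwarded to the mailbox/async handlers).
 * The register value below is hypothetical.
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t stat = 0x80128001;	/* hypothetical host_status */

	printf("intr type=0x%02x mailbox0=0x%04x\n",
	       (unsigned int)(stat & 0xff),
	       (unsigned int)((stat >> 16) & 0xffff));
	return 0;
}
#endif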
2696
2697 static irqreturn_t
2698 qla24xx_msix_rsp_q(int irq, void *dev_id)
2699 {
2700 struct qla_hw_data *ha;
2701 struct rsp_que *rsp;
2702 struct device_reg_24xx __iomem *reg;
2703 struct scsi_qla_host *vha;
2704 unsigned long flags;
2705 uint32_t stat = 0;
2706
2707 rsp = (struct rsp_que *) dev_id;
2708 if (!rsp) {
2709 ql_log(ql_log_info, NULL, 0x505a,
2710 "%s: NULL response queue pointer.\n", __func__);
2711 return IRQ_NONE;
2712 }
2713 ha = rsp->hw;
2714 reg = &ha->iobase->isp24;
2715
2716 spin_lock_irqsave(&ha->hardware_lock, flags);
2717
2718 vha = pci_get_drvdata(ha->pdev);
2719 /*
2720 * Use host_status register to check for PCI disconnection before
2721 * we process the response queue.
2722 */
2723 stat = RD_REG_DWORD(&reg->host_status);
2724 if (qla2x00_check_reg_for_disconnect(vha, stat))
2725 goto out;
2726 qla24xx_process_response_queue(vha, rsp);
2727 if (!ha->flags.disable_msix_handshake) {
2728 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2729 RD_REG_DWORD_RELAXED(&reg->hccr);
2730 }
2731 out:
2732 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2733
2734 return IRQ_HANDLED;
2735 }
2736
2737 static irqreturn_t
2738 qla25xx_msix_rsp_q(int irq, void *dev_id)
2739 {
2740 struct qla_hw_data *ha;
2741 scsi_qla_host_t *vha;
2742 struct rsp_que *rsp;
2743 struct device_reg_24xx __iomem *reg;
2744 unsigned long flags;
2745 uint32_t hccr = 0;
2746
2747 rsp = (struct rsp_que *) dev_id;
2748 if (!rsp) {
2749 ql_log(ql_log_info, NULL, 0x505b,
2750 "%s: NULL response queue pointer.\n", __func__);
2751 return IRQ_NONE;
2752 }
2753 ha = rsp->hw;
2754 vha = pci_get_drvdata(ha->pdev);
2755
2756 /* Clear the interrupt, if enabled, for this response queue */
2757 if (!ha->flags.disable_msix_handshake) {
2758 reg = &ha->iobase->isp24;
2759 spin_lock_irqsave(&ha->hardware_lock, flags);
2760 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2761 hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
2762 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2763 }
2764 if (qla2x00_check_reg_for_disconnect(vha, hccr))
2765 goto out;
2766 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2767
2768 out:
2769 return IRQ_HANDLED;
2770 }
2771
2772 static irqreturn_t
2773 qla24xx_msix_default(int irq, void *dev_id)
2774 {
2775 scsi_qla_host_t *vha;
2776 struct qla_hw_data *ha;
2777 struct rsp_que *rsp;
2778 struct device_reg_24xx __iomem *reg;
2779 int status;
2780 uint32_t stat;
2781 uint32_t hccr;
2782 uint16_t mb[8];
2783 unsigned long flags;
2784
2785 rsp = (struct rsp_que *) dev_id;
2786 if (!rsp) {
2787 ql_log(ql_log_info, NULL, 0x505c,
2788 "%s: NULL response queue pointer.\n", __func__);
2789 return IRQ_NONE;
2790 }
2791 ha = rsp->hw;
2792 reg = &ha->iobase->isp24;
2793 status = 0;
2794
2795 spin_lock_irqsave(&ha->hardware_lock, flags);
2796 vha = pci_get_drvdata(ha->pdev);
2797 do {
2798 stat = RD_REG_DWORD(&reg->host_status);
2799 if (qla2x00_check_reg_for_disconnect(vha, stat))
2800 break;
2801 if (stat & HSRX_RISC_PAUSED) {
2802 if (unlikely(pci_channel_offline(ha->pdev)))
2803 break;
2804
2805 hccr = RD_REG_DWORD(&reg->hccr);
2806
2807 ql_log(ql_log_info, vha, 0x5050,
2808 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2809 hccr);
2810
2811 qla2xxx_check_risc_status(vha);
2812
2813 ha->isp_ops->fw_dump(vha, 1);
2814 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2815 break;
2816 } else if ((stat & HSRX_RISC_INT) == 0)
2817 break;
2818
2819 switch (stat & 0xff) {
2820 case INTR_ROM_MB_SUCCESS:
2821 case INTR_ROM_MB_FAILED:
2822 case INTR_MB_SUCCESS:
2823 case INTR_MB_FAILED:
2824 qla24xx_mbx_completion(vha, MSW(stat));
2825 status |= MBX_INTERRUPT;
2826
2827 break;
2828 case INTR_ASYNC_EVENT:
2829 mb[0] = MSW(stat);
2830 mb[1] = RD_REG_WORD(&reg->mailbox1);
2831 mb[2] = RD_REG_WORD(&reg->mailbox2);
2832 mb[3] = RD_REG_WORD(&reg->mailbox3);
2833 qla2x00_async_event(vha, rsp, mb);
2834 break;
2835 case INTR_RSP_QUE_UPDATE:
2836 case INTR_RSP_QUE_UPDATE_83XX:
2837 qla24xx_process_response_queue(vha, rsp);
2838 break;
2839 case INTR_ATIO_QUE_UPDATE:
2840 qlt_24xx_process_atio_queue(vha);
2841 break;
2842 case INTR_ATIO_RSP_QUE_UPDATE:
2843 qlt_24xx_process_atio_queue(vha);
2844 qla24xx_process_response_queue(vha, rsp);
2845 break;
2846 default:
2847 ql_dbg(ql_dbg_async, vha, 0x5051,
2848 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2849 break;
2850 }
2851 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2852 } while (0);
2853 qla2x00_handle_mbx_completion(ha, status);
2854 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2855
2856 return IRQ_HANDLED;
2857 }
2858
2859 /* Interrupt handling helpers. */
2860
2861 struct qla_init_msix_entry {
2862 const char *name;
2863 irq_handler_t handler;
2864 };
2865
2866 static struct qla_init_msix_entry msix_entries[3] = {
2867 { "qla2xxx (default)", qla24xx_msix_default },
2868 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2869 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2870 };
2871
2872 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2873 { "qla2xxx (default)", qla82xx_msix_default },
2874 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2875 };
2876
2877 static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
2878 { "qla2xxx (default)", qla24xx_msix_default },
2879 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2880 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
2881 };
2882
2883 static void
2884 qla24xx_disable_msix(struct qla_hw_data *ha)
2885 {
2886 int i;
2887 struct qla_msix_entry *qentry;
2888 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2889
2890 for (i = 0; i < ha->msix_count; i++) {
2891 qentry = &ha->msix_entries[i];
2892 if (qentry->have_irq)
2893 free_irq(qentry->vector, qentry->rsp);
2894 }
2895 pci_disable_msix(ha->pdev);
2896 kfree(ha->msix_entries);
2897 ha->msix_entries = NULL;
2898 ha->flags.msix_enabled = 0;
2899 ql_dbg(ql_dbg_init, vha, 0x0042,
2900 "Disabled the MSI.\n");
2901 }
2902
2903 static int
2904 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2905 {
2906 #define MIN_MSIX_COUNT 2
2907 #define ATIO_VECTOR 2
2908 int i, ret;
2909 struct msix_entry *entries;
2910 struct qla_msix_entry *qentry;
2911 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2912
2913 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2914 GFP_KERNEL);
2915 if (!entries) {
2916 ql_log(ql_log_warn, vha, 0x00bc,
2917 "Failed to allocate memory for msix_entry.\n");
2918 return -ENOMEM;
2919 }
2920
2921 for (i = 0; i < ha->msix_count; i++)
2922 entries[i].entry = i;
2923
2924 ret = pci_enable_msix_range(ha->pdev,
2925 entries, MIN_MSIX_COUNT, ha->msix_count);
2926 if (ret < 0) {
2927 ql_log(ql_log_fatal, vha, 0x00c7,
2928 "MSI-X: Failed to enable support, "
2929 "giving up -- %d/%d.\n",
2930 ha->msix_count, ret);
2931 goto msix_out;
2932 } else if (ret < ha->msix_count) {
2933 ql_log(ql_log_warn, vha, 0x00c6,
2934 "MSI-X: Failed to enable support "
2935 "-- %d/%d\n Retry with %d vectors.\n",
2936 ha->msix_count, ret, ret);
2937 }
2938 ha->msix_count = ret;
2939 ha->max_rsp_queues = ha->msix_count - 1;
2940 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2941 ha->msix_count, GFP_KERNEL);
2942 if (!ha->msix_entries) {
2943 ql_log(ql_log_fatal, vha, 0x00c8,
2944 "Failed to allocate memory for ha->msix_entries.\n");
2945 ret = -ENOMEM;
2946 goto msix_out;
2947 }
2948 ha->flags.msix_enabled = 1;
2949
2950 for (i = 0; i < ha->msix_count; i++) {
2951 qentry = &ha->msix_entries[i];
2952 qentry->vector = entries[i].vector;
2953 qentry->entry = entries[i].entry;
2954 qentry->have_irq = 0;
2955 qentry->rsp = NULL;
2956 }
2957
2958 /* Enable MSI-X vectors for the base queue */
2959 for (i = 0; i < 2; i++) {
2960 qentry = &ha->msix_entries[i];
2961 if (IS_P3P_TYPE(ha))
2962 ret = request_irq(qentry->vector,
2963 qla82xx_msix_entries[i].handler,
2964 0, qla82xx_msix_entries[i].name, rsp);
2965 else
2966 ret = request_irq(qentry->vector,
2967 msix_entries[i].handler,
2968 0, msix_entries[i].name, rsp);
2969 if (ret)
2970 goto msix_register_fail;
2971 qentry->have_irq = 1;
2972 qentry->rsp = rsp;
2973 rsp->msix = qentry;
2974 }
2975
2976 /*
2977 * If target mode is enabled, also request the vector for the ATIO
2978 * queue.
2979 */
2980 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2981 qentry = &ha->msix_entries[ATIO_VECTOR];
2982 ret = request_irq(qentry->vector,
2983 qla83xx_msix_entries[ATIO_VECTOR].handler,
2984 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
2985 qentry->have_irq = 1;
2986 qentry->rsp = rsp;
2987 rsp->msix = qentry;
2988 }
2989
2990 msix_register_fail:
2991 if (ret) {
2992 ql_log(ql_log_fatal, vha, 0x00cb,
2993 "MSI-X: unable to register handler -- %x/%d.\n",
2994 qentry->vector, ret);
2995 qla24xx_disable_msix(ha);
2996 ha->mqenable = 0;
2997 goto msix_out;
2998 }
2999
3000 /* Enable MSI-X vector for response queue update for queue 0 */
3001 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3002 if (ha->msixbase && ha->mqiobase &&
3003 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
3004 ha->mqenable = 1;
3005 } else
3006 if (ha->mqiobase
3007 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
3008 ha->mqenable = 1;
3009 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3010 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3011 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3012 ql_dbg(ql_dbg_init, vha, 0x0055,
3013 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3014 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3015
3016 msix_out:
3017 kfree(entries);
3018 return ret;
3019 }
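/*
 * Editor's sketch (not part of the driver): pci_enable_msix_range()
 * takes a [min, max] vector budget and returns either the count
 * actually granted (at least the minimum) or a negative errno, after
 * which the code above shrinks msix_count to what was granted. A toy
 * allocator with the same contract:
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>

static int enable_range(int avail, int minvec, int maxvec)
{
	if (avail < minvec)
		return -28;		/* -ENOSPC analogue */
	return avail < maxvec ? avail : maxvec;
}

int main(void)
{
	int ret = enable_range(3 /* hw grants 3 */, 2, 5);

	if (ret < 0)
		printf("failed: %d\n", ret);
	else
		printf("granted %d vectors\n", ret);	/* prints 3 */
	return 0;
}
#endif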
3020
3021 int
3022 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3023 {
3024 int ret = QLA_FUNCTION_FAILED;
3025 device_reg_t *reg = ha->iobase;
3026 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3027
3028 /* If possible, enable MSI-X. */
3029 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3030 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
3031 !IS_QLA27XX(ha))
3032 goto skip_msi;
3033
3034 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3035 (ha->pdev->subsystem_device == 0x7040 ||
3036 ha->pdev->subsystem_device == 0x7041 ||
3037 ha->pdev->subsystem_device == 0x1705)) {
3038 ql_log(ql_log_warn, vha, 0x0034,
3039 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3040 ha->pdev->subsystem_vendor,
3041 ha->pdev->subsystem_device);
3042 goto skip_msi;
3043 }
3044
3045 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3046 ql_log(ql_log_warn, vha, 0x0035,
3047 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3048 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3049 goto skip_msix;
3050 }
3051
3052 ret = qla24xx_enable_msix(ha, rsp);
3053 if (!ret) {
3054 ql_dbg(ql_dbg_init, vha, 0x0036,
3055 "MSI-X: Enabled (0x%X, 0x%X).\n",
3056 ha->chip_revision, ha->fw_attributes);
3057 goto clear_risc_ints;
3058 }
3059
3060 skip_msix:
3061
3062 ql_log(ql_log_info, vha, 0x0037,
3063 "Falling back-to MSI mode -%d.\n", ret);
3064
3065 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3066 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3067 !IS_QLA27XX(ha))
3068 goto skip_msi;
3069
3070 ret = pci_enable_msi(ha->pdev);
3071 if (!ret) {
3072 ql_dbg(ql_dbg_init, vha, 0x0038,
3073 "MSI: Enabled.\n");
3074 ha->flags.msi_enabled = 1;
3075 } else
3076 ql_log(ql_log_warn, vha, 0x0039,
3077 "Falling back-to INTa mode -- %d.\n", ret);
3078 skip_msi:
3079
3080 /* Skip INTx on ISP82xx. */
3081 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3082 return QLA_FUNCTION_FAILED;
3083
3084 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3085 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3086 QLA2XXX_DRIVER_NAME, rsp);
3087 if (ret) {
3088 ql_log(ql_log_warn, vha, 0x003a,
3089 "Failed to reserve interrupt %d already in use.\n",
3090 ha->pdev->irq);
3091 goto fail;
3092 } else if (!ha->flags.msi_enabled) {
3093 ql_dbg(ql_dbg_init, vha, 0x0125,
3094 "INTa mode: Enabled.\n");
3095 ha->flags.mr_intr_valid = 1;
3096 }
3097
3098 clear_risc_ints:
3099
3100 spin_lock_irq(&ha->hardware_lock);
3101 if (!IS_FWI2_CAPABLE(ha))
3102 WRT_REG_WORD(&reg->isp.semaphore, 0);
3103 spin_unlock_irq(&ha->hardware_lock);
3104
3105 fail:
3106 return ret;
3107 }
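/*
 * Editor's sketch (not part of the driver): qla2x00_request_irqs() is
 * a three-rung fallback ladder: try MSI-X, fall back to MSI, then take
 * a (possibly shared) legacy INTx line. Its control flow, reduced to
 * shape; the names are hypothetical.
 */
#if 0	/* standalone illustration, not compiled into the driver */
#include <stdio.h>

enum irq_mode { MODE_MSIX, MODE_MSI, MODE_INTX, MODE_NONE };

static enum irq_mode pick_irq_mode(int msix_ok, int msi_ok, int intx_ok)
{
	if (msix_ok)
		return MODE_MSIX;
	if (msi_ok)
		return MODE_MSI;
	if (intx_ok)
		return MODE_INTX;	/* requested with IRQF_SHARED */
	return MODE_NONE;
}

int main(void)
{
	printf("mode=%d\n", pick_irq_mode(0, 1, 1));	/* MSI */
	return 0;
}
#endif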
3108
3109 void
3110 qla2x00_free_irqs(scsi_qla_host_t *vha)
3111 {
3112 struct qla_hw_data *ha = vha->hw;
3113 struct rsp_que *rsp;
3114
3115 /*
3116 * We need to check that ha->rsp_q_map is valid in case we are called
3117 * from a probe failure context.
3118 */
3119 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3120 return;
3121 rsp = ha->rsp_q_map[0];
3122
3123 if (ha->flags.msix_enabled)
3124 qla24xx_disable_msix(ha);
3125 else if (ha->flags.msi_enabled) {
3126 free_irq(ha->pdev->irq, rsp);
3127 pci_disable_msi(ha->pdev);
3128 } else
3129 free_irq(ha->pdev->irq, rsp);
3130 }
3131
3132
3133 int qla25xx_request_irq(struct rsp_que *rsp)
3134 {
3135 struct qla_hw_data *ha = rsp->hw;
3136 struct qla_init_msix_entry *intr = &msix_entries[2];
3137 struct qla_msix_entry *msix = rsp->msix;
3138 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3139 int ret;
3140
3141 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3142 if (ret) {
3143 ql_log(ql_log_fatal, vha, 0x00e6,
3144 "MSI-X: Unable to register handler -- %x/%d.\n",
3145 msix->vector, ret);
3146 return ret;
3147 }
3148 msix->have_irq = 1;
3149 msix->rsp = rsp;
3150 return ret;
3151 }