drivers/scsi/qla4xxx/ql4_os.c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9
10 #include <scsi/scsi_tcq.h>
11 #include <scsi/scsicam.h>
12
13 #include "ql4_def.h"
14 #include "ql4_version.h"
15 #include "ql4_glbl.h"
16 #include "ql4_dbg.h"
17 #include "ql4_inline.h"
18
19 /*
20 * Driver version
21 */
22 static char qla4xxx_version_str[40];
23
24 /*
25 * SRB allocation cache
26 */
27 static struct kmem_cache *srb_cachep;
28
29 /*
30 * Module parameter information and variables
31 */
32 int ql4xdontresethba = 0;
33 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
34 MODULE_PARM_DESC(ql4xdontresethba,
35 "Don't reset the HBA for driver recovery \n"
36 " 0 - It will reset HBA (Default)\n"
37 " 1 - It will NOT reset HBA");
38
39 int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
40 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
41 MODULE_PARM_DESC(ql4xextended_error_logging,
42 "Option to enable extended error logging, "
43 "Default is 0 - no logging, 1 - debug logging");
44
45 int ql4xenablemsix = 1;
46 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
47 MODULE_PARM_DESC(ql4xenablemsix,
48 "Set to enable MSI or MSI-X interrupt mechanism.\n"
49 " 0 = enable INTx interrupt mechanism.\n"
50 " 1 = enable MSI-X interrupt mechanism (Default).\n"
51 " 2 = enable MSI interrupt mechanism.");
52
53 #define QL4_DEF_QDEPTH 32
54 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
55 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
56 MODULE_PARM_DESC(ql4xmaxqdepth,
57 "Maximum queue depth to report for target devices.\n"
58 " Default: 32.");
59
60 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
61 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
62 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
63 "Target Session Recovery Timeout.\n"
64 " Default: 30 sec.");
65
66 /*
67 * SCSI host template entry points
68 */
69 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
70
71 /*
72 * iSCSI template entry points
73 */
74 static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
75 enum iscsi_tgt_dscvr type, uint32_t enable,
76 struct sockaddr *dst_addr);
77 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 enum iscsi_param param, char *buf);
79 static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
80 enum iscsi_param param, char *buf);
81 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
82 enum iscsi_host_param param, char *buf);
83 static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
84 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
85
86 /*
87 * SCSI host template entry points
88 */
89 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
90 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
91 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
92 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
93 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
94 static int qla4xxx_slave_alloc(struct scsi_device *device);
95 static int qla4xxx_slave_configure(struct scsi_device *device);
96 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
97 static void qla4xxx_scan_start(struct Scsi_Host *shost);
98
99 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
100 QLA82XX_LEGACY_INTR_CONFIG;
101
102 static struct scsi_host_template qla4xxx_driver_template = {
103 .module = THIS_MODULE,
104 .name = DRIVER_NAME,
105 .proc_name = DRIVER_NAME,
106 .queuecommand = qla4xxx_queuecommand,
107
108 .eh_abort_handler = qla4xxx_eh_abort,
109 .eh_device_reset_handler = qla4xxx_eh_device_reset,
110 .eh_target_reset_handler = qla4xxx_eh_target_reset,
111 .eh_host_reset_handler = qla4xxx_eh_host_reset,
112 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
113
114 .slave_configure = qla4xxx_slave_configure,
115 .slave_alloc = qla4xxx_slave_alloc,
116 .slave_destroy = qla4xxx_slave_destroy,
117
118 .scan_finished = iscsi_scan_finished,
119 .scan_start = qla4xxx_scan_start,
120
121 .this_id = -1,
122 .cmd_per_lun = 3,
123 .use_clustering = ENABLE_CLUSTERING,
124 .sg_tablesize = SG_ALL,
125
126 .max_sectors = 0xFFFF,
127 .shost_attrs = qla4xxx_host_attrs,
128 };
129
130 static struct iscsi_transport qla4xxx_iscsi_transport = {
131 .owner = THIS_MODULE,
132 .name = DRIVER_NAME,
133 .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD |
134 CAP_DATA_PATH_OFFLOAD,
135 .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
136 ISCSI_TARGET_NAME | ISCSI_TPGT |
137 ISCSI_TARGET_ALIAS,
138 .host_param_mask = ISCSI_HOST_HWADDRESS |
139 ISCSI_HOST_IPADDRESS |
140 ISCSI_HOST_INITIATOR_NAME,
141 .tgt_dscvr = qla4xxx_tgt_dscvr,
142 .get_conn_param = qla4xxx_conn_get_param,
143 .get_session_param = qla4xxx_sess_get_param,
144 .get_host_param = qla4xxx_host_get_param,
145 .session_recovery_timedout = qla4xxx_recovery_timedout,
146 };
147
148 static struct scsi_transport_template *qla4xxx_scsi_transport;
149
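/**
 * qla4xxx_eh_cmd_timed_out - SCSI midlayer command timeout handler
 * @sc: Pointer to the timed-out SCSI command
 *
 * If the target's ddb entry is not online, the block layer timer is reset
 * so the driver's own recovery can complete and clean up the command;
 * otherwise the timeout is handed back to the SCSI error handler.
 **/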
150 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
151 {
152 struct iscsi_cls_session *session;
153 struct ddb_entry *ddb_entry;
154
155 session = starget_to_session(scsi_target(sc->device));
156 ddb_entry = session->dd_data;
157
158 /* if we are not logged in then the LLD is going to clean up the cmd */
159 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
160 return BLK_EH_RESET_TIMER;
161 else
162 return BLK_EH_NOT_HANDLED;
163 }
164
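/**
 * qla4xxx_recovery_timedout - session recovery timeout handler
 * @session: iSCSI class session whose recovery timer expired
 *
 * Marks the corresponding ddb entry DEAD if it has not returned to the
 * ONLINE state within the recovery timeout.
 **/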
165 static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
166 {
167 struct ddb_entry *ddb_entry = session->dd_data;
168 struct scsi_qla_host *ha = ddb_entry->ha;
169
170 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
171 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
172
173 DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
174 "of (%d) secs exhausted, marking device DEAD.\n",
175 ha->host_no, __func__, ddb_entry->fw_ddb_index,
176 ddb_entry->sess->recovery_tmo));
177 }
178 }
179
180 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
181 enum iscsi_host_param param, char *buf)
182 {
183 struct scsi_qla_host *ha = to_qla_host(shost);
184 int len;
185
186 switch (param) {
187 case ISCSI_HOST_PARAM_HWADDRESS:
188 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
189 break;
190 case ISCSI_HOST_PARAM_IPADDRESS:
191 len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0],
192 ha->ip_address[1], ha->ip_address[2],
193 ha->ip_address[3]);
194 break;
195 case ISCSI_HOST_PARAM_INITIATOR_NAME:
196 len = sprintf(buf, "%s\n", ha->name_string);
197 break;
198 default:
199 return -ENOSYS;
200 }
201
202 return len;
203 }
204
205 static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
206 enum iscsi_param param, char *buf)
207 {
208 struct ddb_entry *ddb_entry = sess->dd_data;
209 int len;
210
211 switch (param) {
212 case ISCSI_PARAM_TARGET_NAME:
213 len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
214 ddb_entry->iscsi_name);
215 break;
216 case ISCSI_PARAM_TPGT:
217 len = sprintf(buf, "%u\n", ddb_entry->tpgt);
218 break;
219 case ISCSI_PARAM_TARGET_ALIAS:
220 len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
221 ddb_entry->iscsi_alias);
222 break;
223 default:
224 return -ENOSYS;
225 }
226
227 return len;
228 }
229
230 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
231 enum iscsi_param param, char *buf)
232 {
233 struct iscsi_cls_session *session;
234 struct ddb_entry *ddb_entry;
235 int len;
236
237 session = iscsi_dev_to_session(conn->dev.parent);
238 ddb_entry = session->dd_data;
239
240 switch (param) {
241 case ISCSI_PARAM_CONN_PORT:
242 len = sprintf(buf, "%hu\n", ddb_entry->port);
243 break;
244 case ISCSI_PARAM_CONN_ADDRESS:
245 /* TODO: what are the ipv6 bits */
246 len = sprintf(buf, "%pI4\n", &ddb_entry->ip_addr);
247 break;
248 default:
249 return -ENOSYS;
250 }
251
252 return len;
253 }
254
255 static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
256 enum iscsi_tgt_dscvr type, uint32_t enable,
257 struct sockaddr *dst_addr)
258 {
259 struct scsi_qla_host *ha;
260 struct sockaddr_in *addr;
261 struct sockaddr_in6 *addr6;
262 int ret = 0;
263
264 ha = (struct scsi_qla_host *) shost->hostdata;
265
266 switch (type) {
267 case ISCSI_TGT_DSCVR_SEND_TARGETS:
268 if (dst_addr->sa_family == AF_INET) {
269 addr = (struct sockaddr_in *)dst_addr;
270 if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
271 addr->sin_port) != QLA_SUCCESS)
272 ret = -EIO;
273 } else if (dst_addr->sa_family == AF_INET6) {
274 /*
275 * TODO: fix qla4xxx_send_tgts
276 */
277 addr6 = (struct sockaddr_in6 *)dst_addr;
278 if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
279 addr6->sin6_port) != QLA_SUCCESS)
280 ret = -EIO;
281 } else
282 ret = -ENOSYS;
283 break;
284 default:
285 ret = -ENOSYS;
286 }
287 return ret;
288 }
289
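/**
 * qla4xxx_destroy_sess - tear down the iSCSI class session for a ddb entry
 * @ddb_entry: Pointer to device database entry
 *
 * Marks the entry DEAD and removes the session from the transport class if
 * a connection exists, then frees the session.
 **/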
290 void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
291 {
292 if (!ddb_entry->sess)
293 return;
294
295 if (ddb_entry->conn) {
296 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
297 iscsi_remove_session(ddb_entry->sess);
298 }
299 iscsi_free_session(ddb_entry->sess);
300 }
301
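/**
 * qla4xxx_add_sess - register a ddb entry with the iSCSI transport class
 * @ddb_entry: Pointer to device database entry
 *
 * Adds the session, creates its connection and unblocks the session so
 * that scanning can proceed. Returns 0 on success.
 **/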
302 int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
303 {
304 int err;
305
306 ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo;
307
308 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
309 if (err) {
310 DEBUG2(printk(KERN_ERR "Could not add session.\n"));
311 return err;
312 }
313
314 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
315 if (!ddb_entry->conn) {
316 iscsi_remove_session(ddb_entry->sess);
317 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
318 return -ENOMEM;
319 }
320
321 /* finally ready to go */
322 iscsi_unblock_session(ddb_entry->sess);
323 return 0;
324 }
325
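/**
 * qla4xxx_alloc_sess - allocate an iSCSI class session and its ddb entry
 * @ha: Pointer to host adapter structure
 *
 * Returns the ddb entry embedded in the session's dd_data on success,
 * or NULL on allocation failure.
 **/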
326 struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
327 {
328 struct ddb_entry *ddb_entry;
329 struct iscsi_cls_session *sess;
330
331 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
332 sizeof(struct ddb_entry));
333 if (!sess)
334 return NULL;
335
336 ddb_entry = sess->dd_data;
337 memset(ddb_entry, 0, sizeof(*ddb_entry));
338 ddb_entry->ha = ha;
339 ddb_entry->sess = sess;
340 return ddb_entry;
341 }
342
343 static void qla4xxx_scan_start(struct Scsi_Host *shost)
344 {
345 struct scsi_qla_host *ha = shost_priv(shost);
346 struct ddb_entry *ddb_entry, *ddbtemp;
347
348 /* finish setup of sessions that were already set up in firmware */
349 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
350 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
351 qla4xxx_add_sess(ddb_entry);
352 }
353 }
354
355 /*
356 * Timer routines
357 */
358
359 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
360 unsigned long interval)
361 {
362 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
363 __func__, ha->host->host_no));
364 init_timer(&ha->timer);
365 ha->timer.expires = jiffies + interval * HZ;
366 ha->timer.data = (unsigned long)ha;
367 ha->timer.function = (void (*)(unsigned long))func;
368 add_timer(&ha->timer);
369 ha->timer_active = 1;
370 }
371
372 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
373 {
374 del_timer_sync(&ha->timer);
375 ha->timer_active = 0;
376 }
377
378 /**
379 * qla4xxx_mark_device_missing - mark a device as missing.
380 * @ha: Pointer to host adapter structure.
381 * @ddb_entry: Pointer to device database entry
382 *
383 * This routine marks a device missing and closes its connection.
384 **/
385 void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
386 struct ddb_entry *ddb_entry)
387 {
388 if ((atomic_read(&ddb_entry->state) != DDB_STATE_DEAD)) {
389 atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
390 DEBUG2(printk("scsi%ld: ddb [%d] marked MISSING\n",
391 ha->host_no, ddb_entry->fw_ddb_index));
392 } else
393 DEBUG2(printk("scsi%ld: ddb [%d] DEAD\n", ha->host_no,
394 ddb_entry->fw_ddb_index));
395
396 iscsi_block_session(ddb_entry->sess);
397 iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
398 }
399
400 /**
401 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
402 * @ha: Pointer to host adapter structure.
403 *
404 * This routine marks every device on the adapter missing and blocks its session.
405 **/
406 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
407 {
408 struct ddb_entry *ddb_entry, *ddbtemp;
409 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
410 qla4xxx_mark_device_missing(ha, ddb_entry);
411 }
412 }
413
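/**
 * qla4xxx_get_new_srb - allocate and initialize an srb for a SCSI command
 * @ha: Pointer to host adapter structure
 * @ddb_entry: Target's device database entry
 * @cmd: SCSI command the srb will track
 *
 * Allocated from the adapter's srb mempool with GFP_ATOMIC; returns NULL
 * if the pool is exhausted.
 **/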
414 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
415 struct ddb_entry *ddb_entry,
416 struct scsi_cmnd *cmd)
417 {
418 struct srb *srb;
419
420 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
421 if (!srb)
422 return srb;
423
424 kref_init(&srb->srb_ref);
425 srb->ha = ha;
426 srb->ddb = ddb_entry;
427 srb->cmd = cmd;
428 srb->flags = 0;
429 CMD_SP(cmd) = (void *)srb;
430
431 return srb;
432 }
433
434 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
435 {
436 struct scsi_cmnd *cmd = srb->cmd;
437
438 if (srb->flags & SRB_DMA_VALID) {
439 scsi_dma_unmap(cmd);
440 srb->flags &= ~SRB_DMA_VALID;
441 }
442 CMD_SP(cmd) = NULL;
443 }
444
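/**
 * qla4xxx_srb_compl - kref release callback for an srb
 * @ref: kref embedded in the srb
 *
 * Unmaps any DMA for the command, returns the srb to the mempool and
 * completes the SCSI command back to the midlayer.
 **/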
445 void qla4xxx_srb_compl(struct kref *ref)
446 {
447 struct srb *srb = container_of(ref, struct srb, srb_ref);
448 struct scsi_cmnd *cmd = srb->cmd;
449 struct scsi_qla_host *ha = srb->ha;
450
451 qla4xxx_srb_free_dma(ha, srb);
452
453 mempool_free(srb, ha->srb_mempool);
454
455 cmd->scsi_done(cmd);
456 }
457
458 /**
459 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
460 * @host: scsi host
461 * @cmd: Pointer to Linux's SCSI command structure
462 *
463 * Remarks:
464 * This routine is invoked by Linux to send a SCSI command to the driver.
465 * The mid-level driver tries to ensure that queuecommand never gets
466 * invoked concurrently with itself or the interrupt handler (although
467 * the interrupt handler may call this routine as part of request-
468 * completion handling). Unfortunately, it sometimes calls the scheduler
469 * in interrupt context which is a big NO! NO!.
470 **/
471 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
472 {
473 struct scsi_qla_host *ha = to_qla_host(host);
474 struct ddb_entry *ddb_entry = cmd->device->hostdata;
475 struct iscsi_cls_session *sess = ddb_entry->sess;
476 struct srb *srb;
477 int rval;
478
479 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
480 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
481 cmd->result = DID_NO_CONNECT << 16;
482 else
483 cmd->result = DID_REQUEUE << 16;
484 goto qc_fail_command;
485 }
486
487 if (!sess) {
488 cmd->result = DID_IMM_RETRY << 16;
489 goto qc_fail_command;
490 }
491
492 rval = iscsi_session_chkready(sess);
493 if (rval) {
494 cmd->result = rval;
495 goto qc_fail_command;
496 }
497
498 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
499 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
500 cmd->result = DID_NO_CONNECT << 16;
501 goto qc_fail_command;
502 }
503 return SCSI_MLQUEUE_TARGET_BUSY;
504 }
505
506 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
507 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
508 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
509 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
510 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
511 !test_bit(AF_ONLINE, &ha->flags) ||
512 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
513 goto qc_host_busy;
514
515 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
516 if (!srb)
517 goto qc_host_busy;
518
519 rval = qla4xxx_send_command_to_isp(ha, srb);
520 if (rval != QLA_SUCCESS)
521 goto qc_host_busy_free_sp;
522
523 return 0;
524
525 qc_host_busy_free_sp:
526 qla4xxx_srb_free_dma(ha, srb);
527 mempool_free(srb, ha->srb_mempool);
528
529 qc_host_busy:
530 return SCSI_MLQUEUE_HOST_BUSY;
531
532 qc_fail_command:
533 cmd->scsi_done(cmd);
534
535 return 0;
536 }
537
538 /**
539 * qla4xxx_mem_free - frees memory allocated to adapter
540 * @ha: Pointer to host adapter structure.
541 *
542 * Frees memory previously allocated by qla4xxx_mem_alloc
543 **/
544 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
545 {
546 if (ha->queues)
547 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
548 ha->queues_dma);
549
550 ha->queues_len = 0;
551 ha->queues = NULL;
552 ha->queues_dma = 0;
553 ha->request_ring = NULL;
554 ha->request_dma = 0;
555 ha->response_ring = NULL;
556 ha->response_dma = 0;
557 ha->shadow_regs = NULL;
558 ha->shadow_regs_dma = 0;
559
560 /* Free srb pool. */
561 if (ha->srb_mempool)
562 mempool_destroy(ha->srb_mempool);
563
564 ha->srb_mempool = NULL;
565
566 /* release io space registers */
567 if (is_qla8022(ha)) {
568 if (ha->nx_pcibase)
569 iounmap(
570 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
571 } else if (ha->reg)
572 iounmap(ha->reg);
573 pci_release_regions(ha->pdev);
574 }
575
576 /**
577 * qla4xxx_mem_alloc - allocates memory for use by adapter.
578 * @ha: Pointer to host adapter structure
579 *
580 * Allocates DMA memory for request and response queues. Also allocates memory
581 * for srbs.
582 **/
583 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
584 {
585 unsigned long align;
586
587 /* Allocate contiguous block of DMA memory for queues. */
588 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
589 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
590 sizeof(struct shadow_regs) +
591 MEM_ALIGN_VALUE +
592 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
593 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
594 &ha->queues_dma, GFP_KERNEL);
595 if (ha->queues == NULL) {
596 ql4_printk(KERN_WARNING, ha,
597 "Memory Allocation failed - queues.\n");
598
599 goto mem_alloc_error_exit;
600 }
601 memset(ha->queues, 0, ha->queues_len);
602
603 /*
604 * As per RISC alignment requirements -- the bus-address must be a
605 * multiple of the request-ring size (in bytes).
606 */
607 align = 0;
608 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
609 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
610 (MEM_ALIGN_VALUE - 1));
611
612 /* Update request and response queue pointers. */
613 ha->request_dma = ha->queues_dma + align;
614 ha->request_ring = (struct queue_entry *) (ha->queues + align);
615 ha->response_dma = ha->queues_dma + align +
616 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
617 ha->response_ring = (struct queue_entry *) (ha->queues + align +
618 (REQUEST_QUEUE_DEPTH *
619 QUEUE_SIZE));
620 ha->shadow_regs_dma = ha->queues_dma + align +
621 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
622 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
623 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
624 (REQUEST_QUEUE_DEPTH *
625 QUEUE_SIZE) +
626 (RESPONSE_QUEUE_DEPTH *
627 QUEUE_SIZE));
628
629 /* Allocate memory for srb pool. */
630 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
631 mempool_free_slab, srb_cachep);
632 if (ha->srb_mempool == NULL) {
633 ql4_printk(KERN_WARNING, ha,
634 "Memory Allocation failed - SRB Pool.\n");
635
636 goto mem_alloc_error_exit;
637 }
638
639 return QLA_SUCCESS;
640
641 mem_alloc_error_exit:
642 qla4xxx_mem_free(ha);
643 return QLA_ERROR;
644 }
645
646 /**
647 * qla4_8xxx_check_fw_alive - Check firmware health
648 * @ha: Pointer to host adapter structure.
649 *
650 * Context: Interrupt
651 **/
652 static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
653 {
654 uint32_t fw_heartbeat_counter, halt_status;
655
656 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
657 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
658 if (fw_heartbeat_counter == 0xffffffff) {
659 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
660 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
661 ha->host_no, __func__));
662 return;
663 }
664
665 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
666 ha->seconds_since_last_heartbeat++;
667 /* FW not alive after 2 seconds */
668 if (ha->seconds_since_last_heartbeat == 2) {
669 ha->seconds_since_last_heartbeat = 0;
670 halt_status = qla4_8xxx_rd_32(ha,
671 QLA82XX_PEG_HALT_STATUS1);
672
673 ql4_printk(KERN_INFO, ha,
674 "scsi(%ld): %s, Dumping hw/fw registers:\n "
675 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
676 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
677 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
678 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
679 ha->host_no, __func__, halt_status,
680 qla4_8xxx_rd_32(ha,
681 QLA82XX_PEG_HALT_STATUS2),
682 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
683 0x3c),
684 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
685 0x3c),
686 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
687 0x3c),
688 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
689 0x3c),
690 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
691 0x3c));
692
693 /* Since we cannot change dev_state in interrupt
694 * context, set appropriate DPC flag then wakeup
695 * DPC */
696 if (halt_status & HALT_STATUS_UNRECOVERABLE)
697 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
698 else {
699 printk("scsi%ld: %s: detect abort needed!\n",
700 ha->host_no, __func__);
701 set_bit(DPC_RESET_HA, &ha->dpc_flags);
702 }
703 qla4xxx_wake_dpc(ha);
704 qla4xxx_mailbox_premature_completion(ha);
705 }
706 } else
707 ha->seconds_since_last_heartbeat = 0;
708
709 ha->fw_heartbeat_counter = fw_heartbeat_counter;
710 }
711
712 /**
713 * qla4_8xxx_watchdog - Poll dev state
714 * @ha: Pointer to host adapter structure.
715 *
716 * Context: Interrupt
717 **/
718 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
719 {
720 uint32_t dev_state;
721
722 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
723
724 /* don't poll if reset is going on */
725 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
726 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
727 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
728 if (dev_state == QLA82XX_DEV_NEED_RESET &&
729 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
730 if (!ql4xdontresethba) {
731 ql4_printk(KERN_INFO, ha, "%s: HW State: "
732 "NEED RESET!\n", __func__);
733 set_bit(DPC_RESET_HA, &ha->dpc_flags);
734 qla4xxx_wake_dpc(ha);
735 qla4xxx_mailbox_premature_completion(ha);
736 }
737 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
738 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
739 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
740 __func__);
741 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
742 qla4xxx_wake_dpc(ha);
743 } else {
744 /* Check firmware health */
745 qla4_8xxx_check_fw_alive(ha);
746 }
747 }
748 }
749
750 /**
751 * qla4xxx_timer - checks every second for work to do.
752 * @ha: Pointer to host adapter structure.
753 **/
754 static void qla4xxx_timer(struct scsi_qla_host *ha)
755 {
756 struct ddb_entry *ddb_entry, *dtemp;
757 int start_dpc = 0;
758 uint16_t w;
759
760 /* If we are in the middle of AER/EEH processing
761 * skip any processing and reschedule the timer
762 */
763 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
764 mod_timer(&ha->timer, jiffies + HZ);
765 return;
766 }
767
768 /* Hardware read to trigger an EEH error during mailbox waits. */
769 if (!pci_channel_offline(ha->pdev))
770 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
771
772 if (is_qla8022(ha)) {
773 qla4_8xxx_watchdog(ha);
774 }
775
776 /* Search for relogin's to time-out and port down retry. */
777 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
778 /* Count down time between sending relogins */
779 if (adapter_up(ha) &&
780 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
781 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
782 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
783 INVALID_ENTRY) {
784 if (atomic_read(&ddb_entry->retry_relogin_timer)
785 == 0) {
786 atomic_set(&ddb_entry->
787 retry_relogin_timer,
788 INVALID_ENTRY);
789 set_bit(DPC_RELOGIN_DEVICE,
790 &ha->dpc_flags);
791 set_bit(DF_RELOGIN, &ddb_entry->flags);
792 DEBUG2(printk("scsi%ld: %s: ddb [%d]"
793 " login device\n",
794 ha->host_no, __func__,
795 ddb_entry->fw_ddb_index));
796 } else
797 atomic_dec(&ddb_entry->
798 retry_relogin_timer);
799 }
800 }
801
802 /* Wait for relogin to time out */
803 if (atomic_read(&ddb_entry->relogin_timer) &&
804 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
805 /*
806 * If the relogin times out and the device is
807 * still NOT ONLINE then try and relogin again.
808 */
809 if (atomic_read(&ddb_entry->state) !=
810 DDB_STATE_ONLINE &&
811 ddb_entry->fw_ddb_device_state ==
812 DDB_DS_SESSION_FAILED) {
813 /* Reset retry relogin timer */
814 atomic_inc(&ddb_entry->relogin_retry_count);
815 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
816 " timed out-retrying"
817 " relogin (%d)\n",
818 ha->host_no,
819 ddb_entry->fw_ddb_index,
820 atomic_read(&ddb_entry->
821 relogin_retry_count))
822 );
823 start_dpc++;
824 DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
825 "initiate relogin after"
826 " %d seconds\n",
827 ha->host_no, ddb_entry->bus,
828 ddb_entry->target,
829 ddb_entry->fw_ddb_index,
830 ddb_entry->default_time2wait + 4)
831 );
832
833 atomic_set(&ddb_entry->retry_relogin_timer,
834 ddb_entry->default_time2wait + 4);
835 }
836 }
837 }
838
839 if (!is_qla8022(ha)) {
840 /* Check for heartbeat interval. */
841 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
842 ha->heartbeat_interval != 0) {
843 ha->seconds_since_last_heartbeat++;
844 if (ha->seconds_since_last_heartbeat >
845 ha->heartbeat_interval + 2)
846 set_bit(DPC_RESET_HA, &ha->dpc_flags);
847 }
848 }
849
850 /* Wakeup the dpc routine for this adapter, if needed. */
851 if (start_dpc ||
852 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
853 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
854 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
855 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
856 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
857 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
858 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
859 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
860 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
861 test_bit(DPC_AEN, &ha->dpc_flags)) {
862 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
863 " - dpc flags = 0x%lx\n",
864 ha->host_no, __func__, ha->dpc_flags));
865 qla4xxx_wake_dpc(ha);
866 }
867
868 /* Reschedule timer thread to call us back in one second */
869 mod_timer(&ha->timer, jiffies + HZ);
870
871 DEBUG2(ha->seconds_since_last_intr++);
872 }
873
874 /**
875 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
876 * @ha: Pointer to host adapter structure.
877 *
878 * This routine stalls the driver until all outstanding commands are returned.
879 * Caller must release the Hardware Lock prior to calling this routine.
880 **/
881 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
882 {
883 uint32_t index = 0;
884 unsigned long flags;
885 struct scsi_cmnd *cmd;
886
887 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
888
889 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
890 "complete\n", WAIT_CMD_TOV));
891
892 while (!time_after_eq(jiffies, wtime)) {
893 spin_lock_irqsave(&ha->hardware_lock, flags);
894 /* Find a command that hasn't completed. */
895 for (index = 0; index < ha->host->can_queue; index++) {
896 cmd = scsi_host_find_tag(ha->host, index);
897 /*
898 * We cannot just check if the index is valid,
899 * because if we are run from the scsi eh, then
900 * the scsi/block layer is going to prevent
901 * the tag from being released.
902 */
903 if (cmd != NULL && CMD_SP(cmd))
904 break;
905 }
906 spin_unlock_irqrestore(&ha->hardware_lock, flags);
907
908 /* If No Commands are pending, wait is complete */
909 if (index == ha->host->can_queue)
910 return QLA_SUCCESS;
911
912 msleep(1000);
913 }
914 /* If we timed out on waiting for commands to come back
915 * return ERROR. */
916 return QLA_ERROR;
917 }
918
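/**
 * qla4xxx_hw_reset - issues a soft reset to the hardware
 * @ha: Pointer to host adapter structure.
 *
 * Acquires the driver lock, clears a pending SCSI reset interrupt if one is
 * set, then writes the soft reset bit in the control/status register.
 **/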
919 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
920 {
921 uint32_t ctrl_status;
922 unsigned long flags = 0;
923
924 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
925
926 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
927 return QLA_ERROR;
928
929 spin_lock_irqsave(&ha->hardware_lock, flags);
930
931 /*
932 * If the SCSI Reset Interrupt bit is set, clear it.
933 * Otherwise, the Soft Reset won't work.
934 */
935 ctrl_status = readw(&ha->reg->ctrl_status);
936 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
937 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
938
939 /* Issue Soft Reset */
940 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
941 readl(&ha->reg->ctrl_status);
942
943 spin_unlock_irqrestore(&ha->hardware_lock, flags);
944 return QLA_SUCCESS;
945 }
946
947 /**
948 * qla4xxx_soft_reset - performs soft reset.
949 * @ha: Pointer to host adapter structure.
950 **/
951 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
952 {
953 uint32_t max_wait_time;
954 unsigned long flags = 0;
955 int status;
956 uint32_t ctrl_status;
957
958 status = qla4xxx_hw_reset(ha);
959 if (status != QLA_SUCCESS)
960 return status;
961
962 status = QLA_ERROR;
963 /* Wait until the Network Reset Intr bit is cleared */
964 max_wait_time = RESET_INTR_TOV;
965 do {
966 spin_lock_irqsave(&ha->hardware_lock, flags);
967 ctrl_status = readw(&ha->reg->ctrl_status);
968 spin_unlock_irqrestore(&ha->hardware_lock, flags);
969
970 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
971 break;
972
973 msleep(1000);
974 } while ((--max_wait_time));
975
976 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
977 DEBUG2(printk(KERN_WARNING
978 "scsi%ld: Network Reset Intr not cleared by "
979 "Network function, clearing it now!\n",
980 ha->host_no));
981 spin_lock_irqsave(&ha->hardware_lock, flags);
982 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
983 readl(&ha->reg->ctrl_status);
984 spin_unlock_irqrestore(&ha->hardware_lock, flags);
985 }
986
987 /* Wait until the firmware tells us the Soft Reset is done */
988 max_wait_time = SOFT_RESET_TOV;
989 do {
990 spin_lock_irqsave(&ha->hardware_lock, flags);
991 ctrl_status = readw(&ha->reg->ctrl_status);
992 spin_unlock_irqrestore(&ha->hardware_lock, flags);
993
994 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
995 status = QLA_SUCCESS;
996 break;
997 }
998
999 msleep(1000);
1000 } while ((--max_wait_time));
1001
1002 /*
1003 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1004 * after the soft reset has taken place.
1005 */
1006 spin_lock_irqsave(&ha->hardware_lock, flags);
1007 ctrl_status = readw(&ha->reg->ctrl_status);
1008 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
1009 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1010 readl(&ha->reg->ctrl_status);
1011 }
1012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1013
1014 /* If the soft reset fails then most probably the BIOS on the other
1015 * function is also enabled.
1016 * Since the initialization is sequential, the other function
1017 * won't be able to acknowledge the soft reset.
1018 * Issue a force soft reset to work around this scenario.
1019 */
1020 if (max_wait_time == 0) {
1021 /* Issue Force Soft Reset */
1022 spin_lock_irqsave(&ha->hardware_lock, flags);
1023 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
1024 readl(&ha->reg->ctrl_status);
1025 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1026 /* Wait until the firmware tells us the Soft Reset is done */
1027 max_wait_time = SOFT_RESET_TOV;
1028 do {
1029 spin_lock_irqsave(&ha->hardware_lock, flags);
1030 ctrl_status = readw(&ha->reg->ctrl_status);
1031 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1032
1033 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
1034 status = QLA_SUCCESS;
1035 break;
1036 }
1037
1038 msleep(1000);
1039 } while ((--max_wait_time));
1040 }
1041
1042 return status;
1043 }
1044
1045 /**
1046 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
1047 * @ha: Pointer to host adapter structure.
1048 * @res: returned scsi status
1049 *
1050 * This routine is called just prior to a HARD RESET to return all
1051 * outstanding commands back to the Operating System.
1052 * Caller should make sure that the following locks are released
1053 * before calling this routine: Hardware lock, and io_request_lock.
1054 **/
1055 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
1056 {
1057 struct srb *srb;
1058 int i;
1059 unsigned long flags;
1060
1061 spin_lock_irqsave(&ha->hardware_lock, flags);
1062 for (i = 0; i < ha->host->can_queue; i++) {
1063 srb = qla4xxx_del_from_active_array(ha, i);
1064 if (srb != NULL) {
1065 srb->cmd->result = res;
1066 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
1067 }
1068 }
1069 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1070 }
1071
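/**
 * qla4xxx_dead_adapter_cleanup - disable an adapter that cannot be recovered
 * @ha: Pointer to host adapter structure.
 *
 * Takes the adapter offline, fails all outstanding commands with
 * DID_NO_CONNECT and marks every device missing.
 **/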
1072 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
1073 {
1074 clear_bit(AF_ONLINE, &ha->flags);
1075
1076 /* Disable the board */
1077 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
1078
1079 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
1080 qla4xxx_mark_all_devices_missing(ha);
1081 clear_bit(AF_INIT_DONE, &ha->flags);
1082 }
1083
1084 /**
1085 * qla4xxx_recover_adapter - recovers adapter after a fatal error
1086 * @ha: Pointer to host adapter structure.
1087 **/
1088 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
1089 {
1090 int status = QLA_ERROR;
1091 uint8_t reset_chip = 0;
1092
1093 /* Stall incoming I/O until we are done */
1094 scsi_block_requests(ha->host);
1095 clear_bit(AF_ONLINE, &ha->flags);
1096
1097 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
1098
1099 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
1100
1101 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1102 reset_chip = 1;
1103
1104 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
1105 * do not reset adapter, jump to initialize_adapter */
1106 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1107 status = QLA_SUCCESS;
1108 goto recover_ha_init_adapter;
1109 }
1110
1111 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
1112 * from eh_host_reset or ioctl module */
1113 if (is_qla8022(ha) && !reset_chip &&
1114 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
1115
1116 DEBUG2(ql4_printk(KERN_INFO, ha,
1117 "scsi%ld: %s - Performing stop_firmware...\n",
1118 ha->host_no, __func__));
1119 status = ha->isp_ops->reset_firmware(ha);
1120 if (status == QLA_SUCCESS) {
1121 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
1122 qla4xxx_cmd_wait(ha);
1123 ha->isp_ops->disable_intrs(ha);
1124 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1125 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1126 } else {
1127 /* If the stop_firmware fails then
1128 * reset the entire chip */
1129 reset_chip = 1;
1130 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
1131 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1132 }
1133 }
1134
1135 /* Issue full chip reset if recovering from a catastrophic error,
1136 * or if stop_firmware fails for ISP-82xx.
1137 * This is the default case for ISP-4xxx */
1138 if (!is_qla8022(ha) || reset_chip) {
1139 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
1140 qla4xxx_cmd_wait(ha);
1141 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1142 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1143 DEBUG2(ql4_printk(KERN_INFO, ha,
1144 "scsi%ld: %s - Performing chip reset..\n",
1145 ha->host_no, __func__));
1146 status = ha->isp_ops->reset_chip(ha);
1147 }
1148
1149 /* Flush any pending ddb changed AENs */
1150 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1151
1152 recover_ha_init_adapter:
1153 /* Upon successful firmware/chip reset, re-initialize the adapter */
1154 if (status == QLA_SUCCESS) {
1155 /* For ISP-4xxx, force function 1 to always initialize
1156 * before function 3 to prevent both functions from
1157 * stepping on top of the other */
1158 if (!is_qla8022(ha) && (ha->mac_index == 3))
1159 ssleep(6);
1160
1161 /* NOTE: AF_ONLINE flag set upon successful completion of
1162 * qla4xxx_initialize_adapter */
1163 status = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
1164 }
1165
1166 /* Retry failed adapter initialization, if necessary
1167 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
1168 * case to prevent ping-pong resets between functions */
1169 if (!test_bit(AF_ONLINE, &ha->flags) &&
1170 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1171 /* Adapter initialization failed, see if we can retry
1172 * resetting the ha.
1173 * Since we don't want to block the DPC for too long
1174 * with multiple resets in the same thread,
1175 * utilize DPC to retry */
1176 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
1177 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
1178 DEBUG2(printk("scsi%ld: recover adapter - retrying "
1179 "(%d) more times\n", ha->host_no,
1180 ha->retry_reset_ha_cnt));
1181 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
1182 status = QLA_ERROR;
1183 } else {
1184 if (ha->retry_reset_ha_cnt > 0) {
1185 /* Schedule another Reset HA--DPC will retry */
1186 ha->retry_reset_ha_cnt--;
1187 DEBUG2(printk("scsi%ld: recover adapter - "
1188 "retry remaining %d\n",
1189 ha->host_no,
1190 ha->retry_reset_ha_cnt));
1191 status = QLA_ERROR;
1192 }
1193
1194 if (ha->retry_reset_ha_cnt == 0) {
1195 /* Recover adapter retries have been exhausted.
1196 * Adapter DEAD */
1197 DEBUG2(printk("scsi%ld: recover adapter "
1198 "failed - board disabled\n",
1199 ha->host_no));
1200 qla4xxx_dead_adapter_cleanup(ha);
1201 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
1202 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
1203 clear_bit(DPC_RESET_HA_FW_CONTEXT,
1204 &ha->dpc_flags);
1205 status = QLA_ERROR;
1206 }
1207 }
1208 } else {
1209 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
1210 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
1211 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
1212 }
1213
1214 ha->adapter_error_count++;
1215
1216 if (test_bit(AF_ONLINE, &ha->flags))
1217 ha->isp_ops->enable_intrs(ha);
1218
1219 scsi_unblock_requests(ha->host);
1220
1221 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
1222 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
1223 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
1224
1225 return status;
1226 }
1227
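/**
 * qla4xxx_relogin_all_devices - bring MISSING/DEAD devices back online
 * @ha: Pointer to host adapter structure.
 *
 * For each ddb entry that is MISSING or DEAD, either mark it ONLINE and
 * unblock its session (if the firmware already reports the session active)
 * or initiate a relogin.
 **/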
1228 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
1229 {
1230 struct ddb_entry *ddb_entry, *dtemp;
1231
1232 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
1233 if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) ||
1234 (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) {
1235 if (ddb_entry->fw_ddb_device_state ==
1236 DDB_DS_SESSION_ACTIVE) {
1237 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1238 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
1239 " marked ONLINE\n", ha->host_no, __func__,
1240 ddb_entry->fw_ddb_index);
1241
1242 iscsi_unblock_session(ddb_entry->sess);
1243 } else
1244 qla4xxx_relogin_device(ha, ddb_entry);
1245 }
1246 }
1247 }
1248
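/**
 * qla4xxx_wake_dpc - schedule the DPC work
 * @ha: Pointer to host adapter structure.
 *
 * Queues the adapter's DPC work on its workqueue, if the workqueue exists.
 **/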
1249 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
1250 {
1251 if (ha->dpc_thread)
1252 queue_work(ha->dpc_thread, &ha->dpc_work);
1253 }
1254
1255 /**
1256 * qla4xxx_do_dpc - dpc routine
1257 * @data: in our case pointer to adapter structure
1258 *
1259 * This routine is a task that is scheduled by the interrupt handler
1260 * to perform the background processing for interrupts. We put it
1261 * on a task queue that is consumed whenever the scheduler runs; that's
1262 * so you can do anything (i.e. put the process to sleep etc). In fact,
1263 * the mid-level tries to sleep when it reaches the driver threshold
1264 * "host->can_queue". This can cause a panic if we were in our interrupt code.
1265 **/
1266 static void qla4xxx_do_dpc(struct work_struct *work)
1267 {
1268 struct scsi_qla_host *ha =
1269 container_of(work, struct scsi_qla_host, dpc_work);
1270 struct ddb_entry *ddb_entry, *dtemp;
1271 int status = QLA_ERROR;
1272
1273 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
1274 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
1275 ha->host_no, __func__, ha->flags, ha->dpc_flags))
1276
1277 /* Initialization not yet finished. Don't do anything yet. */
1278 if (!test_bit(AF_INIT_DONE, &ha->flags))
1279 return;
1280
1281 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1282 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
1283 ha->host_no, __func__, ha->flags));
1284 return;
1285 }
1286
1287 if (is_qla8022(ha)) {
1288 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
1289 qla4_8xxx_idc_lock(ha);
1290 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1291 QLA82XX_DEV_FAILED);
1292 qla4_8xxx_idc_unlock(ha);
1293 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
1294 qla4_8xxx_device_state_handler(ha);
1295 }
1296 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
1297 qla4_8xxx_need_qsnt_handler(ha);
1298 }
1299 }
1300
1301 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
1302 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1303 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1304 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
1305 if (ql4xdontresethba) {
1306 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
1307 ha->host_no, __func__));
1308 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
1309 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1310 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
1311 goto dpc_post_reset_ha;
1312 }
1313 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
1314 test_bit(DPC_RESET_HA, &ha->dpc_flags))
1315 qla4xxx_recover_adapter(ha);
1316
1317 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
1318 uint8_t wait_time = RESET_INTR_TOV;
1319
1320 while ((readw(&ha->reg->ctrl_status) &
1321 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
1322 if (--wait_time == 0)
1323 break;
1324 msleep(1000);
1325 }
1326 if (wait_time == 0)
1327 DEBUG2(printk("scsi%ld: %s: SR|FSR "
1328 "bit not cleared-- resetting\n",
1329 ha->host_no, __func__));
1330 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1331 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
1332 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1333 status = qla4xxx_recover_adapter(ha);
1334 }
1335 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1336 if (status == QLA_SUCCESS)
1337 ha->isp_ops->enable_intrs(ha);
1338 }
1339 }
1340
1341 dpc_post_reset_ha:
1342 /* ---- process AEN? --- */
1343 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
1344 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
1345
1346 /* ---- Get DHCP IP Address? --- */
1347 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
1348 qla4xxx_get_dhcp_ip_address(ha);
1349
1350 /* ---- link change? --- */
1351 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
1352 if (!test_bit(AF_LINK_UP, &ha->flags)) {
1353 /* ---- link down? --- */
1354 qla4xxx_mark_all_devices_missing(ha);
1355 } else {
1356 /* ---- link up? --- *
1357 * F/W will auto login to all devices ONLY ONCE after
1358 * link up during driver initialization and runtime
1359 * fatal error recovery. Therefore, the driver must
1360 * manually relogin to devices when recovering from
1361 * connection failures, logouts, expired KATO, etc. */
1362
1363 qla4xxx_relogin_all_devices(ha);
1364 }
1365 }
1366
1367 /* ---- relogin device? --- */
1368 if (adapter_up(ha) &&
1369 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
1370 list_for_each_entry_safe(ddb_entry, dtemp,
1371 &ha->ddb_list, list) {
1372 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
1373 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
1374 qla4xxx_relogin_device(ha, ddb_entry);
1375
1376 /*
1377 * If mbx cmd times out there is no point
1378 * in continuing further.
1379 * With large no of targets this can hang
1380 * the system.
1381 */
1382 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1383 printk(KERN_WARNING "scsi%ld: %s: "
1384 "need to reset hba\n",
1385 ha->host_no, __func__);
1386 break;
1387 }
1388 }
1389 }
1390
1391 }
1392
1393 /**
1394 * qla4xxx_free_adapter - release the adapter
1395 * @ha: pointer to adapter structure
1396 **/
1397 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1398 {
1399
1400 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
1401 /* Turn-off interrupts on the card. */
1402 ha->isp_ops->disable_intrs(ha);
1403 }
1404
1405 /* Remove timer thread, if present */
1406 if (ha->timer_active)
1407 qla4xxx_stop_timer(ha);
1408
1409 /* Kill the kernel thread for this host */
1410 if (ha->dpc_thread)
1411 destroy_workqueue(ha->dpc_thread);
1412
1413 /* Put firmware in known state */
1414 ha->isp_ops->reset_firmware(ha);
1415
1416 if (is_qla8022(ha)) {
1417 qla4_8xxx_idc_lock(ha);
1418 qla4_8xxx_clear_drv_active(ha);
1419 qla4_8xxx_idc_unlock(ha);
1420 }
1421
1422 /* Detach interrupts */
1423 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1424 qla4xxx_free_irqs(ha);
1425
1426 /* free extra memory */
1427 qla4xxx_mem_free(ha);
1428 }
1429
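/**
 * qla4_8xxx_iospace_config - map ISP-82xx register regions
 * @ha: Pointer to host adapter structure.
 *
 * Reserves the PCI regions, saves the chip revision, ioremaps BAR 0 and
 * derives the per-function register pointer; the doorbell region (BAR 4)
 * and doorbell write pointer are also set up. Returns 0 on success or
 * -ENOMEM on failure.
 **/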
1430 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
1431 {
1432 int status = 0;
1433 uint8_t revision_id;
1434 unsigned long mem_base, mem_len, db_base, db_len;
1435 struct pci_dev *pdev = ha->pdev;
1436
1437 status = pci_request_regions(pdev, DRIVER_NAME);
1438 if (status) {
1439 printk(KERN_WARNING
1440 "scsi(%ld) Failed to reserve PIO regions (%s) "
1441 "status=%d\n", ha->host_no, pci_name(pdev), status);
1442 goto iospace_error_exit;
1443 }
1444
1445 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
1446 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
1447 __func__, revision_id));
1448 ha->revision_id = revision_id;
1449
1450 /* remap phys address */
1451 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
1452 mem_len = pci_resource_len(pdev, 0);
1453 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
1454 __func__, mem_base, mem_len));
1455
1456 /* mapping of pcibase pointer */
1457 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
1458 if (!ha->nx_pcibase) {
1459 printk(KERN_ERR
1460 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
1461 pci_release_regions(ha->pdev);
1462 goto iospace_error_exit;
1463 }
1464
1465 /* Mapping of IO base pointer, door bell read and write pointer */
1466
1467 /* mapping of IO base pointer */
1468 ha->qla4_8xxx_reg =
1469 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
1470 0xbc000 + (ha->pdev->devfn << 11));
1471
1472 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
1473 db_len = pci_resource_len(pdev, 4);
1474
1475 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
1476 QLA82XX_CAM_RAM_DB2);
1477
1478 return 0;
1479 iospace_error_exit:
1480 return -ENOMEM;
1481 }
1482
1483 /**
1484 * qla4xxx_iospace_config - maps registers
1485 * @ha: pointer to adapter structure
1486 *
1487 * This routine maps the HBA's registers from the PCI address space
1488 * into the kernel virtual address space for memory-mapped I/O.
1489 **/
1490 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
1491 {
1492 unsigned long pio, pio_len, pio_flags;
1493 unsigned long mmio, mmio_len, mmio_flags;
1494
1495 pio = pci_resource_start(ha->pdev, 0);
1496 pio_len = pci_resource_len(ha->pdev, 0);
1497 pio_flags = pci_resource_flags(ha->pdev, 0);
1498 if (pio_flags & IORESOURCE_IO) {
1499 if (pio_len < MIN_IOBASE_LEN) {
1500 ql4_printk(KERN_WARNING, ha,
1501 "Invalid PCI I/O region size\n");
1502 pio = 0;
1503 }
1504 } else {
1505 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
1506 pio = 0;
1507 }
1508
1509 /* Use MMIO operations for all accesses. */
1510 mmio = pci_resource_start(ha->pdev, 1);
1511 mmio_len = pci_resource_len(ha->pdev, 1);
1512 mmio_flags = pci_resource_flags(ha->pdev, 1);
1513
1514 if (!(mmio_flags & IORESOURCE_MEM)) {
1515 ql4_printk(KERN_ERR, ha,
1516 "region #0 not an MMIO resource, aborting\n");
1517
1518 goto iospace_error_exit;
1519 }
1520
1521 if (mmio_len < MIN_IOBASE_LEN) {
1522 ql4_printk(KERN_ERR, ha,
1523 "Invalid PCI mem region size, aborting\n");
1524 goto iospace_error_exit;
1525 }
1526
1527 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
1528 ql4_printk(KERN_WARNING, ha,
1529 "Failed to reserve PIO/MMIO regions\n");
1530
1531 goto iospace_error_exit;
1532 }
1533
1534 ha->pio_address = pio;
1535 ha->pio_length = pio_len;
1536 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
1537 if (!ha->reg) {
1538 ql4_printk(KERN_ERR, ha,
1539 "cannot remap MMIO, aborting\n");
1540
1541 goto iospace_error_exit;
1542 }
1543
1544 return 0;
1545
1546 iospace_error_exit:
1547 return -ENOMEM;
1548 }
1549
1550 static struct isp_operations qla4xxx_isp_ops = {
1551 .iospace_config = qla4xxx_iospace_config,
1552 .pci_config = qla4xxx_pci_config,
1553 .disable_intrs = qla4xxx_disable_intrs,
1554 .enable_intrs = qla4xxx_enable_intrs,
1555 .start_firmware = qla4xxx_start_firmware,
1556 .intr_handler = qla4xxx_intr_handler,
1557 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
1558 .reset_chip = qla4xxx_soft_reset,
1559 .reset_firmware = qla4xxx_hw_reset,
1560 .queue_iocb = qla4xxx_queue_iocb,
1561 .complete_iocb = qla4xxx_complete_iocb,
1562 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
1563 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
1564 .get_sys_info = qla4xxx_get_sys_info,
1565 };
1566
1567 static struct isp_operations qla4_8xxx_isp_ops = {
1568 .iospace_config = qla4_8xxx_iospace_config,
1569 .pci_config = qla4_8xxx_pci_config,
1570 .disable_intrs = qla4_8xxx_disable_intrs,
1571 .enable_intrs = qla4_8xxx_enable_intrs,
1572 .start_firmware = qla4_8xxx_load_risc,
1573 .intr_handler = qla4_8xxx_intr_handler,
1574 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
1575 .reset_chip = qla4_8xxx_isp_reset,
1576 .reset_firmware = qla4_8xxx_stop_firmware,
1577 .queue_iocb = qla4_8xxx_queue_iocb,
1578 .complete_iocb = qla4_8xxx_complete_iocb,
1579 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
1580 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
1581 .get_sys_info = qla4_8xxx_get_sys_info,
1582 };
1583
1584 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
1585 {
1586 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
1587 }
1588
1589 uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
1590 {
1591 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
1592 }
1593
1594 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
1595 {
1596 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
1597 }
1598
1599 uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
1600 {
1601 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
1602 }
1603
1604 /**
1605 * qla4xxx_probe_adapter - callback function to probe HBA
1606 * @pdev: pointer to pci_dev structure
1607 * @ent: pointer to pci_device_id entry
1608 *
1609 * This routine will probe for QLogic 4xxx iSCSI host adapters.
1610 * It returns zero if successful. It also initializes all data necessary for
1611 * the driver.
1612 **/
1613 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1614 const struct pci_device_id *ent)
1615 {
1616 int ret = -ENODEV, status;
1617 struct Scsi_Host *host;
1618 struct scsi_qla_host *ha;
1619 uint8_t init_retry_count = 0;
1620 char buf[34];
1621 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
1622 uint32_t dev_state;
1623
1624 if (pci_enable_device(pdev))
1625 return -1;
1626
1627 host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
1628 if (host == NULL) {
1629 printk(KERN_WARNING
1630 "qla4xxx: Couldn't allocate host from scsi layer!\n");
1631 goto probe_disable_device;
1632 }
1633
1634 /* Clear our data area */
1635 ha = (struct scsi_qla_host *) host->hostdata;
1636 memset(ha, 0, sizeof(*ha));
1637
1638 /* Save the information from PCI BIOS. */
1639 ha->pdev = pdev;
1640 ha->host = host;
1641 ha->host_no = host->host_no;
1642
1643 pci_enable_pcie_error_reporting(pdev);
1644
1645 /* Setup Runtime configurable options */
1646 if (is_qla8022(ha)) {
1647 ha->isp_ops = &qla4_8xxx_isp_ops;
1648 rwlock_init(&ha->hw_lock);
1649 ha->qdr_sn_window = -1;
1650 ha->ddr_mn_window = -1;
1651 ha->curr_window = 255;
1652 ha->func_num = PCI_FUNC(ha->pdev->devfn);
1653 nx_legacy_intr = &legacy_intr[ha->func_num];
1654 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
1655 ha->nx_legacy_intr.tgt_status_reg =
1656 nx_legacy_intr->tgt_status_reg;
1657 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
1658 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
1659 } else {
1660 ha->isp_ops = &qla4xxx_isp_ops;
1661 }
1662
1663 /* Set EEH reset type to fundamental if required by hba */
1664 if (is_qla8022(ha))
1665 pdev->needs_freset = 1;
1666
1667 /* Configure PCI I/O space. */
1668 ret = ha->isp_ops->iospace_config(ha);
1669 if (ret)
1670 goto probe_failed_ioconfig;
1671
1672 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
1673 pdev->device, pdev->irq, ha->reg);
1674
1675 qla4xxx_config_dma_addressing(ha);
1676
1677 /* Initialize lists and spinlocks. */
1678 INIT_LIST_HEAD(&ha->ddb_list);
1679 INIT_LIST_HEAD(&ha->free_srb_q);
1680
1681 mutex_init(&ha->mbox_sem);
1682 init_completion(&ha->mbx_intr_comp);
1683
1684 spin_lock_init(&ha->hardware_lock);
1685
1686 /* Allocate dma buffers */
1687 if (qla4xxx_mem_alloc(ha)) {
1688 ql4_printk(KERN_WARNING, ha,
1689 "[ERROR] Failed to allocate memory for adapter\n");
1690
1691 ret = -ENOMEM;
1692 goto probe_failed;
1693 }
1694
1695 if (is_qla8022(ha))
1696 (void) qla4_8xxx_get_flash_info(ha);
1697
1698 /*
1699 * Initialize the Host adapter request/response queues and
1700 * firmware
1701 * NOTE: interrupts enabled upon successful completion
1702 */
1703 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1704 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
1705 init_retry_count++ < MAX_INIT_RETRIES) {
1706
1707 if (is_qla8022(ha)) {
1708 qla4_8xxx_idc_lock(ha);
1709 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1710 qla4_8xxx_idc_unlock(ha);
1711 if (dev_state == QLA82XX_DEV_FAILED) {
1712 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
1713 "initialize adapter. H/W is in failed state\n",
1714 __func__);
1715 break;
1716 }
1717 }
1718 DEBUG2(printk("scsi: %s: retrying adapter initialization "
1719 "(%d)\n", __func__, init_retry_count));
1720
1721 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
1722 continue;
1723
1724 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
1725 }
1726
1727 if (!test_bit(AF_ONLINE, &ha->flags)) {
1728 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
1729
1730 if (is_qla8022(ha) && ql4xdontresethba) {
1731 /* Put the device in failed state. */
1732 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
1733 qla4_8xxx_idc_lock(ha);
1734 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1735 QLA82XX_DEV_FAILED);
1736 qla4_8xxx_idc_unlock(ha);
1737 }
1738 ret = -ENODEV;
1739 goto probe_failed;
1740 }
1741
1742 host->cmd_per_lun = 3;
1743 host->max_channel = 0;
1744 host->max_lun = MAX_LUNS - 1;
1745 host->max_id = MAX_TARGETS;
1746 host->max_cmd_len = IOCB_MAX_CDB_LEN;
1747 host->can_queue = MAX_SRBS;
1748 host->transportt = qla4xxx_scsi_transport;
1749
1750 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
1751 if (ret) {
1752 ql4_printk(KERN_WARNING, ha,
1753 "scsi_init_shared_tag_map failed\n");
1754 goto probe_failed;
1755 }
1756
1757 /* Startup the kernel thread for this host adapter. */
1758 DEBUG2(printk("scsi: %s: Starting kernel thread for "
1759 "qla4xxx_dpc\n", __func__));
1760 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
1761 ha->dpc_thread = create_singlethread_workqueue(buf);
1762 if (!ha->dpc_thread) {
1763 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
1764 ret = -ENODEV;
1765 goto probe_failed;
1766 }
1767 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
1768
1769 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
1770 * (which is called indirectly by qla4xxx_initialize_adapter),
1771 * so that irqs will be registered after crbinit but before
1772 * mbx_intr_enable.
1773 */
1774 if (!is_qla8022(ha)) {
1775 ret = qla4xxx_request_irqs(ha);
1776 if (ret) {
1777 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
1778 "interrupt %d already in use.\n", pdev->irq);
1779 goto probe_failed;
1780 }
1781 }
1782
1783 pci_save_state(ha->pdev);
1784 ha->isp_ops->enable_intrs(ha);
1785
1786 /* Start timer thread. */
1787 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
1788
1789 set_bit(AF_INIT_DONE, &ha->flags);
1790
1791 pci_set_drvdata(pdev, ha);
1792
1793 ret = scsi_add_host(host, &pdev->dev);
1794 if (ret)
1795 goto probe_failed;
1796
1797 printk(KERN_INFO
1798 " QLogic iSCSI HBA Driver version: %s\n"
1799 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
1800 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
1801 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
1802 ha->patch_number, ha->build_number);
1803 scsi_scan_host(host);
1804 return 0;
1805
1806 probe_failed:
1807 qla4xxx_free_adapter(ha);
1808
1809 probe_failed_ioconfig:
1810 pci_disable_pcie_error_reporting(pdev);
1811 scsi_host_put(ha->host);
1812
1813 probe_disable_device:
1814 pci_disable_device(pdev);
1815
1816 return ret;
1817 }
1818
1819 /**
1820 * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
1821 * @ha: pointer to adapter structure
1822 *
1823 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
1824 * so that the other port will not re-initialize while in the process of
1825 * removing the ha due to driver unload or hba hotplug.
1826 **/
1827 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
1828 {
1829 struct scsi_qla_host *other_ha = NULL;
1830 struct pci_dev *other_pdev = NULL;
1831 int fn = ISP4XXX_PCI_FN_2;
1832
1833 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
1834 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
1835 fn = ISP4XXX_PCI_FN_1;
1836
1837 other_pdev =
1838 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
1839 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
1840 fn));
1841
1842 /* Get other_ha if other_pdev is valid and its state is enabled */
1843 if (other_pdev) {
1844 if (atomic_read(&other_pdev->enable_cnt)) {
1845 other_ha = pci_get_drvdata(other_pdev);
1846 if (other_ha) {
1847 set_bit(AF_HA_REMOVAL, &other_ha->flags);
1848 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
1849 "Prevent %s reinit\n", __func__,
1850 dev_name(&other_ha->pdev->dev)));
1851 }
1852 }
1853 pci_dev_put(other_pdev);
1854 }
1855 }
1856
1857 /**
1858 * qla4xxx_remove_adapter - callback function to remove adapter.
1859 * @pdev: PCI device pointer
1860 **/
1861 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1862 {
1863 struct scsi_qla_host *ha;
1864
1865 ha = pci_get_drvdata(pdev);
1866
1867 if (!is_qla8022(ha))
1868 qla4xxx_prevent_other_port_reinit(ha);
1869
1870 /* remove devs from iscsi_sessions to scsi_devices */
1871 qla4xxx_free_ddb_list(ha);
1872
1873 scsi_remove_host(ha->host);
1874
1875 qla4xxx_free_adapter(ha);
1876
1877 scsi_host_put(ha->host);
1878
1879 pci_disable_pcie_error_reporting(pdev);
1880 pci_disable_device(pdev);
1881 pci_set_drvdata(pdev, NULL);
1882 }
1883
1884 /**
1885 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
1886 * @ha: HA context
1887 *
1888 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1889 * supported addressing method.
1890 */
1891 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
1892 {
1893 int retval;
1894
1895 /* Update our PCI device dma_mask for full 64 bit mask */
1896 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
1897 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1898 dev_dbg(&ha->pdev->dev,
1899 "Failed to set 64 bit PCI consistent mask; "
1900 "using 32 bit.\n");
1901 retval = pci_set_consistent_dma_mask(ha->pdev,
1902 DMA_BIT_MASK(32));
1903 }
1904 } else
1905 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1906 }
1907
1908 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
1909 {
1910 struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
1911 struct ddb_entry *ddb = sess->dd_data;
1912 int queue_depth = QL4_DEF_QDEPTH;
1913
1914 sdev->hostdata = ddb;
1915 sdev->tagged_supported = 1;
1916
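/*
 * Use the default queue depth unless the ql4xmaxqdepth module parameter
 * supplies a sane override (non-zero and at most 0xffff).
 */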
1917 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
1918 queue_depth = ql4xmaxqdepth;
1919
1920 scsi_activate_tcq(sdev, queue_depth);
1921 return 0;
1922 }
1923
1924 static int qla4xxx_slave_configure(struct scsi_device *sdev)
1925 {
1926 sdev->tagged_supported = 1;
1927 return 0;
1928 }
1929
1930 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
1931 {
1932 scsi_deactivate_tcq(sdev, 1);
1933 }
1934
1935 /**
1936 * qla4xxx_del_from_active_array - returns an active srb
1937 * @ha: Pointer to host adapter structure.
1938 * @index: index into the active_array
1939 *
1940 * This routine removes and returns the srb at the specified index
1941 **/
1942 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
1943 uint32_t index)
1944 {
1945 struct srb *srb = NULL;
1946 struct scsi_cmnd *cmd = NULL;
1947
1948 cmd = scsi_host_find_tag(ha->host, index);
1949 if (!cmd)
1950 return srb;
1951
1952 srb = (struct srb *)CMD_SP(cmd);
1953 if (!srb)
1954 return srb;
1955
1956 /* Update counters: release this srb's request-queue slots and outstanding IOCBs */
1957 if (srb->flags & SRB_DMA_VALID) {
1958 ha->req_q_count += srb->iocb_cnt;
1959 ha->iocb_cnt -= srb->iocb_cnt;
1960 if (srb->cmd)
1961 srb->cmd->host_scribble =
1962 (unsigned char *)(unsigned long) MAX_SRBS;
1963 }
1964 return srb;
1965 }
1966
1967 /**
1968 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
1969 * @ha: Pointer to host adapter structure.
1970 * @cmd: Scsi Command to wait on.
1971 *
1972 * This routine waits for the command to be returned by the firmware
1973 * for a bounded amount of time.
1974 **/
1975 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
1976 struct scsi_cmnd *cmd)
1977 {
1978 int done = 0;
1979 struct srb *rp;
1980 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
1981 int ret = SUCCESS;
1982
1983 /* Don't wait on the command if a PCI error is being handled
1984 * by the PCI AER driver
1985 */
1986 if (unlikely(pci_channel_offline(ha->pdev)) ||
1987 (test_bit(AF_EEH_BUSY, &ha->flags))) {
1988 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
1989 ha->host_no, __func__);
1990 return ret;
1991 }
1992
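/*
 * Poll roughly every 2 seconds, for up to EH_WAIT_CMD_TOV iterations, for
 * the firmware to hand the command back (indicated by CMD_SP being cleared).
 */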
1993 do {
1994 /* Check to see if it has been returned to the OS */
1995 rp = (struct srb *) CMD_SP(cmd);
1996 if (rp == NULL) {
1997 done++;
1998 break;
1999 }
2000
2001 msleep(2000);
2002 } while (max_wait_time--);
2003
2004 return done;
2005 }
2006
2007 /**
2008 * qla4xxx_wait_for_hba_online - waits for HBA to come online
2009 * @ha: Pointer to host adapter structure
2010 **/
2011 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
2012 {
2013 unsigned long wait_online;
2014
2015 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
2016 while (time_before(jiffies, wait_online)) {
2017
2018 if (adapter_up(ha))
2019 return QLA_SUCCESS;
2020
2021 msleep(2000);
2022 }
2023
2024 return QLA_ERROR;
2025 }
2026
2027 /**
2028 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
2029 * @ha: pointer to HBA
2030 * @stgt: pointer to the SCSI target to wait on
2031 * @sdev: pointer to the SCSI device to wait on, or NULL for all devices on the target
2032 *
2033 * This function waits for all outstanding commands to the designated target
2034 * or device to complete. It returns 0 if all pending commands are returned and 1 otherwise.
2035 **/
2036 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
2037 struct scsi_target *stgt,
2038 struct scsi_device *sdev)
2039 {
2040 int cnt;
2041 int status = 0;
2042 struct scsi_cmnd *cmd;
2043
2044 /*
2045 * Waiting for all commands for the designated target or dev
2046 * in the active array
2047 */
2048 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
2049 cmd = scsi_host_find_tag(ha->host, cnt);
2050 if (cmd && stgt == scsi_target(cmd->device) &&
2051 (!sdev || sdev == cmd->device)) {
2052 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
2053 status++;
2054 break;
2055 }
2056 }
2057 }
2058 return status;
2059 }
2060
2061 /**
2062 * qla4xxx_eh_abort - callback for abort task.
2063 * @cmd: Pointer to Linux's SCSI command structure
2064 *
2065 * This routine is called by the Linux OS to abort the specified
2066 * command.
2067 **/
2068 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
2069 {
2070 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
2071 unsigned int id = cmd->device->id;
2072 unsigned int lun = cmd->device->lun;
2073 unsigned long flags;
2074 struct srb *srb = NULL;
2075 int ret = SUCCESS;
2076 int wait = 0;
2077
2078 ql4_printk(KERN_INFO, ha,
2079 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
2080 ha->host_no, id, lun, cmd);
2081
2082 spin_lock_irqsave(&ha->hardware_lock, flags);
2083 srb = (struct srb *) CMD_SP(cmd);
2084 if (!srb) {
2085 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2086 return SUCCESS;
2087 }
2088 kref_get(&srb->srb_ref);
2089 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2090
2091 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
2092 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
2093 ha->host_no, id, lun));
2094 ret = FAILED;
2095 } else {
2096 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
2097 ha->host_no, id, lun));
2098 wait = 1;
2099 }
2100
2101 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2102
2103 /* Wait for command to complete */
2104 if (wait) {
2105 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
2106 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
2107 ha->host_no, id, lun));
2108 ret = FAILED;
2109 }
2110 }
2111
2112 ql4_printk(KERN_INFO, ha,
2113 "scsi%ld:%d:%d: Abort command - %s\n",
2114 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
2115
2116 return ret;
2117 }
2118
2119 /**
2120 * qla4xxx_eh_device_reset - callback for device (lun) reset.
2121 * @cmd: Pointer to Linux's SCSI command structure
2122 *
2123 * This routine is called by the Linux OS to reset the lun associated
2124 * with the specified device.
2125 **/
2126 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
2127 {
2128 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
2129 struct ddb_entry *ddb_entry = cmd->device->hostdata;
2130 int ret = FAILED, stat;
2131
2132 if (!ddb_entry)
2133 return ret;
2134
2135 ret = iscsi_block_scsi_eh(cmd);
2136 if (ret)
2137 return ret;
2138 ret = FAILED;
2139
2140 ql4_printk(KERN_INFO, ha,
2141 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
2142 cmd->device->channel, cmd->device->id, cmd->device->lun);
2143
2144 DEBUG2(printk(KERN_INFO
2145 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
2146 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
2147 cmd, jiffies, cmd->request->timeout / HZ,
2148 ha->dpc_flags, cmd->result, cmd->allowed));
2149
2150 /* FIXME: wait for hba to go online */
2151 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
2152 if (stat != QLA_SUCCESS) {
2153 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
2154 goto eh_dev_reset_done;
2155 }
2156
2157 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
2158 cmd->device)) {
2159 ql4_printk(KERN_INFO, ha,
2160 "DEVICE RESET FAILED - waiting for "
2161 "commands.\n");
2162 goto eh_dev_reset_done;
2163 }
2164
2165 /* Send marker. */
2166 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
2167 MM_LUN_RESET) != QLA_SUCCESS)
2168 goto eh_dev_reset_done;
2169
2170 ql4_printk(KERN_INFO, ha,
2171 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
2172 ha->host_no, cmd->device->channel, cmd->device->id,
2173 cmd->device->lun);
2174
2175 ret = SUCCESS;
2176
2177 eh_dev_reset_done:
2178
2179 return ret;
2180 }
2181
2182 /**
2183 * qla4xxx_eh_target_reset - callback for target reset.
2184 * @cmd: Pointer to Linux's SCSI command structure
2185 *
2186 * This routine is called by the Linux OS to reset the target.
2187 **/
2188 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
2189 {
2190 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
2191 struct ddb_entry *ddb_entry = cmd->device->hostdata;
2192 int stat, ret;
2193
2194 if (!ddb_entry)
2195 return FAILED;
2196
2197 ret = iscsi_block_scsi_eh(cmd);
2198 if (ret)
2199 return ret;
2200
2201 starget_printk(KERN_INFO, scsi_target(cmd->device),
2202 "WARM TARGET RESET ISSUED.\n");
2203
2204 DEBUG2(printk(KERN_INFO
2205 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
2206 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
2207 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
2208 ha->dpc_flags, cmd->result, cmd->allowed));
2209
2210 stat = qla4xxx_reset_target(ha, ddb_entry);
2211 if (stat != QLA_SUCCESS) {
2212 starget_printk(KERN_INFO, scsi_target(cmd->device),
2213 "WARM TARGET RESET FAILED.\n");
2214 return FAILED;
2215 }
2216
2217 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
2218 NULL)) {
2219 starget_printk(KERN_INFO, scsi_target(cmd->device),
2220 "WARM TARGET DEVICE RESET FAILED - "
2221 "waiting for commands.\n");
2222 return FAILED;
2223 }
2224
2225 /* Send marker. */
2226 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
2227 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
2228 starget_printk(KERN_INFO, scsi_target(cmd->device),
2229 "WARM TARGET DEVICE RESET FAILED - "
2230 "marker iocb failed.\n");
2231 return FAILED;
2232 }
2233
2234 starget_printk(KERN_INFO, scsi_target(cmd->device),
2235 "WARM TARGET RESET SUCCEEDED.\n");
2236 return SUCCESS;
2237 }
2238
2239 /**
2240 * qla4xxx_eh_host_reset - kernel callback
2241 * @cmd: Pointer to Linux's SCSI command structure
2242 *
2243 * This routine is invoked by the Linux kernel to perform fatal error
2244 * recovery on the specified adapter.
2245 **/
2246 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
2247 {
2248 int return_status = FAILED;
2249 struct scsi_qla_host *ha;
2250
2251 ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
2252
2253 if (ql4xdontresethba) {
2254 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2255 ha->host_no, __func__));
2256 return FAILED;
2257 }
2258
2259 ql4_printk(KERN_INFO, ha,
2260 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
2261 cmd->device->channel, cmd->device->id, cmd->device->lun);
2262
2263 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
2264 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
2265 "DEAD.\n", ha->host_no, cmd->device->channel,
2266 __func__));
2267
2268 return FAILED;
2269 }
2270
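/*
 * Request a firmware-context reset for ISP82xx, or a full chip reset for
 * other ISPs, before attempting adapter recovery below.
 */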
2271 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2272 if (is_qla8022(ha))
2273 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2274 else
2275 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2276 }
2277
2278 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
2279 return_status = SUCCESS;
2280
2281 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
2282 return_status == FAILED ? "FAILED" : "SUCCEEDED");
2283
2284 return return_status;
2285 }
2286
2287 /* The PCI AER driver recovers from all correctable errors without
2288 * driver intervention. For uncorrectable errors the PCI AER
2289 * driver calls the following device driver callbacks:
2290 *
2291 * - Fatal Errors - link_reset
2292 * - Non-Fatal Errors - driver's pci_error_detected() which
2293 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
2294 *
2295 * PCI AER driver calls
2296 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
2297 * returns RECOVERED or NEED_RESET if fw_hung
2298 * NEED_RESET - driver's slot_reset()
2299 * DISCONNECT - device is dead & cannot recover
2300 * RECOVERED - driver's pci_resume()
2301 */
2302 static pci_ers_result_t
2303 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2304 {
2305 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
2306
2307 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected: state %x\n",
2308 ha->host_no, __func__, state);
2309
2310 if (!is_aer_supported(ha))
2311 return PCI_ERS_RESULT_NONE;
2312
2313 switch (state) {
2314 case pci_channel_io_normal:
2315 clear_bit(AF_EEH_BUSY, &ha->flags);
2316 return PCI_ERS_RESULT_CAN_RECOVER;
2317 case pci_channel_io_frozen:
2318 set_bit(AF_EEH_BUSY, &ha->flags);
2319 qla4xxx_mailbox_premature_completion(ha);
2320 qla4xxx_free_irqs(ha);
2321 pci_disable_device(pdev);
2322 /* Return back all IOs */
2323 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2324 return PCI_ERS_RESULT_NEED_RESET;
2325 case pci_channel_io_perm_failure:
2326 set_bit(AF_EEH_BUSY, &ha->flags);
2327 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
2328 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2329 return PCI_ERS_RESULT_DISCONNECT;
2330 }
2331 return PCI_ERS_RESULT_NEED_RESET;
2332 }
2333
2334 /**
2335 * qla4xxx_pci_mmio_enabled() gets called if
2336 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER,
2337 * to check whether read/write to the device still works.
2338 **/
2339 static pci_ers_result_t
2340 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
2341 {
2342 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
2343
2344 if (!is_aer_supported(ha))
2345 return PCI_ERS_RESULT_NONE;
2346
2347 return PCI_ERS_RESULT_RECOVERED;
2348 }
2349
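/**
 * qla4_8xxx_error_recovery - recover an ISP82xx adapter after a PCI error
 * @ha: pointer to adapter structure
 *
 * The lowest-numbered enabled PCI function on the card acts as the reset
 * owner: it restarts and re-initializes the firmware. Every other function
 * only re-initializes its own firmware context once the device is READY.
 **/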
2350 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2351 {
2352 uint32_t rval = QLA_ERROR;
2353 uint32_t ret = 0;
2354 int fn;
2355 struct pci_dev *other_pdev = NULL;
2356
2357 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
2358
2359 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2360
2361 if (test_bit(AF_ONLINE, &ha->flags)) {
2362 clear_bit(AF_ONLINE, &ha->flags);
2363 qla4xxx_mark_all_devices_missing(ha);
2364 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2365 }
2366
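/*
 * Scan downward from this function's number for another enabled PCI
 * function on the same device; if none is found, fn ends up 0 and this
 * function becomes the reset owner below.
 */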
2367 fn = PCI_FUNC(ha->pdev->devfn);
2368 while (fn > 0) {
2369 fn--;
2370 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
2371 "func %x\n", ha->host_no, __func__, fn);
2372 /* Get the pci device given the domain, bus,
2373 * slot/function number */
2374 other_pdev =
2375 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
2376 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
2377 fn));
2378
2379 if (!other_pdev)
2380 continue;
2381
2382 if (atomic_read(&other_pdev->enable_cnt)) {
2383 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
2384 "func in enabled state%x\n", ha->host_no,
2385 __func__, fn);
2386 pci_dev_put(other_pdev);
2387 break;
2388 }
2389 pci_dev_put(other_pdev);
2390 }
2391
2392 /* The first function on the card (the reset owner) will
2393 * start and initialize the firmware. The other functions
2394 * on the card will only reset their firmware context.
2395 */
2396 if (!fn) {
2397 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
2398 "0x%x is the owner\n", ha->host_no, __func__,
2399 ha->pdev->devfn);
2400
2401 qla4_8xxx_idc_lock(ha);
2402 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2403 QLA82XX_DEV_COLD);
2404
2405 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
2406 QLA82XX_IDC_VERSION);
2407
2408 qla4_8xxx_idc_unlock(ha);
2409 clear_bit(AF_FW_RECOVERY, &ha->flags);
2410 rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
2411 qla4_8xxx_idc_lock(ha);
2412
2413 if (rval != QLA_SUCCESS) {
2414 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
2415 "FAILED\n", ha->host_no, __func__);
2416 qla4_8xxx_clear_drv_active(ha);
2417 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2418 QLA82XX_DEV_FAILED);
2419 } else {
2420 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
2421 "READY\n", ha->host_no, __func__);
2422 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2423 QLA82XX_DEV_READY);
2424 /* Clear driver state register */
2425 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
2426 qla4_8xxx_set_drv_active(ha);
2427 ret = qla4xxx_request_irqs(ha);
2428 if (ret) {
2429 ql4_printk(KERN_WARNING, ha, "Failed to "
2430 "reserve interrupt %d; already in use.\n",
2431 ha->pdev->irq);
2432 rval = QLA_ERROR;
2433 } else {
2434 ha->isp_ops->enable_intrs(ha);
2435 rval = QLA_SUCCESS;
2436 }
2437 }
2438 qla4_8xxx_idc_unlock(ha);
2439 } else {
2440 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
2441 "the reset owner\n", ha->host_no, __func__,
2442 ha->pdev->devfn);
2443 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
2444 QLA82XX_DEV_READY)) {
2445 clear_bit(AF_FW_RECOVERY, &ha->flags);
2446 rval = qla4xxx_initialize_adapter(ha,
2447 PRESERVE_DDB_LIST);
2448 if (rval == QLA_SUCCESS) {
2449 ret = qla4xxx_request_irqs(ha);
2450 if (ret) {
2451 ql4_printk(KERN_WARNING, ha, "Failed to"
2452 " reserve interrupt %d; already in"
2453 " use.\n", ha->pdev->irq);
2454 rval = QLA_ERROR;
2455 } else {
2456 ha->isp_ops->enable_intrs(ha);
2457 rval = QLA_SUCCESS;
2458 }
2459 }
2460 qla4_8xxx_idc_lock(ha);
2461 qla4_8xxx_set_drv_active(ha);
2462 qla4_8xxx_idc_unlock(ha);
2463 }
2464 }
2465 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2466 return rval;
2467 }
2468
2469 static pci_ers_result_t
2470 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
2471 {
2472 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
2473 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
2474 int rc;
2475
2476 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
2477 ha->host_no, __func__);
2478
2479 if (!is_aer_supported(ha))
2480 return PCI_ERS_RESULT_NONE;
2481
2482 /* Restore the saved state of PCIe device -
2483 * BAR registers, PCI Config space, PCIX, MSI,
2484 * IOV states
2485 */
2486 pci_restore_state(pdev);
2487
2488 /* pci_restore_state() clears the saved_state flag of the device,
2489 * so save the state again to set the flag back
2490 */
2491 pci_save_state(pdev);
2492
2493 /* Initialize device or resume if in suspended state */
2494 rc = pci_enable_device(pdev);
2495 if (rc) {
2496 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
2497 "device after reset\n", ha->host_no, __func__);
2498 goto exit_slot_reset;
2499 }
2500
2501 ha->isp_ops->disable_intrs(ha);
2502
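/*
 * Leave interrupts disabled here; the ISP82xx recovery path re-enables
 * them once firmware initialization and IRQ registration have succeeded.
 */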
2503 if (is_qla8022(ha)) {
2504 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
2505 ret = PCI_ERS_RESULT_RECOVERED;
2506 goto exit_slot_reset;
2507 } else
2508 goto exit_slot_reset;
2509 }
2510
2511 exit_slot_reset:
2512 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
2513 ha->host_no, __func__, ret);
2514 return ret;
2515 }
2516
2517 static void
2518 qla4xxx_pci_resume(struct pci_dev *pdev)
2519 {
2520 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
2521 int ret;
2522
2523 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
2524 ha->host_no, __func__);
2525
2526 ret = qla4xxx_wait_for_hba_online(ha);
2527 if (ret != QLA_SUCCESS) {
2528 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
2529 "resume I/O from slot/link_reset\n", ha->host_no,
2530 __func__);
2531 }
2532
2533 pci_cleanup_aer_uncorrect_error_status(pdev);
2534 clear_bit(AF_EEH_BUSY, &ha->flags);
2535 }
2536
2537 static struct pci_error_handlers qla4xxx_err_handler = {
2538 .error_detected = qla4xxx_pci_error_detected,
2539 .mmio_enabled = qla4xxx_pci_mmio_enabled,
2540 .slot_reset = qla4xxx_pci_slot_reset,
2541 .resume = qla4xxx_pci_resume,
2542 };
2543
2544 static struct pci_device_id qla4xxx_pci_tbl[] = {
2545 {
2546 .vendor = PCI_VENDOR_ID_QLOGIC,
2547 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
2548 .subvendor = PCI_ANY_ID,
2549 .subdevice = PCI_ANY_ID,
2550 },
2551 {
2552 .vendor = PCI_VENDOR_ID_QLOGIC,
2553 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
2554 .subvendor = PCI_ANY_ID,
2555 .subdevice = PCI_ANY_ID,
2556 },
2557 {
2558 .vendor = PCI_VENDOR_ID_QLOGIC,
2559 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
2560 .subvendor = PCI_ANY_ID,
2561 .subdevice = PCI_ANY_ID,
2562 },
2563 {
2564 .vendor = PCI_VENDOR_ID_QLOGIC,
2565 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
2566 .subvendor = PCI_ANY_ID,
2567 .subdevice = PCI_ANY_ID,
2568 },
2569 {0, 0},
2570 };
2571 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
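/*
 * Exporting the ID table with MODULE_DEVICE_TABLE() lets userspace
 * (udev/modprobe) autoload this driver when a matching QLogic iSCSI HBA
 * is found on the PCI bus.
 */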
2572
2573 static struct pci_driver qla4xxx_pci_driver = {
2574 .name = DRIVER_NAME,
2575 .id_table = qla4xxx_pci_tbl,
2576 .probe = qla4xxx_probe_adapter,
2577 .remove = qla4xxx_remove_adapter,
2578 .err_handler = &qla4xxx_err_handler,
2579 };
2580
2581 static int __init qla4xxx_module_init(void)
2582 {
2583 int ret;
2584
2585 /* Allocate cache for SRBs. */
2586 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
2587 SLAB_HWCACHE_ALIGN, NULL);
2588 if (srb_cachep == NULL) {
2589 printk(KERN_ERR
2590 "%s: Unable to allocate SRB cache..."
2591 "Failing load!\n", DRIVER_NAME);
2592 ret = -ENOMEM;
2593 goto no_srp_cache;
2594 }
2595
2596 /* Derive version string. */
2597 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
2598 if (ql4xextended_error_logging)
2599 strcat(qla4xxx_version_str, "-debug");
2600
2601 qla4xxx_scsi_transport =
2602 iscsi_register_transport(&qla4xxx_iscsi_transport);
2603 if (!qla4xxx_scsi_transport) {
2604 ret = -ENODEV;
2605 goto release_srb_cache;
2606 }
2607
2608 ret = pci_register_driver(&qla4xxx_pci_driver);
2609 if (ret)
2610 goto unregister_transport;
2611
2612 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
2613 return 0;
2614
2615 unregister_transport:
2616 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
2617 release_srb_cache:
2618 kmem_cache_destroy(srb_cachep);
2619 no_srp_cache:
2620 return ret;
2621 }
2622
2623 static void __exit qla4xxx_module_exit(void)
2624 {
2625 pci_unregister_driver(&qla4xxx_pci_driver);
2626 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
2627 kmem_cache_destroy(srb_cachep);
2628 }
2629
2630 module_init(qla4xxx_module_init);
2631 module_exit(qla4xxx_module_exit);
2632
2633 MODULE_AUTHOR("QLogic Corporation");
2634 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
2635 MODULE_LICENSE("GPL");
2636 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);