/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
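
/*
 * Note on the table above: these are the 126 valid arbitrated-loop
 * physical addresses (AL_PAs) that FC-AL defines for NL_Ports, listed
 * in the fixed order the driver walks when assigning SCSI target ids
 * in scan-down binding mode.
 */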
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		/*
		 * Turn on Ring interrupts
		 */
		spin_lock_irq(&phba->hbalock);
		control = readl(phba->HCregaddr);
		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Enable ring: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);

			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		} else {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Ring ok: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);
		}
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_work_list_done(phba);
}
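
/*
 * Summary of the above: work_ha mirrors the Host Attention register
 * bits captured at interrupt time (set outside this function).  Error,
 * mailbox and link attention are handled first, then per-vport timer
 * events, and finally slow-path (ELS ring) events, which are deferred
 * while LPFC_STOP_IOCB_EVENT is set on the ring.
 */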
static int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (1) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	return 0;
}
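
/*
 * Wake-up protocol for the worker thread above: producers set the
 * LPFC_DATA_READY bit in phba->data_flags and wake phba->work_waitq
 * (via lpfc_worker_wake_up(), defined elsewhere in the driver); the
 * loop consumes the bit with test_and_clear_bit() before calling
 * lpfc_work_done().
 */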
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
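
/*
 * Illustrative usage of lpfc_workq_post_event(): a sleeping caller
 * typically passes a status word and a completion as the two
 * arguments, e.g.
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl,
 *			      LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 *
 * which pairs with the complete() calls in lpfc_work_list_done()
 * above.  (Sketch only; the actual callers live in other lpfc source
 * files.)
 */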
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if (!remove && ndlp->nlp_type & NLP_FABRIC)
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->vport = vport;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		if (phba->cfg_enable_npiv)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (cfglink_mbox) {
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
1034 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1039 * This routine handles processing a READ_LA mailbox
1040 * command upon completion. It is setup in the LPFC_MBOXQ
1041 * as the completion routine when the command is
1042 * handed off to the SLI layer.
1045 lpfc_mbx_cmpl_read_la(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
1047 struct lpfc_vport
*vport
= pmb
->vport
;
1048 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1050 MAILBOX_t
*mb
= &pmb
->mb
;
1051 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
1053 /* Unblock ELS traffic */
1054 phba
->sli
.ring
[LPFC_ELS_RING
].flag
&= ~LPFC_STOP_IOCB_EVENT
;
1055 /* Check for error */
1056 if (mb
->mbxStatus
) {
1057 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
1058 "1307 READ_LA mbox error x%x state x%x\n",
1059 mb
->mbxStatus
, vport
->port_state
);
1060 lpfc_mbx_issue_link_down(phba
);
1061 phba
->link_state
= LPFC_HBA_ERROR
;
1062 goto lpfc_mbx_cmpl_read_la_free_mbuf
;
1065 la
= (READ_LA_VAR
*) & pmb
->mb
.un
.varReadLA
;
1067 memcpy(&phba
->alpa_map
[0], mp
->virt
, 128);
1069 spin_lock_irq(shost
->host_lock
);
1071 vport
->fc_flag
|= FC_BYPASSED_MODE
;
1073 vport
->fc_flag
&= ~FC_BYPASSED_MODE
;
1074 spin_unlock_irq(shost
->host_lock
);
1076 if (((phba
->fc_eventTag
+ 1) < la
->eventTag
) ||
1077 (phba
->fc_eventTag
== la
->eventTag
)) {
1078 phba
->fc_stat
.LinkMultiEvent
++;
1079 if (la
->attType
== AT_LINK_UP
)
1080 if (phba
->fc_eventTag
!= 0)
1081 lpfc_linkdown(phba
);
1084 phba
->fc_eventTag
= la
->eventTag
;
1086 phba
->sli
.sli_flag
|= LPFC_MENLO_MAINT
;
1088 phba
->sli
.sli_flag
&= ~LPFC_MENLO_MAINT
;
1090 if (la
->attType
== AT_LINK_UP
&& (!la
->mm
)) {
1091 phba
->fc_stat
.LinkUp
++;
1092 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
1093 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
1094 "1306 Link Up Event in loop back mode "
1095 "x%x received Data: x%x x%x x%x x%x\n",
1096 la
->eventTag
, phba
->fc_eventTag
,
1097 la
->granted_AL_PA
, la
->UlnkSpeed
,
1100 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
1101 "1303 Link Up Event x%x received "
1102 "Data: x%x x%x x%x x%x x%x x%x %d\n",
1103 la
->eventTag
, phba
->fc_eventTag
,
1104 la
->granted_AL_PA
, la
->UlnkSpeed
,
1107 phba
->wait_4_mlo_maint_flg
);
1109 lpfc_mbx_process_link_up(phba
, la
);
1110 } else if (la
->attType
== AT_LINK_DOWN
) {
1111 phba
->fc_stat
.LinkDown
++;
1112 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
1113 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
1114 "1308 Link Down Event in loop back mode "
1116 "Data: x%x x%x x%x\n",
1117 la
->eventTag
, phba
->fc_eventTag
,
1118 phba
->pport
->port_state
, vport
->fc_flag
);
1121 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
1122 "1305 Link Down Event x%x received "
1123 "Data: x%x x%x x%x x%x x%x\n",
1124 la
->eventTag
, phba
->fc_eventTag
,
1125 phba
->pport
->port_state
, vport
->fc_flag
,
1128 lpfc_mbx_issue_link_down(phba
);
1130 if (la
->mm
&& la
->attType
== AT_LINK_UP
) {
1131 if (phba
->link_state
!= LPFC_LINK_DOWN
) {
1132 phba
->fc_stat
.LinkDown
++;
1133 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
1134 "1312 Link Down Event x%x received "
1135 "Data: x%x x%x x%x\n",
1136 la
->eventTag
, phba
->fc_eventTag
,
1137 phba
->pport
->port_state
, vport
->fc_flag
);
1138 lpfc_mbx_issue_link_down(phba
);
1140 lpfc_enable_la(phba
);
1142 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
1143 "1310 Menlo Maint Mode Link up Event x%x rcvd "
1144 "Data: x%x x%x x%x\n",
1145 la
->eventTag
, phba
->fc_eventTag
,
1146 phba
->pport
->port_state
, vport
->fc_flag
);
1148 * The cmnd that triggered this will be waiting for this
1151 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
1152 if (phba
->wait_4_mlo_maint_flg
) {
1153 phba
->wait_4_mlo_maint_flg
= 0;
1154 wake_up_interruptible(&phba
->wait_4_mlo_m_q
);
1160 lpfc_issue_clear_la(phba
, vport
);
1161 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
1162 "1311 fa %d\n", la
->fa
);
1165 lpfc_mbx_cmpl_read_la_free_mbuf
:
1166 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
1168 mempool_free(pmb
, phba
->mbox_mem_pool
);
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	}
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (vport->load_flag & FC_UNLOADING)
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport **vports;
	int i;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0;
			     i <= phba->max_vpi && vports[i] != NULL;
			     i++) {
				if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
					continue;
				if (phba->fc_topology == TOPOLOGY_LOOP) {
					lpfc_vport_set_state(vports[i],
							FC_VPORT_LINKDOWN);
					continue;
				}
				if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
					lpfc_initial_fdisc(vports[i]);
				else {
					lpfc_vport_set_state(vports[i],
						FC_VPORT_NO_FABRIC_SUPP);
					lpfc_printf_vlog(vport, KERN_ERR,
							 LOG_ELS,
							 "0259 No NPIV "
							 "Fabric support\n");
				}
			}
		lpfc_destroy_vport_work_array(phba, vports);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
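
/*
 * Invariant maintained by lpfc_nlp_set_state(): an active ndlp sits on
 * vport->fc_nodes exactly once, and the per-state counters updated via
 * lpfc_nlp_counters() track the population of each state, so every
 * transition decrements the old state's count and increments the new
 * one's.
 */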
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->nlp_sid = NLP_NO_SID;
	/* ndlp management re-initialize */
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable: did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout.
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer: tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if its running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl)(phba, iocb, iocb);
		}
	}

	return 0;
}
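
/*
 * Note: lpfc_no_rpi() only walks the pending txq; anything already on
 * the txcmplq is left alone because, per the comment above, the
 * firmware itself completes those with a "no rpi" error once the
 * UNREG_LOGIN takes effect.
 */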
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc == MBX_NOT_FINISHED) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis)\n");
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
		    != NULL) {
			rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
					    (uint8_t *) &vport->fc_sparam,
					    mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED)
					mempool_free(mbox,
						     phba->mbox_mem_pool);
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if
	 * we do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0))
		return 0;

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
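/* A Fibre Channel D_ID packs domain, area, and AL_PA into 24 bits, and the
 * D_ID union above exposes them as un.b.domain / un.b.area / un.b.id. The
 * guarded sketch below shows the decomposition lpfc_matchdid() relies on
 * when testing for a private-loop (domain/area zero) match; it is an
 * illustrative helper, not part of the driver.
 */
#if 0	/* illustrative sketch only */
static void example_show_did_fields(uint32_t did)
{
	D_ID d;

	d.un.word = did;
	/* e.g. did 0x010200 -> domain 0x01, area 0x02, id (AL_PA) 0x00 */
	printk(KERN_INFO "did x%x: domain x%x area x%x id x%x\n",
	       did, d.un.b.domain, d.un.b.area, d.un.b.id);
}
#endif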
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
			mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
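/* When the adapter returns no ALPA map, the loop above probes every entry
 * of lpfcAlpaArray. cfg_scan_down only selects the walk direction: index j
 * walks the table front-to-back (highest ALPA 0xEF first), while
 * FC_MAXLOOP - j - 1 walks it back-to-front. A guarded, fragmentary
 * restatement of that index selection (scan_down is a stand-in for
 * vport->cfg_scan_down):
 */
#if 0	/* illustrative sketch only */
	for (j = 0; j < FC_MAXLOOP; j++) {
		index = scan_down ? j : FC_MAXLOOP - j - 1;
		alpa = lpfcAlpaArray[index];	/* table is ordered high-to-low */
		/* skip our own ALPA, then lpfc_setup_disc_node(vport, alpa) */
	}
#endif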
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * If it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED)
			mempool_free(regvpimbox, phba->mbox_mem_pool);
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp)
			continue;
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp)
			continue;
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	}
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE)
				lpfc_free_tx(phba, ndlp);
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
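/* lpfc_disc_timeout() runs in timer (softirq) context, so it only posts
 * WORKER_DISC_TMO and wakes the worker thread, which later calls
 * lpfc_disc_timeout_handler() in process context. A guarded sketch of how
 * such a discovery timer is typically armed against a vport; the field name
 * and the timeout value here are illustrative assumptions, not a new driver
 * code path.
 */
#if 0	/* illustrative sketch only */
	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;
	mod_timer(&vport->fc_disctmo, jiffies + HZ * disc_tmo_seconds);
#endif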
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
		/* port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN.
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
		/* port_state is identically LPFC_FLOGI while waiting for
		 * FLOGI cmpl.
		 */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
		/* hba_state is identically LPFC_FABRIC_CFG_LINK while
		 * waiting for NameServer login.
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}
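/* __lpfc_find_node() factors node lookup into a generic list walk plus a
 * node_filter callback, so a new search key only needs a small predicate
 * like lpfc_filter_by_rpi() or lpfc_filter_by_wwpn() above. A guarded
 * sketch of adding a lookup by SCSI id (hypothetical helpers, not part of
 * the driver):
 */
#if 0	/* illustrative sketch only */
static int example_filter_by_sid(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *sid = param;

	return ndlp->nlp_sid == *sid;
}

static struct lpfc_nodelist *
example_findnode_sid(struct lpfc_vport *vport, uint16_t sid)
{
	/* caller must hold shost->host_lock, as lpfc_findnode_wwpn() does */
	return __lpfc_find_node(vport, example_filter_by_sid, &sid);
}
#endif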
/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->vport->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp))
		mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* Check ndlp usage to prevent incrementing the reference
		 * count of an ndlp that is in the process of being
		 * released.
		 */
		phba = ndlp->vport->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->vport->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, kref_put returns 1 when decrementing a reference count
	 * that was 1; it invokes the release callback function, but it
	 * still leaves the reference count at 1 (it does not actually
	 * perform the last decrement). Otherwise, it actually decrements
	 * the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
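/* Callers pair lpfc_nlp_get()/lpfc_nlp_put() around any window where an
 * ndlp pointer is held outside a vport->fc_nodes list traversal, e.g. while
 * an asynchronous command referencing the node is outstanding. A guarded
 * sketch of the pattern (issue_some_async_cmd is a hypothetical stand-in):
 */
#if 0	/* illustrative sketch only */
	ndlp = lpfc_nlp_get(ndlp);	/* take a reference; NULL if inactive */
	if (ndlp) {
		issue_some_async_cmd(ndlp);	/* hypothetical */
		/* the completion path drops the reference: */
		lpfc_nlp_put(ndlp);	/* may trigger lpfc_nlp_release() */
	}
#endif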
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp
 * has not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}