/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
    0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
    0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
    0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
    0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
    0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
    0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
    0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
    0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
    0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
    0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
    0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
    0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
    0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
    struct lpfc_rport_data *rdata;
    struct lpfc_nodelist *ndlp;
    struct lpfc_hba *phba;

    rdata = rport->dd_data;
    ndlp = rdata->pnode;

    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
        if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
            printk(KERN_ERR "Cannot find remote node"
                   " to terminate I/O Data x%x\n",
                   rport->port_id);
        return;
    }

    phba = ndlp->phba;

    lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
        "rport terminate: sid:x%x did:x%x flg:x%x",
        ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

    if (ndlp->nlp_sid != NLP_NO_SID) {
        lpfc_sli_abort_iocb(ndlp->vport,
            &phba->sli.ring[phba->sli.fcp_ring],
            ndlp->nlp_sid, 0, LPFC_CTX_TGT);
    }
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
static void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
    struct lpfc_rport_data *rdata;
    struct lpfc_nodelist *ndlp;
    struct lpfc_vport *vport;
    struct lpfc_hba *phba;
    struct lpfc_work_evt *evtp;
    int put_node;
    int put_rport;

    rdata = rport->dd_data;
    ndlp = rdata->pnode;
    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
        return;

    vport = ndlp->vport;
    phba = vport->phba;

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
        "rport devlosscb: sid:x%x did:x%x flg:x%x",
        ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

    /* Don't defer this if we are in the process of deleting the vport
     * or unloading the driver. The unload will cleanup the node
     * appropriately we just need to cleanup the ndlp rport info here.
     */
    if (vport->load_flag & FC_UNLOADING) {
        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
            lpfc_nlp_put(ndlp);
        if (put_rport)
            put_device(&rport->dev);
        return;
    }

    if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
        return;

    evtp = &ndlp->dev_loss_evt;

    if (!list_empty(&evtp->evt_listp))
        return;

    spin_lock_irq(&phba->hbalock);
    /* We need to hold the node by incrementing the reference
     * count until this queued work is done
     */
    evtp->evt_arg1 = lpfc_nlp_get(ndlp);
    if (evtp->evt_arg1) {
        evtp->evt = LPFC_EVT_DEV_LOSS;
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        lpfc_worker_wake_up(phba);
    }
    spin_unlock_irq(&phba->hbalock);

    return;
}
/*
 * This function is called from the worker thread when dev_loss_tmo
 * fires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
    struct lpfc_rport_data *rdata;
    struct fc_rport *rport;
    struct lpfc_vport *vport;
    struct lpfc_hba *phba;
    uint8_t *name;
    int put_node;
    int put_rport;
    int warn_on = 0;

    rport = ndlp->rport;

    if (!rport)
        return;

    rdata = rport->dd_data;
    name = (uint8_t *) &ndlp->nlp_portname;
    vport = ndlp->vport;
    phba = vport->phba;

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
        "rport devlosstmo:did:x%x type:x%x id:x%x",
        ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

    /* Don't defer this if we are in the process of deleting the vport
     * or unloading the driver. The unload will cleanup the node
     * appropriately we just need to cleanup the ndlp rport info here.
     */
    if (vport->load_flag & FC_UNLOADING) {
        if (ndlp->nlp_sid != NLP_NO_SID) {
            /* flush the target */
            lpfc_sli_abort_iocb(vport,
                    &phba->sli.ring[phba->sli.fcp_ring],
                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
            lpfc_nlp_put(ndlp);
        if (put_rport)
            put_device(&rport->dev);
        return;
    }

    if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                "0284 Devloss timeout Ignored on "
                "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                "NPort x%x\n",
                *name, *(name+1), *(name+2), *(name+3),
                *(name+4), *(name+5), *(name+6), *(name+7),
                ndlp->nlp_DID);
        return;
    }

    if (ndlp->nlp_type & NLP_FABRIC) {
        /* We will clean up these Nodes in linkup */
        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
            lpfc_nlp_put(ndlp);
        if (put_rport)
            put_device(&rport->dev);
        return;
    }

    if (ndlp->nlp_sid != NLP_NO_SID) {
        warn_on = 1;
        /* flush the target */
        lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                ndlp->nlp_sid, 0, LPFC_CTX_TGT);
    }

    if (warn_on) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                "0203 Devloss timeout on "
                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                "NPort x%06x Data: x%x x%x x%x\n",
                *name, *(name+1), *(name+2), *(name+3),
                *(name+4), *(name+5), *(name+6), *(name+7),
                ndlp->nlp_DID, ndlp->nlp_flag,
                ndlp->nlp_state, ndlp->nlp_rpi);
    } else {
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                "0204 Devloss timeout on "
                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                "NPort x%06x Data: x%x x%x x%x\n",
                *name, *(name+1), *(name+2), *(name+3),
                *(name+4), *(name+5), *(name+6), *(name+7),
                ndlp->nlp_DID, ndlp->nlp_flag,
                ndlp->nlp_state, ndlp->nlp_rpi);
    }

    put_node = rdata->pnode != NULL;
    put_rport = ndlp->rport != NULL;
    rdata->pnode = NULL;
    ndlp->rport = NULL;
    if (put_node)
        lpfc_nlp_put(ndlp);
    if (put_rport)
        put_device(&rport->dev);

    if (!(vport->load_flag & FC_UNLOADING) &&
        !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
        !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
        (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
        (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE))
        lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

    lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates data
 * structure required for posting event. It also keeps track of
 * number of events pending and prevents an event storm when there are
 * too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
    struct lpfc_fast_path_event *ret;

    /* If there are lot of fast event do not exhaust memory due to this */
    if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
        return NULL;

    ret = kzalloc(sizeof(struct lpfc_fast_path_event),
            GFP_ATOMIC);
    if (ret) {
        atomic_inc(&phba->fast_event_count);
        INIT_LIST_HEAD(&ret->work_evt.evt_listp);
        ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
    }
    return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
        struct lpfc_fast_path_event *evt) {

    atomic_dec(&phba->fast_event_count);
    kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
        struct lpfc_work_evt *evtp)
{
    unsigned long evt_category, evt_sub_category;
    struct lpfc_fast_path_event *fast_evt_data;
    char *evt_data;
    uint32_t evt_data_size;
    struct Scsi_Host *shost;

    fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
            work_evt);

    evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
    evt_sub_category = (unsigned long) fast_evt_data->un.
            fabric_evt.subcategory;
    shost = lpfc_shost_from_vport(fast_evt_data->vport);
    if (evt_category == FC_REG_FABRIC_EVENT) {
        if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
            evt_data = (char *) &fast_evt_data->un.read_check_error;
            evt_data_size = sizeof(fast_evt_data->un.
                    read_check_error);
        } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
                (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
            evt_data = (char *) &fast_evt_data->un.fabric_evt;
            evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
        } else {
            lpfc_free_fast_evt(phba, fast_evt_data);
            return;
        }
    } else if (evt_category == FC_REG_SCSI_EVENT) {
        switch (evt_sub_category) {
        case LPFC_EVENT_QFULL:
        case LPFC_EVENT_DEVBSY:
            evt_data = (char *) &fast_evt_data->un.scsi_evt;
            evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
            break;
        case LPFC_EVENT_CHECK_COND:
            evt_data = (char *) &fast_evt_data->un.check_cond_evt;
            evt_data_size = sizeof(fast_evt_data->un.
                    check_cond_evt);
            break;
        case LPFC_EVENT_VARQUEDEPTH:
            evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
            evt_data_size = sizeof(fast_evt_data->un.
                    queue_depth_evt);
            break;
        default:
            lpfc_free_fast_evt(phba, fast_evt_data);
            return;
        }
    } else {
        lpfc_free_fast_evt(phba, fast_evt_data);
        return;
    }

    fc_host_post_vendor_event(shost,
        fc_get_event_number(),
        evt_data_size,
        evt_data,
        LPFC_NL_VENDOR_ID);

    lpfc_free_fast_evt(phba, fast_evt_data);
    return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
    struct lpfc_work_evt *evtp = NULL;
    struct lpfc_nodelist *ndlp;
    int free_evt;

    spin_lock_irq(&phba->hbalock);
    while (!list_empty(&phba->work_list)) {
        list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                evt_listp);
        spin_unlock_irq(&phba->hbalock);
        free_evt = 1;
        switch (evtp->evt) {
        case LPFC_EVT_ELS_RETRY:
            ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
            lpfc_els_retry_delay_handler(ndlp);
            free_evt = 0; /* evt is part of ndlp */
            /* decrement the node reference count held
             * for this queued work
             */
            lpfc_nlp_put(ndlp);
            break;
        case LPFC_EVT_DEV_LOSS:
            ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
            lpfc_dev_loss_tmo_handler(ndlp);
            free_evt = 0;
            /* decrement the node reference count held for
             * this queued work
             */
            lpfc_nlp_put(ndlp);
            break;
        case LPFC_EVT_ONLINE:
            if (phba->link_state < LPFC_LINK_DOWN)
                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
            else
                *(int *) (evtp->evt_arg1) = 0;
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_OFFLINE_PREP:
            if (phba->link_state >= LPFC_LINK_DOWN)
                lpfc_offline_prep(phba);
            *(int *)(evtp->evt_arg1) = 0;
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_OFFLINE:
            lpfc_offline(phba);
            lpfc_sli_brdrestart(phba);
            *(int *)(evtp->evt_arg1) =
                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
            lpfc_unblock_mgmt_io(phba);
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_WARM_START:
            lpfc_offline(phba);
            lpfc_reset_barrier(phba);
            lpfc_sli_brdreset(phba);
            lpfc_hba_down_post(phba);
            *(int *)(evtp->evt_arg1) =
                lpfc_sli_brdready(phba, HS_MBRDY);
            lpfc_unblock_mgmt_io(phba);
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_KILL:
            lpfc_offline(phba);
            *(int *)(evtp->evt_arg1)
                = (phba->pport->stopped)
                    ? 0 : lpfc_sli_brdkill(phba);
            lpfc_unblock_mgmt_io(phba);
            complete((struct completion *)(evtp->evt_arg2));
            break;
        case LPFC_EVT_FASTPATH_MGMT_EVT:
            lpfc_send_fastpath_evt(phba, evtp);
            free_evt = 0;
            break;
        case LPFC_EVT_RESET_HBA:
            if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_reset_hba(phba);
            break;
        }
        if (free_evt)
            kfree(evtp);
        spin_lock_irq(&phba->hbalock);
    }
    spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
    struct lpfc_sli_ring *pring;
    uint32_t ha_copy, status, control, work_port_events;
    struct lpfc_vport **vports;
    struct lpfc_vport *vport;
    int i;

    spin_lock_irq(&phba->hbalock);
    ha_copy = phba->work_ha;
    phba->work_ha = 0;
    spin_unlock_irq(&phba->hbalock);

    /* First, try to post the next mailbox command to SLI4 device */
    if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
        lpfc_sli4_post_async_mbox(phba);

    if (ha_copy & HA_ERATT)
        /* Handle the error attention event */
        lpfc_handle_eratt(phba);

    if (ha_copy & HA_MBATT)
        lpfc_sli_handle_mb_event(phba);

    if (ha_copy & HA_LATT)
        lpfc_handle_latt(phba);

    /* Process SLI4 events */
    if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
        if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
            lpfc_sli4_fcp_xri_abort_event_proc(phba);
        if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
            lpfc_sli4_els_xri_abort_event_proc(phba);
        if (phba->hba_flag & ASYNC_EVENT)
            lpfc_sli4_async_event_proc(phba);
        if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
            spin_lock_irq(&phba->hbalock);
            phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
            spin_unlock_irq(&phba->hbalock);
            lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
        }
        if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
            lpfc_sli4_fcf_redisc_event_proc(phba);
    }

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports; i++) {
            /*
             * We could have no vports in array if unloading, so if
             * this happens then just use the pport
             */
            if (vports[i] == NULL && i == 0)
                vport = phba->pport;
            else
                vport = vports[i];
            if (vport == NULL)
                break;
            spin_lock_irq(&vport->work_port_lock);
            work_port_events = vport->work_port_events;
            vport->work_port_events &= ~work_port_events;
            spin_unlock_irq(&vport->work_port_lock);
            if (work_port_events & WORKER_DISC_TMO)
                lpfc_disc_timeout_handler(vport);
            if (work_port_events & WORKER_ELS_TMO)
                lpfc_els_timeout_handler(vport);
            if (work_port_events & WORKER_HB_TMO)
                lpfc_hb_timeout_handler(phba);
            if (work_port_events & WORKER_MBOX_TMO)
                lpfc_mbox_timeout_handler(phba);
            if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                lpfc_unblock_fabric_iocbs(phba);
            if (work_port_events & WORKER_FDMI_TMO)
                lpfc_fdmi_timeout_handler(vport);
            if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                lpfc_ramp_down_queue_handler(phba);
            if (work_port_events & WORKER_RAMP_UP_QUEUE)
                lpfc_ramp_up_queue_handler(phba);
        }
    lpfc_destroy_vport_work_array(phba, vports);

    pring = &phba->sli.ring[LPFC_ELS_RING];
    status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
    status >>= (4*LPFC_ELS_RING);
    if ((status & HA_RXMASK) ||
        (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
        (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
        if (pring->flag & LPFC_STOP_IOCB_EVENT) {
            pring->flag |= LPFC_DEFERRED_RING_EVENT;
            /* Set the lpfc data pending flag */
            set_bit(LPFC_DATA_READY, &phba->data_flags);
        } else {
            pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
            lpfc_sli_handle_slow_ring_event(phba, pring,
                    (status & HA_RXMASK));
        }
        if (phba->pport->work_port_events & WORKER_SERVICE_TXQ)
            lpfc_drain_txq(phba);
        /*
         * Turn on Ring interrupts
         */
        if (phba->sli_rev <= LPFC_SLI_REV3) {
            spin_lock_irq(&phba->hbalock);
            control = readl(phba->HCregaddr);
            if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                lpfc_debugfs_slow_ring_trc(phba,
                    "WRK Enable ring: cntl:x%x hacopy:x%x",
                    control, ha_copy, 0);

                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                writel(control, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
            } else {
                lpfc_debugfs_slow_ring_trc(phba,
                    "WRK Ring ok: cntl:x%x hacopy:x%x",
                    control, ha_copy, 0);
            }
            spin_unlock_irq(&phba->hbalock);
        }
    }
    lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
    struct lpfc_hba *phba = p;
    int rc;

    set_user_nice(current, -20);
    phba->data_flags = 0;

    while (!kthread_should_stop()) {
        /* wait and check worker queue activities */
        rc = wait_event_interruptible(phba->work_waitq,
                (test_and_clear_bit(LPFC_DATA_READY,
                        &phba->data_flags)
                 || kthread_should_stop()));
        /* Signal wakeup shall terminate the worker thread */
        if (rc) {
            lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                    "0433 Wakeup on signal: rc=x%x\n", rc);
            break;
        }

        /* Attend pending lpfc data processing */
        lpfc_work_done(phba);
    }
    phba->worker_thread = NULL;
    lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
            "0432 Worker thread stopped.\n");
    return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
        uint32_t evt)
{
    struct lpfc_work_evt *evtp;
    unsigned long flags;

    /*
     * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
     * be queued to worker thread for processing
     */
    evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
    if (!evtp)
        return 0;

    evtp->evt_arg1 = arg1;
    evtp->evt_arg2 = arg2;
    evtp->evt = evt;

    spin_lock_irqsave(&phba->hbalock, flags);
    list_add_tail(&evtp->evt_listp, &phba->work_list);
    spin_unlock_irqrestore(&phba->hbalock, flags);

    lpfc_worker_wake_up(phba);

    return 1;
}
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp, *next_ndlp;
    int rc;

    list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
        if (!NLP_CHK_NODE_ACT(ndlp))
            continue;
        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
            continue;
        if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
            ((vport->port_type == LPFC_NPIV_PORT) &&
            (ndlp->nlp_DID == NameServer_DID)))
            lpfc_unreg_rpi(vport, ndlp);

        /* Leave Fabric nodes alone on link down */
        if ((phba->sli_rev < LPFC_SLI_REV4) &&
            (!remove && ndlp->nlp_type & NLP_FABRIC))
            continue;
        rc = lpfc_disc_state_machine(vport, ndlp, NULL,
                remove
                ? NLP_EVT_DEVICE_RM
                : NLP_EVT_DEVICE_RECOVERY);
    }
    if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
        lpfc_mbx_unreg_vpi(vport);
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(shost->host_lock);
    }
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
    lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

    /* Cleanup any outstanding received buffers */
    lpfc_cleanup_rcv_buffers(vport);

    /* Cleanup any outstanding RSCN activity */
    lpfc_els_flush_rscn(vport);

    /* Cleanup any outstanding ELS commands */
    lpfc_els_flush_cmd(vport);

    lpfc_cleanup_rpis(vport, 0);

    /* Turn off discovery timer if it's running */
    lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

    fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
        "Link Down: state:x%x rtry:x%x flg:x%x",
        vport->port_state, vport->fc_ns_retry, vport->fc_flag);

    lpfc_port_link_failure(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
    struct lpfc_vport *vport = phba->pport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_vport **vports;
    LPFC_MBOXQ_t *mb;
    int i;

    if (phba->link_state == LPFC_LINK_DOWN)
        return 0;

    /* Block all SCSI stack I/Os */
    lpfc_scsi_dev_block(phba);

    spin_lock_irq(&phba->hbalock);
    phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
    spin_unlock_irq(&phba->hbalock);
    if (phba->link_state > LPFC_LINK_DOWN) {
        phba->link_state = LPFC_LINK_DOWN;
        spin_lock_irq(shost->host_lock);
        phba->pport->fc_flag &= ~FC_LBIT;
        spin_unlock_irq(shost->host_lock);
    }
    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            /* Issue a LINK DOWN event to all nodes */
            lpfc_linkdown_port(vports[i]);
        }
    lpfc_destroy_vport_work_array(phba, vports);
    /* Clean up any firmware default rpi's */
    mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (mb) {
        lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
        mb->vport = vport;
        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
            == MBX_NOT_FINISHED) {
            mempool_free(mb, phba->mbox_mem_pool);
        }
    }

    /* Setup myDID for link up if we are in pt2pt mode */
    if (phba->pport->fc_flag & FC_PT2PT) {
        phba->pport->fc_myDID = 0;
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
            lpfc_config_link(phba, mb);
            mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
            mb->vport = vport;
            if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                == MBX_NOT_FINISHED) {
                mempool_free(mb, phba->mbox_mem_pool);
            }
        }
        spin_lock_irq(shost->host_lock);
        phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
        spin_unlock_irq(shost->host_lock);
    }

    return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
    struct lpfc_nodelist *ndlp;

    list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
        if (!NLP_CHK_NODE_ACT(ndlp))
            continue;
        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
            continue;
        if (ndlp->nlp_type & NLP_FABRIC) {
            /* On Linkup its safe to clean up the ndlp
             * from Fabric connections.
             */
            if (ndlp->nlp_DID != Fabric_DID)
                lpfc_unreg_rpi(vport, ndlp);
            lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
            /* Fail outstanding IO now since device is
             * marked for PLOGI.
             */
            lpfc_unreg_rpi(vport, ndlp);
        }
    }
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;

    if ((vport->load_flag & FC_UNLOADING) != 0)
        return;

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
        "Link Up: top:x%x speed:x%x flg:x%x",
        phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

    /* If NPIV is not enabled, only bring the physical port up */
    if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
        (vport != phba->pport))
        return;

    fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

    spin_lock_irq(shost->host_lock);
    vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
    vport->fc_flag |= FC_NDISC_ACTIVE;
    vport->fc_ns_retry = 0;
    spin_unlock_irq(shost->host_lock);

    if (vport->fc_flag & FC_LBIT)
        lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
    struct lpfc_vport **vports;
    int i;

    phba->link_state = LPFC_LINK_UP;

    /* Unblock fabric iocbs if they are blocked */
    clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
    del_timer_sync(&phba->fabric_block_timer);

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
            lpfc_linkup_port(vports[i]);
    lpfc_destroy_vport_work_array(phba, vports);
    if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
        (phba->sli_rev < LPFC_SLI_REV4))
        lpfc_issue_clear_la(phba, phba->pport);

    return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    struct lpfc_vport *vport = pmb->vport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_sli *psli = &phba->sli;
    MAILBOX_t *mb = &pmb->u.mb;
    uint32_t control;

    /* Since we don't do discovery right now, turn these off here */
    psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
    psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
    psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

    /* Check for error */
    if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
        /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                "0320 CLEAR_LA mbxStatus error x%x hba "
                "state x%x\n",
                mb->mbxStatus, vport->port_state);
        phba->link_state = LPFC_HBA_ERROR;
        goto out;
    }

    if (vport->port_type == LPFC_PHYSICAL_PORT)
        phba->link_state = LPFC_HBA_READY;

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag |= LPFC_PROCESS_LA;
    control = readl(phba->HCregaddr);
    control |= HC_LAINT_ENA;
    writel(control, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);
    mempool_free(pmb, phba->mbox_mem_pool);
    return;

out:
    /* Device Discovery completes */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
            "0225 Device Discovery completes\n");
    mempool_free(pmb, phba->mbox_mem_pool);

    spin_lock_irq(shost->host_lock);
    vport->fc_flag &= ~FC_ABORT_DISCOVERY;
    spin_unlock_irq(shost->host_lock);

    lpfc_can_disctmo(vport);

    /* turn on Link Attention interrupts */

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag |= LPFC_PROCESS_LA;
    control = readl(phba->HCregaddr);
    control |= HC_LAINT_ENA;
    writel(control, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);

    return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    struct lpfc_vport *vport = pmb->vport;

    if (pmb->u.mb.mbxStatus)
        goto out;

    mempool_free(pmb, phba->mbox_mem_pool);

    if (phba->fc_topology == TOPOLOGY_LOOP &&
        vport->fc_flag & FC_PUBLIC_LOOP &&
        !(vport->fc_flag & FC_LBIT)) {
        /* Need to wait for FAN - use discovery timer
         * for timeout. port_state is identically
         * LPFC_LOCAL_CFG_LINK while waiting for FAN
         */
        lpfc_set_disctmo(vport);
        return;
    }

    /* Start discovery by sending a FLOGI. port_state is identically
     * LPFC_FLOGI while waiting for FLOGI cmpl
     */
    if (vport->port_state != LPFC_FLOGI) {
        lpfc_initial_flogi(vport);
    }
    return;

out:
    lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
            "0306 CONFIG_LINK mbxStatus error x%x "
            "HBA state x%x\n",
            pmb->u.mb.mbxStatus, vport->port_state);
    mempool_free(pmb, phba->mbox_mem_pool);

    lpfc_linkdown(phba);

    lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
            "0200 CONFIG_LINK bad hba state x%x\n",
            vport->port_state);

    lpfc_issue_clear_la(phba, vport);
    return;
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
    struct lpfc_vport *vport = mboxq->vport;
    unsigned long flags;

    if (mboxq->u.mb.mbxStatus) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                "2017 REG_FCFI mbxStatus error x%x "
                "HBA state x%x\n",
                mboxq->u.mb.mbxStatus, vport->port_state);
        mempool_free(mboxq, phba->mbox_mem_pool);
        return;
    }

    /* Start FCoE discovery by sending a FLOGI. */
    phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
    /* Set the FCFI registered flag */
    spin_lock_irqsave(&phba->hbalock, flags);
    phba->fcf.fcf_flag |= FCF_REGISTERED;
    spin_unlock_irqrestore(&phba->hbalock, flags);
    /* If there is a pending FCoE event, restart FCF table scan. */
    if (lpfc_check_pending_fcoe_event(phba, 1)) {
        mempool_free(mboxq, phba->mbox_mem_pool);
        return;
    }
    spin_lock_irqsave(&phba->hbalock, flags);
    phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
    phba->hba_flag &= ~FCF_DISC_INPROGRESS;
    spin_unlock_irqrestore(&phba->hbalock, flags);
    if (vport->port_state != LPFC_FLOGI)
        lpfc_initial_flogi(vport);

    mempool_free(mboxq, phba->mbox_mem_pool);
    return;
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
    if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
        return 0;
    if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
        return 0;
    if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
        return 0;
    if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
        return 0;
    if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
        return 0;
    if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
        return 0;
    if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
        return 0;
    if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
        return 0;
    return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
    if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
        return 0;
    if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
        return 0;
    if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
        return 0;
    if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
        return 0;
    if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
        return 0;
    if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
        return 0;
    if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
        return 0;
    if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
        return 0;
    return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
    if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
        return 0;
    if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
        return 0;
    if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
        return 0;
    if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
        return 0;
    if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
        return 0;
    if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
        return 0;
    return 1;
}
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
    return (curr_vlan_id == new_vlan_id);
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
        struct fcf_record *new_fcf_record)
{
    /* Fabric name */
    fcf_rec->fabric_name[0] =
        bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
    fcf_rec->fabric_name[1] =
        bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
    fcf_rec->fabric_name[2] =
        bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
    fcf_rec->fabric_name[3] =
        bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
    fcf_rec->fabric_name[4] =
        bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
    fcf_rec->fabric_name[5] =
        bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
    fcf_rec->fabric_name[6] =
        bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
    fcf_rec->fabric_name[7] =
        bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
    /* Mac address */
    fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
    fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
    fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
    fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
    fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
    fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
    /* FCF record index */
    fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
    /* FCF record priority */
    fcf_rec->priority = new_fcf_record->fip_priority;
    /* Switch name */
    fcf_rec->switch_name[0] =
        bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
    fcf_rec->switch_name[1] =
        bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
    fcf_rec->switch_name[2] =
        bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
    fcf_rec->switch_name[3] =
        bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
    fcf_rec->switch_name[4] =
        bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
    fcf_rec->switch_name[5] =
        bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
    fcf_rec->switch_name[6] =
        bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
    fcf_rec->switch_name[7] =
        bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
        struct fcf_record *new_fcf_record, uint32_t addr_mode,
        uint16_t vlan_id, uint32_t flag)
{
    /* Copy the fields from the HBA's FCF record */
    lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
    /* Update other fields of driver FCF record */
    fcf_rec->addr_mode = addr_mode;
    fcf_rec->vlan_id = vlan_id;
    fcf_rec->flag |= (flag | RECORD_VALID);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
    LPFC_MBOXQ_t *fcf_mbxq;
    int rc;
    unsigned long flags;

    spin_lock_irqsave(&phba->hbalock, flags);

    /* If the FCF is not available do nothing. */
    if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return;
    }

    /* The FCF is already registered, start discovery */
    if (phba->fcf.fcf_flag & FCF_REGISTERED) {
        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (phba->pport->port_state != LPFC_FLOGI)
            lpfc_initial_flogi(phba->pport);
        return;
    }
    spin_unlock_irqrestore(&phba->hbalock, flags);

    fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
            GFP_KERNEL);
    if (!fcf_mbxq) {
        spin_lock_irqsave(&phba->hbalock, flags);
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return;
    }

    lpfc_reg_fcfi(phba, fcf_mbxq);
    fcf_mbxq->vport = phba->pport;
    fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
    rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
    if (rc == MBX_NOT_FINISHED) {
        spin_lock_irqsave(&phba->hbalock, flags);
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        mempool_free(fcf_mbxq, phba->mbox_mem_pool);
    }

    return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery else returns zero. If this FCF
 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in *vlan_id on return from the function. If no VLAN tagging needs to
 * be used with the FCF, *vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
        struct fcf_record *new_fcf_record,
        uint32_t *boot_flag, uint32_t *addr_mode,
        uint16_t *vlan_id)
{
    struct lpfc_fcf_conn_entry *conn_entry;
    int i, j, fcf_vlan_id = 0;

    /* Find the lowest VLAN id in the FCF record */
    for (i = 0; i < 512; i++) {
        if (new_fcf_record->vlan_bitmap[i]) {
            fcf_vlan_id = i * 8;
            j = 0;
            while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
                j++;
                fcf_vlan_id++;
            }
            break;
        }
    }

    /* If FCF not available return 0 */
    if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
        !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
        return 0;

    if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
        *boot_flag = 0;
        *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                new_fcf_record);
        if (phba->valid_vlan)
            *vlan_id = phba->vlan_id;
        else
            *vlan_id = 0xFFFF;
        return 1;
    }

    /*
     * If there are no FCF connection table entries, driver connects to all
     * FCFs.
     */
    if (list_empty(&phba->fcf_conn_rec_list)) {
        *boot_flag = 0;
        *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                new_fcf_record);

        /*
         * When there are no FCF connect entries, use driver's default
         * addressing mode - FPMA.
         */
        if (*addr_mode & LPFC_FCF_FPMA)
            *addr_mode = LPFC_FCF_FPMA;

        /* If FCF record reports a vlan id use that vlan id */
        if (fcf_vlan_id)
            *vlan_id = fcf_vlan_id;
        else
            *vlan_id = 0xFFFF;
        return 1;
    }

    list_for_each_entry(conn_entry,
            &phba->fcf_conn_rec_list, list) {
        if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
            continue;

        if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
            !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
                    new_fcf_record))
            continue;
        if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
            !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
                    new_fcf_record))
            continue;
        if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
            /*
             * If the vlan bit map does not have the bit set for the
             * vlan id to be used, then it is not a match.
             */
            if (!(new_fcf_record->vlan_bitmap
                [conn_entry->conn_rec.vlan_tag / 8] &
                (1 << (conn_entry->conn_rec.vlan_tag % 8))))
                continue;
        }

        /*
         * If connection record does not support any addressing mode,
         * skip the FCF record.
         */
        if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
            & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
            continue;

        /*
         * Check if the connection record specifies a required
         * addressing mode.
         */
        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
            !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

            /*
             * If SPMA required but FCF not support this continue.
             */
            if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                !(bf_get(lpfc_fcf_record_mac_addr_prov,
                    new_fcf_record) & LPFC_FCF_SPMA))
                continue;

            /*
             * If FPMA required but FCF not support this continue.
             */
            if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                !(bf_get(lpfc_fcf_record_mac_addr_prov,
                    new_fcf_record) & LPFC_FCF_FPMA))
                continue;
        }

        /*
         * This fcf record matches filtering criteria.
         */
        if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
            *boot_flag = 1;
        else
            *boot_flag = 0;

        /*
         * If user did not specify any addressing mode, or if the
         * preferred addressing mode specified by user is not supported
         * by FCF, allow fabric to pick the addressing mode.
         */
        *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                new_fcf_record);
        /*
         * If the user specified a required address mode, assign that
         * address mode
         */
        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
            (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
            *addr_mode = (conn_entry->conn_rec.flags &
                    FCFCNCT_AM_SPMA) ?
                    LPFC_FCF_SPMA : LPFC_FCF_FPMA;
        /*
         * If the user specified a preferred address mode, use the
         * addr mode only if FCF supports the addr_mode.
         */
        else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
            (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
            (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
            (*addr_mode & LPFC_FCF_SPMA))
                *addr_mode = LPFC_FCF_SPMA;
        else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
            (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
            !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
            (*addr_mode & LPFC_FCF_FPMA))
                *addr_mode = LPFC_FCF_FPMA;

        /* If matching connect list has a vlan id, use it */
        if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
            *vlan_id = conn_entry->conn_rec.vlan_tag;
        /*
         * If no vlan id is specified in connect list, use the vlan id
         * of the FCF record
         */
        else if (fcf_vlan_id)
            *vlan_id = fcf_vlan_id;
        else
            *vlan_id = 0xFFFF;

        return 1;
    }

    return 0;
}
/**
 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
    /*
     * If the Link is up and no FCoE events while in the
     * FCF discovery, no need to restart FCF discovery.
     */
    if ((phba->link_state >= LPFC_LINK_UP) &&
        (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
        return 0;

    lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
            "2768 Pending link or FCF event during current "
            "handling of the previous event: link_state:x%x, "
            "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
            phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
            phba->fcoe_eventtag);

    spin_lock_irq(&phba->hbalock);
    phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
    spin_unlock_irq(&phba->hbalock);

    if (phba->link_state >= LPFC_LINK_UP) {
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                "2780 Restart FCF table scan due to "
                "pending FCF event:evt_tag_at_scan:x%x, "
                "evt_tag_current:x%x\n",
                phba->fcoe_eventtag_at_fcf_scan,
                phba->fcoe_eventtag);
        lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
    } else {
        /*
         * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
         * flag
         */
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
        spin_unlock_irq(&phba->hbalock);
    }

    /* Unregister the currently registered FCF if required */
    if (unreg_fcf) {
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~FCF_REGISTERED;
        spin_unlock_irq(&phba->hbalock);
        lpfc_sli4_unregister_fcf(phba);
    }
    return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on the FCF record to
 * use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the generated random number.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
    uint32_t rand_num;

    /* Get 16-bit uniform random number */
    rand_num = (0xFFFF & random32());

    /* Decision with probability 1/fcf_cnt */
    if ((fcf_cnt * rand_num) < 0xFFFF)
        return true;
    else
        return false;
}
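
/*
 * Note on the selection test above: rand_num is drawn uniformly from
 * [0, 0xFFFF], so (fcf_cnt * rand_num) < 0xFFFF holds with probability of
 * roughly 1/fcf_cnt.  Applying this test to each eligible record in turn
 * gives every record seen so far an approximately equal chance of being
 * the one kept, i.e. a reservoir-style random selection over the FCF table.
 */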
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
        uint16_t *next_fcf_index)
{
    void *virt_addr;
    dma_addr_t phys_addr;
    struct lpfc_mbx_sge sge;
    struct lpfc_mbx_read_fcf_tbl *read_fcf;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    struct fcf_record *new_fcf_record;

    /* Get the first SGE entry from the non-embedded DMA memory. This
     * routine only uses a single SGE.
     */
    lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
    phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
    if (unlikely(!mboxq->sge_array)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                "2524 Failed to get the non-embedded SGE "
                "virtual address\n");
        return NULL;
    }
    virt_addr = mboxq->sge_array->addr[0];

    shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status) {
        if (shdr_status == STATUS_FCF_TABLE_EMPTY)
            lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                    "2726 READ_FCF_RECORD Indicates empty "
                    "FCF table.\n");
        else
            lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                    "2521 READ_FCF_RECORD mailbox failed "
                    "with status x%x add_status x%x, "
                    "mbx\n", shdr_status, shdr_add_status);
        return NULL;
    }

    /* Interpreting the returned information of the FCF record */
    read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
    lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
            sizeof(struct lpfc_mbx_read_fcf_tbl));
    *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
    new_fcf_record = (struct fcf_record *)(virt_addr +
            sizeof(struct lpfc_mbx_read_fcf_tbl));
    lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
            offsetof(struct fcf_record, vlan_bitmap));
    new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
    new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

    return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
        struct fcf_record *fcf_record,
        uint16_t vlan_id,
        uint16_t next_fcf_index)
{
    lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
            "2764 READ_FCF_RECORD:\n"
            "\tFCF_Index     : x%x\n"
            "\tFCF_Avail     : x%x\n"
            "\tFCF_Valid     : x%x\n"
            "\tFIP_Priority  : x%x\n"
            "\tMAC_Provider  : x%x\n"
            "\tLowest VLANID : x%x\n"
            "\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
            "\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
            "\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
            "\tNext_FCF_Index: x%x\n",
            bf_get(lpfc_fcf_record_fcf_index, fcf_record),
            bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
            bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
            fcf_record->fip_priority,
            bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
            vlan_id,
            bf_get(lpfc_fcf_record_mac_0, fcf_record),
            bf_get(lpfc_fcf_record_mac_1, fcf_record),
            bf_get(lpfc_fcf_record_mac_2, fcf_record),
            bf_get(lpfc_fcf_record_mac_3, fcf_record),
            bf_get(lpfc_fcf_record_mac_4, fcf_record),
            bf_get(lpfc_fcf_record_mac_5, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
            bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
            bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
            next_fcf_index);
}
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in the
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
    struct fcf_record *new_fcf_record;
    uint32_t boot_flag, addr_mode;
    uint16_t fcf_index, next_fcf_index;
    struct lpfc_fcf_rec *fcf_rec = NULL;
    uint16_t vlan_id;
    int rc;
    bool select_new_fcf;
    uint32_t seed;

    /* If there is pending FCoE event restart FCF table scan */
    if (lpfc_check_pending_fcoe_event(phba, 0)) {
        lpfc_sli4_mbox_cmd_free(phba, mboxq);
        return;
    }

    /* Parse the FCF record from the non-embedded mailbox command */
    new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
            &next_fcf_index);
    if (!new_fcf_record) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                "2765 Mailbox command READ_FCF_RECORD "
                "failed to retrieve a FCF record.\n");
        /* Let next new FCF event trigger fast failover */
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        spin_unlock_irq(&phba->hbalock);
        lpfc_sli4_mbox_cmd_free(phba, mboxq);
        return;
    }

    /* Check the FCF record against the connection list */
    rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
            &addr_mode, &vlan_id);

    /* Log the FCF record information if turned on */
    lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
            next_fcf_index);

    /*
     * If the fcf record does not match with connect list entries
     * read the next entry; otherwise, this is an eligible FCF
     * record for round robin FCF failover.
     */
    if (!rc) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                "2781 FCF record fcf_index:x%x failed FCF "
                "connection list check, fcf_avail:x%x, "
                "fcf_valid:x%x\n",
                bf_get(lpfc_fcf_record_fcf_index,
                        new_fcf_record),
                bf_get(lpfc_fcf_record_fcf_avail,
                        new_fcf_record),
                bf_get(lpfc_fcf_record_fcf_valid,
                        new_fcf_record));
        goto read_next_fcf;
    } else {
        fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
        rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
        if (rc)
            goto read_next_fcf;
    }

    /*
     * If this is not the first FCF discovery of the HBA, use last
     * FCF record for the discovery. The condition that a rescan
     * matches the in-use FCF record: fabric name, switch name, mac
     * address, and vlan_id.
     */
    spin_lock_irq(&phba->hbalock);
    if (phba->fcf.fcf_flag & FCF_IN_USE) {
        if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
                new_fcf_record) &&
            lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
                new_fcf_record) &&
            lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
                new_fcf_record) &&
            lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
                vlan_id)) {
            phba->fcf.fcf_flag |= FCF_AVAILABLE;
            if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
                /* Stop FCF redisc wait timer if pending */
                __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
            else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
                /* If in fast failover, mark it's completed */
                phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
                        FCF_DISCOVERY);
            spin_unlock_irq(&phba->hbalock);
            goto out;
        }
        /*
         * Read next FCF record from HBA searching for the matching
         * with in-use record only if not during the fast failover
         * period. In case of fast failover period, it shall try to
         * determine whether the FCF record just read should be the
         * next candidate.
         */
        if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
            spin_unlock_irq(&phba->hbalock);
            goto read_next_fcf;
        }
    }
    /*
     * Update on failover FCF record only if it's in FCF fast-failover
     * period; otherwise, update on current FCF record.
     */
    if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
        fcf_rec = &phba->fcf.failover_rec;
    else
        fcf_rec = &phba->fcf.current_rec;

    if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
        /*
         * If the driver FCF record does not have boot flag
         * set and new hba fcf record has boot flag set, use
         * the new hba fcf record.
         */
        if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
            /* Choose this FCF record */
            __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
                    addr_mode, vlan_id, BOOT_ENABLE);
            spin_unlock_irq(&phba->hbalock);
            goto read_next_fcf;
        }
        /*
         * If the driver FCF record has boot flag set and the
         * new hba FCF record does not have boot flag, read
         * the next FCF record.
         */
        if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
            spin_unlock_irq(&phba->hbalock);
            goto read_next_fcf;
        }
        /*
         * If the new hba FCF record has lower priority value
         * than the driver FCF record, use the new record.
         */
        if (new_fcf_record->fip_priority < fcf_rec->priority) {
            /* Choose the new FCF record with lower priority */
            __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
                    addr_mode, vlan_id, 0);
            /* Reset running random FCF selection count */
            phba->fcf.eligible_fcf_cnt = 1;
        } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
            /* Update running random FCF selection count */
            phba->fcf.eligible_fcf_cnt++;
            select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
                    phba->fcf.eligible_fcf_cnt);
            if (select_new_fcf)
                /* Choose the new FCF by random selection */
                __lpfc_update_fcf_record(phba, fcf_rec,
                        new_fcf_record,
                        addr_mode, vlan_id, 0);
        }
        spin_unlock_irq(&phba->hbalock);
        goto read_next_fcf;
    }
    /*
     * This is the first suitable FCF record, choose this record for
     * initial best-fit FCF.
     */
    if (fcf_rec) {
        __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
                addr_mode, vlan_id, (boot_flag ?
                BOOT_ENABLE : 0));
        phba->fcf.fcf_flag |= FCF_AVAILABLE;
        /* Setup initial running random FCF selection count */
        phba->fcf.eligible_fcf_cnt = 1;
        /* Seeding the random number generator for random selection */
        seed = (uint32_t)(0xFFFFFFFF & jiffies);
        srandom32(seed);
    }
    spin_unlock_irq(&phba->hbalock);
    goto read_next_fcf;

read_next_fcf:
    lpfc_sli4_mbox_cmd_free(phba, mboxq);
    if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
        if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
            /*
             * Case of FCF fast failover scan
             */

            /*
             * It has not found any suitable FCF record, cancel
             * FCF scan inprogress, and do nothing
             */
            if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                        "2782 No suitable FCF record "
                        "found during this round of "
                        "post FCF rediscovery scan: "
                        "fcf_evt_tag:x%x, fcf_index: "
                        "x%x\n",
                        phba->fcoe_eventtag_at_fcf_scan,
                        bf_get(lpfc_fcf_record_fcf_index,
                                new_fcf_record));
                /*
                 * Let next new FCF event trigger fast
                 * failover
                 */
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irq(&phba->hbalock);
                return;
            }
            /*
             * It has found a suitable FCF record that is not
             * the same as in-use FCF record, unregister the
             * in-use FCF record, replace the in-use FCF record
             * with the new FCF record, mark FCF fast failover
             * completed, and then start register the new FCF
             * record.
             */

            /* Unregister the current in-use FCF record */
            lpfc_unregister_fcf(phba);

            /* Replace in-use record with the new record */
            memcpy(&phba->fcf.current_rec,
                    &phba->fcf.failover_rec,
                    sizeof(struct lpfc_fcf_rec));
            /* mark the FCF fast failover completed */
            spin_lock_irq(&phba->hbalock);
            phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
            spin_unlock_irq(&phba->hbalock);
            /*
             * Set up the initial registered FCF index for FLOGI
             * round robin FCF failover.
             */
            phba->fcf.fcf_rr_init_indx =
                    phba->fcf.failover_rec.fcf_indx;
            /* Register to the new FCF record */
            lpfc_register_fcf(phba);
        } else {
            /*
             * In case of transaction period to fast FCF failover,
             * do nothing when search to the end of the FCF table.
             */
            if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
                (phba->fcf.fcf_flag & FCF_REDISC_PEND))
                return;
            /*
             * Otherwise, initial scan or post linkdown rescan,
             * register with the best FCF record found so far
             * through the FCF scanning process.
             */

            /* mark the initial FCF discovery completed */
            spin_lock_irq(&phba->hbalock);
            phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
            spin_unlock_irq(&phba->hbalock);
            /*
             * Set up the initial registered FCF index for FLOGI
             * round robin FCF failover
             */
            phba->fcf.fcf_rr_init_indx =
                    phba->fcf.current_rec.fcf_indx;
            /* Register to the new FCF record */
            lpfc_register_fcf(phba);
        }
    } else
        lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
    return;

out:
    lpfc_sli4_mbox_cmd_free(phba, mboxq);
    lpfc_register_fcf(phba);

    return;
}
1988 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
1989 * @phba: pointer to lpfc hba data structure.
1990 * @mboxq: pointer to mailbox object.
1992 * This is the callback function for FLOGI failure round robin FCF failover
1993 * read FCF record mailbox command from the eligible FCF record bmask for
1994 * performing the failover. If the FCF read back is not valid/available, it
1995 * fails through to retrying FLOGI to the currently registered FCF again.
1996 * Otherwise, if the FCF read back is valid and available, it will set the
1997 * newly read FCF record to the failover FCF record, unregister currently
1998 * registered FCF record, copy the failover FCF record to the current
1999 * FCF record, and then register the current FCF record before proceeding
2000 * to trying FLOGI on the new failover FCF.
2003 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2005 struct fcf_record
*new_fcf_record
;
2006 uint32_t boot_flag
, addr_mode
;
2007 uint16_t next_fcf_index
;
2008 uint16_t current_fcf_index
;
2011 /* If link state is not up, stop the round robin failover process */
2012 if (phba
->link_state
< LPFC_LINK_UP
) {
2013 spin_lock_irq(&phba
->hbalock
);
2014 phba
->fcf
.fcf_flag
&= ~FCF_DISCOVERY
;
2015 spin_unlock_irq(&phba
->hbalock
);
2016 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2020 /* Parse the FCF record from the non-embedded mailbox command */
2021 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2023 if (!new_fcf_record
) {
2024 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2025 "2766 Mailbox command READ_FCF_RECORD "
2026 "failed to retrieve a FCF record.\n");
2030 /* Get the needed parameters from FCF record */
2031 lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2032 &addr_mode
, &vlan_id
);
2034 /* Log the FCF record information if turned on */
2035 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2038 /* Upload new FCF record to the failover FCF record */
2039 spin_lock_irq(&phba
->hbalock
);
2040 __lpfc_update_fcf_record(phba
, &phba
->fcf
.failover_rec
,
2041 new_fcf_record
, addr_mode
, vlan_id
,
2042 (boot_flag
? BOOT_ENABLE
: 0));
2043 spin_unlock_irq(&phba
->hbalock
);
2045 current_fcf_index
= phba
->fcf
.current_rec
.fcf_indx
;
2047 /* Unregister the current in-use FCF record */
2048 lpfc_unregister_fcf(phba
);
2050 /* Replace in-use record with the new record */
2051 memcpy(&phba
->fcf
.current_rec
, &phba
->fcf
.failover_rec
,
2052 sizeof(struct lpfc_fcf_rec
));
2054 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2055 "2783 FLOGI round robin FCF failover from FCF "
2056 "(index:x%x) to FCF (index:x%x).\n",
2058 bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
));
2061 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2062 lpfc_register_fcf(phba
);
2066 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2067 * @phba: pointer to lpfc hba data structure.
2068 * @mboxq: pointer to mailbox object.
2070 * This is the callback function of read FCF record mailbox command for
2071 * updating the eligible FCF bmask for FLOGI failure round robin FCF
2072 * failover when a new FCF event happened. If the FCF read back is
2073 * valid/available and it passes the connection list check, it updates
2074 * the bmask for the eligible FCF record for round robin failover.
2077 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2079 struct fcf_record
*new_fcf_record
;
2080 uint32_t boot_flag
, addr_mode
;
2081 uint16_t fcf_index
, next_fcf_index
;
2085 /* If link state is not up, no need to proceed */
2086 if (phba
->link_state
< LPFC_LINK_UP
)
2089 /* If FCF discovery period is over, no need to proceed */
2090 if (phba
->fcf
.fcf_flag
& FCF_DISCOVERY
)
2093 /* Parse the FCF record from the non-embedded mailbox command */
2094 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2096 if (!new_fcf_record
) {
2097 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2098 "2767 Mailbox command READ_FCF_RECORD "
2099 "failed to retrieve a FCF record.\n");
2103 /* Check the connection list for eligibility */
2104 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2105 &addr_mode
, &vlan_id
);
2107 /* Log the FCF record information if turned on */
2108 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2114 /* Update the eligible FCF record index bmask */
2115 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2116 rc
= lpfc_sli4_fcf_rr_index_set(phba
, fcf_index
);
2119 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send an FDISC for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
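/*
 * Completion handler for the REG_VFI mailbox command. On success it marks
 * the VPI/VFI as registered and, when the fabric link is being configured,
 * kicks off FDISCs and the SCR/NameServer PLOGI; on failure it either
 * restarts loop-map discovery or marks the vport failed.
 */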
static void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
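/*
 * Completion handler for the READ_SPARAM mailbox command. It copies the
 * service parameters returned by the HBA into the vport (applying any
 * soft WWNN/WWPN overrides) and, for the physical port, into the HBA's
 * WWNN/WWPN. On mailbox error it takes the link down.
 */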
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
2345 lpfc_mbx_process_link_up(struct lpfc_hba
*phba
, READ_LA_VAR
*la
)
2347 struct lpfc_vport
*vport
= phba
->pport
;
2348 LPFC_MBOXQ_t
*sparam_mbox
, *cfglink_mbox
= NULL
;
2350 struct lpfc_dmabuf
*mp
;
2352 struct fcf_record
*fcf_record
;
2354 spin_lock_irq(&phba
->hbalock
);
2355 switch (la
->UlnkSpeed
) {
2357 phba
->fc_linkspeed
= LA_1GHZ_LINK
;
2360 phba
->fc_linkspeed
= LA_2GHZ_LINK
;
2363 phba
->fc_linkspeed
= LA_4GHZ_LINK
;
2366 phba
->fc_linkspeed
= LA_8GHZ_LINK
;
2369 phba
->fc_linkspeed
= LA_10GHZ_LINK
;
2372 phba
->fc_linkspeed
= LA_UNKNW_LINK
;
2376 phba
->fc_topology
= la
->topology
;
2377 phba
->link_flag
&= ~LS_NPIV_FAB_SUPPORTED
;
2379 if (phba
->fc_topology
== TOPOLOGY_LOOP
) {
2380 phba
->sli3_options
&= ~LPFC_SLI3_NPIV_ENABLED
;
2382 /* if npiv is enabled and this adapter supports npiv log
2383 * a message that npiv is not supported in this topology
2385 if (phba
->cfg_enable_npiv
&& phba
->max_vpi
)
2386 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2387 "1309 Link Up Event npiv not supported in loop "
2389 /* Get Loop Map information */
2391 vport
->fc_flag
|= FC_LBIT
;
2393 vport
->fc_myDID
= la
->granted_AL_PA
;
2394 i
= la
->un
.lilpBde64
.tus
.f
.bdeSize
;
2397 phba
->alpa_map
[0] = 0;
2399 if (vport
->cfg_log_verbose
& LOG_LINK_EVENT
) {
2410 numalpa
= phba
->alpa_map
[0];
2412 while (j
< numalpa
) {
2413 memset(un
.pamap
, 0, 16);
2414 for (k
= 1; j
< numalpa
; k
++) {
2416 phba
->alpa_map
[j
+ 1];
2421 /* Link Up Event ALPA map */
2422 lpfc_printf_log(phba
,
2425 "1304 Link Up Event "
2426 "ALPA map Data: x%x "
2428 un
.pa
.wd1
, un
.pa
.wd2
,
2429 un
.pa
.wd3
, un
.pa
.wd4
);
2434 if (!(phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)) {
2435 if (phba
->max_vpi
&& phba
->cfg_enable_npiv
&&
2436 (phba
->sli_rev
== 3))
2437 phba
->sli3_options
|= LPFC_SLI3_NPIV_ENABLED
;
2439 vport
->fc_myDID
= phba
->fc_pref_DID
;
2440 vport
->fc_flag
|= FC_LBIT
;
2442 spin_unlock_irq(&phba
->hbalock
);
2445 sparam_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2449 rc
= lpfc_read_sparam(phba
, sparam_mbox
, 0);
2451 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
2454 sparam_mbox
->vport
= vport
;
2455 sparam_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_read_sparam
;
2456 rc
= lpfc_sli_issue_mbox(phba
, sparam_mbox
, MBX_NOWAIT
);
2457 if (rc
== MBX_NOT_FINISHED
) {
2458 mp
= (struct lpfc_dmabuf
*) sparam_mbox
->context1
;
2459 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2461 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
2465 if (!(phba
->hba_flag
& HBA_FCOE_SUPPORT
)) {
2466 cfglink_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2469 vport
->port_state
= LPFC_LOCAL_CFG_LINK
;
2470 lpfc_config_link(phba
, cfglink_mbox
);
2471 cfglink_mbox
->vport
= vport
;
2472 cfglink_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_local_config_link
;
2473 rc
= lpfc_sli_issue_mbox(phba
, cfglink_mbox
, MBX_NOWAIT
);
2474 if (rc
== MBX_NOT_FINISHED
) {
2475 mempool_free(cfglink_mbox
, phba
->mbox_mem_pool
);
2479 vport
->port_state
= LPFC_VPORT_UNKNOWN
;
2481 * Add the driver's default FCF record at FCF index 0 now. This
2482 * is phase 1 implementation that support FCF index 0 and driver
2485 if (!(phba
->hba_flag
& HBA_FIP_SUPPORT
)) {
2486 fcf_record
= kzalloc(sizeof(struct fcf_record
),
2488 if (unlikely(!fcf_record
)) {
2489 lpfc_printf_log(phba
, KERN_ERR
,
2491 "2554 Could not allocate memmory for "
2497 lpfc_sli4_build_dflt_fcf_record(phba
, fcf_record
,
2498 LPFC_FCOE_FCF_DEF_INDEX
);
2499 rc
= lpfc_sli4_add_fcf_record(phba
, fcf_record
);
2501 lpfc_printf_log(phba
, KERN_ERR
,
2503 "2013 Could not manually add FCF "
2504 "record 0, status %d\n", rc
);
2512 * The driver is expected to do FIP/FCF. Call the port
2513 * and get the FCF Table.
2515 spin_lock_irq(&phba
->hbalock
);
2516 if (phba
->hba_flag
& FCF_DISC_INPROGRESS
) {
2517 spin_unlock_irq(&phba
->hbalock
);
2520 /* This is the initial FCF discovery scan */
2521 phba
->fcf
.fcf_flag
|= FCF_INIT_DISC
;
2522 spin_unlock_irq(&phba
->hbalock
);
2523 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
| LOG_DISCOVERY
,
2524 "2778 Start FCF table scan at linkup\n");
2526 rc
= lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
2527 LPFC_FCOE_FCF_GET_FIRST
);
2529 spin_lock_irq(&phba
->hbalock
);
2530 phba
->fcf
.fcf_flag
&= ~FCF_INIT_DISC
;
2531 spin_unlock_irq(&phba
->hbalock
);
2538 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2539 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2540 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
2541 vport
->port_state
, sparam_mbox
, cfglink_mbox
);
2542 lpfc_issue_clear_la(phba
, vport
);
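/*
 * Re-enable Link Attention processing; for SLI-3 and earlier revisions this
 * also turns the link attention interrupt back on in the Host Control
 * register.
 */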
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
2573 * This routine handles processing a READ_LA mailbox
2574 * command upon completion. It is setup in the LPFC_MBOXQ
2575 * as the completion routine when the command is
2576 * handed off to the SLI layer.
2579 lpfc_mbx_cmpl_read_la(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2581 struct lpfc_vport
*vport
= pmb
->vport
;
2582 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2584 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2585 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
2587 /* Unblock ELS traffic */
2588 phba
->sli
.ring
[LPFC_ELS_RING
].flag
&= ~LPFC_STOP_IOCB_EVENT
;
2589 /* Check for error */
2590 if (mb
->mbxStatus
) {
2591 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
2592 "1307 READ_LA mbox error x%x state x%x\n",
2593 mb
->mbxStatus
, vport
->port_state
);
2594 lpfc_mbx_issue_link_down(phba
);
2595 phba
->link_state
= LPFC_HBA_ERROR
;
2596 goto lpfc_mbx_cmpl_read_la_free_mbuf
;
2599 la
= (READ_LA_VAR
*) &pmb
->u
.mb
.un
.varReadLA
;
2601 memcpy(&phba
->alpa_map
[0], mp
->virt
, 128);
2603 spin_lock_irq(shost
->host_lock
);
2605 vport
->fc_flag
|= FC_BYPASSED_MODE
;
2607 vport
->fc_flag
&= ~FC_BYPASSED_MODE
;
2608 spin_unlock_irq(shost
->host_lock
);
2610 if ((phba
->fc_eventTag
< la
->eventTag
) ||
2611 (phba
->fc_eventTag
== la
->eventTag
)) {
2612 phba
->fc_stat
.LinkMultiEvent
++;
2613 if (la
->attType
== AT_LINK_UP
)
2614 if (phba
->fc_eventTag
!= 0)
2615 lpfc_linkdown(phba
);
2618 phba
->fc_eventTag
= la
->eventTag
;
2619 spin_lock_irq(&phba
->hbalock
);
2621 phba
->sli
.sli_flag
|= LPFC_MENLO_MAINT
;
2623 phba
->sli
.sli_flag
&= ~LPFC_MENLO_MAINT
;
2624 spin_unlock_irq(&phba
->hbalock
);
2626 phba
->link_events
++;
2627 if (la
->attType
== AT_LINK_UP
&& (!la
->mm
)) {
2628 phba
->fc_stat
.LinkUp
++;
2629 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
2630 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2631 "1306 Link Up Event in loop back mode "
2632 "x%x received Data: x%x x%x x%x x%x\n",
2633 la
->eventTag
, phba
->fc_eventTag
,
2634 la
->granted_AL_PA
, la
->UlnkSpeed
,
2637 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2638 "1303 Link Up Event x%x received "
2639 "Data: x%x x%x x%x x%x x%x x%x %d\n",
2640 la
->eventTag
, phba
->fc_eventTag
,
2641 la
->granted_AL_PA
, la
->UlnkSpeed
,
2644 phba
->wait_4_mlo_maint_flg
);
2646 lpfc_mbx_process_link_up(phba
, la
);
2647 } else if (la
->attType
== AT_LINK_DOWN
) {
2648 phba
->fc_stat
.LinkDown
++;
2649 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
2650 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2651 "1308 Link Down Event in loop back mode "
2653 "Data: x%x x%x x%x\n",
2654 la
->eventTag
, phba
->fc_eventTag
,
2655 phba
->pport
->port_state
, vport
->fc_flag
);
2658 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2659 "1305 Link Down Event x%x received "
2660 "Data: x%x x%x x%x x%x x%x\n",
2661 la
->eventTag
, phba
->fc_eventTag
,
2662 phba
->pport
->port_state
, vport
->fc_flag
,
2665 lpfc_mbx_issue_link_down(phba
);
2667 if (la
->mm
&& la
->attType
== AT_LINK_UP
) {
2668 if (phba
->link_state
!= LPFC_LINK_DOWN
) {
2669 phba
->fc_stat
.LinkDown
++;
2670 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2671 "1312 Link Down Event x%x received "
2672 "Data: x%x x%x x%x\n",
2673 la
->eventTag
, phba
->fc_eventTag
,
2674 phba
->pport
->port_state
, vport
->fc_flag
);
2675 lpfc_mbx_issue_link_down(phba
);
2677 lpfc_enable_la(phba
);
2679 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2680 "1310 Menlo Maint Mode Link up Event x%x rcvd "
2681 "Data: x%x x%x x%x\n",
2682 la
->eventTag
, phba
->fc_eventTag
,
2683 phba
->pport
->port_state
, vport
->fc_flag
);
2685 * The cmnd that triggered this will be waiting for this
2688 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
2689 if (phba
->wait_4_mlo_maint_flg
) {
2690 phba
->wait_4_mlo_maint_flg
= 0;
2691 wake_up_interruptible(&phba
->wait_4_mlo_m_q
);
2697 lpfc_issue_clear_la(phba
, vport
);
2698 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
2699 "1311 fa %d\n", la
->fa
);
2702 lpfc_mbx_cmpl_read_la_free_mbuf
:
2703 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2705 mempool_free(pmb
, phba
->mbox_mem_pool
);
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;

	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba,
				pmb->u.mb.un.varRegLogin.rpi);

	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, pmb,
				NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
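/*
 * This routine handles processing an UNREG_VPI mailbox command upon
 * completion. It clears the VPI-registered state; if the firmware reports
 * the VPI as busy it requests an HBA reset through the worker thread.
 */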
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
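/*
 * Issue an UNREG_VPI mailbox command for the vport. Returns non-zero if the
 * mailbox could not be allocated or issued, zero otherwise.
 */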
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
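/*
 * This routine handles processing a REG_VPI mailbox command upon completion.
 * On success it restarts discovery of NPR nodes and marks the vport ready;
 * on failure it marks the vport failed and clears the fabric flags.
 */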
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
2868 * lpfc_create_static_vport - Read HBA config region to create static vports.
2869 * @phba: pointer to lpfc hba data structure.
2871 * This routine issue a DUMP mailbox command for config region 22 to get
2872 * the list of static vports to be created. The function create vports
2873 * based on the information returned from the HBA.
2876 lpfc_create_static_vport(struct lpfc_hba
*phba
)
2878 LPFC_MBOXQ_t
*pmb
= NULL
;
2880 struct static_vport_info
*vport_info
;
2882 struct fc_vport_identifiers vport_id
;
2883 struct fc_vport
*new_fc_vport
;
2884 struct Scsi_Host
*shost
;
2885 struct lpfc_vport
*vport
;
2886 uint16_t offset
= 0;
2887 uint8_t *vport_buff
;
2888 struct lpfc_dmabuf
*mp
;
2889 uint32_t byte_count
= 0;
2891 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2893 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
2894 "0542 lpfc_create_static_vport failed to"
2895 " allocate mailbox memory\n");
2901 vport_info
= kzalloc(sizeof(struct static_vport_info
), GFP_KERNEL
);
2903 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
2904 "0543 lpfc_create_static_vport failed to"
2905 " allocate vport_info\n");
2906 mempool_free(pmb
, phba
->mbox_mem_pool
);
2910 vport_buff
= (uint8_t *) vport_info
;
2912 if (lpfc_dump_static_vport(phba
, pmb
, offset
))
2915 pmb
->vport
= phba
->pport
;
2916 rc
= lpfc_sli_issue_mbox_wait(phba
, pmb
, LPFC_MBOX_TMO
);
2918 if ((rc
!= MBX_SUCCESS
) || mb
->mbxStatus
) {
2919 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
2920 "0544 lpfc_create_static_vport failed to"
2921 " issue dump mailbox command ret 0x%x "
2927 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
2928 byte_count
= pmb
->u
.mqe
.un
.mb_words
[5];
2929 mp
= (struct lpfc_dmabuf
*) pmb
->context2
;
2930 if (byte_count
> sizeof(struct static_vport_info
) -
2932 byte_count
= sizeof(struct static_vport_info
)
2934 memcpy(vport_buff
+ offset
, mp
->virt
, byte_count
);
2935 offset
+= byte_count
;
2937 if (mb
->un
.varDmp
.word_cnt
>
2938 sizeof(struct static_vport_info
) - offset
)
2939 mb
->un
.varDmp
.word_cnt
=
2940 sizeof(struct static_vport_info
)
2942 byte_count
= mb
->un
.varDmp
.word_cnt
;
2943 lpfc_sli_pcimem_bcopy(((uint8_t *)mb
) + DMP_RSP_OFFSET
,
2944 vport_buff
+ offset
,
2947 offset
+= byte_count
;
2950 } while (byte_count
&&
2951 offset
< sizeof(struct static_vport_info
));
2954 if ((le32_to_cpu(vport_info
->signature
) != VPORT_INFO_SIG
) ||
2955 ((le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
)
2956 != VPORT_INFO_REV
)) {
2957 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
2958 "0545 lpfc_create_static_vport bad"
2959 " information header 0x%x 0x%x\n",
2960 le32_to_cpu(vport_info
->signature
),
2961 le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
);
2966 shost
= lpfc_shost_from_vport(phba
->pport
);
2968 for (i
= 0; i
< MAX_STATIC_VPORT_COUNT
; i
++) {
2969 memset(&vport_id
, 0, sizeof(vport_id
));
2970 vport_id
.port_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwpn
);
2971 vport_id
.node_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwnn
);
2972 if (!vport_id
.port_name
|| !vport_id
.node_name
)
2975 vport_id
.roles
= FC_PORT_ROLE_FCP_INITIATOR
;
2976 vport_id
.vport_type
= FC_PORTTYPE_NPIV
;
2977 vport_id
.disable
= false;
2978 new_fc_vport
= fc_vport_create(shost
, 0, &vport_id
);
2980 if (!new_fc_vport
) {
2981 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
2982 "0546 lpfc_create_static_vport failed to"
2987 vport
= *(struct lpfc_vport
**)new_fc_vport
->dd_data
;
2988 vport
->vport_flag
|= STATIC_VPORT
;
2993 if (rc
!= MBX_TIMEOUT
) {
2994 if (pmb
->context2
) {
2995 mp
= (struct lpfc_dmabuf
*) pmb
->context2
;
2996 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2999 mempool_free(pmb
, phba
->mbox_mem_pool
);
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the
		 * references to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when the physical port receives a logo, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
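/*
 * Register the remote port represented by @ndlp with the FC transport,
 * update its roles, and record the SCSI target id the transport assigns.
 */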
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
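/* Adjust the per-vport count of nodes in the given discovery state */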
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state ==  NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state ==  NLP_STE_MAPPED_NODE) &&
		(vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
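/*
 * Transition a node to a new discovery state, updating the per-state
 * counters and the FC transport registration as needed.
 */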
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int  old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
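/* Put the node on the vport's fc_nodes list if it is not already there */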
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp may go beyond the existence of @vport: the final
 * release of the ndlp is determined by its reference count, and operations
 * on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:       did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport    *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);

		ndlp->nlp_flag &= ~NLP_RPI_VALID;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_VALID) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_free_rpi(phba,
					 mb->u.mb.un.varRegLogin.rpi);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
		    != NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
					  (uint8_t *) &vport->fc_sparam,
					  mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
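/* Compare the DID of a nodelist entry with the DID being searched for.
 * A direct match succeeds immediately.  Otherwise, a node whose stored
 * DID carries only the low-order AL_PA byte (domain and area both zero)
 * is treated as a match for a full DID in the local port's own
 * domain/area, and vice versa, so the same node can be found before and
 * after it picks up a fabric-assigned address.
 */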
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
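/* Locked wrapper around __lpfc_findnode_did(); the SCSI host lock is held
 * while the vport node list is walked.
 */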
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
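/* Find or create the nodelist entry for a DID that needs to be discovered.
 * A new or re-enabled node is placed in NPR state and flagged
 * NLP_NPR_2B_DISC; NULL is returned when the DID does not need further
 * discovery, for example when it falls outside the current RSCN payload
 * or a PLOGI has already been received from it.
 */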
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
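/* Issue a CLEAR_LA mailbox command on the physical port to resume link
 * attention processing once discovery is set up.  Not used on SLI4 ports,
 * and skipped if a CLEAR_LA has already been sent.
 */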
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already send
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
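/* Flush ELS traffic for every node on this vport that still has a PLOGI
 * or ADISC outstanding.
 */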
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
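/* Worker-thread side of the discovery timeout.  The action taken depends on
 * how far discovery had progressed (vport->port_state) when the timer fired;
 * an unexpected HBA link state afterwards causes the discovery list to be
 * flushed and the IOCB rings to be released.
 */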
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
				/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else  {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else  {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
				/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
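/* Filter callbacks used with __lpfc_find_node() to search the vport node
 * list by RPI or by WWPN.
 */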
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
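/* Initialize a freshly allocated nodelist entry for the given DID; the
 * kref, delay timer and event list heads are set up by
 * lpfc_initialize_node().
 */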
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
	"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, the kref_put returns 1 when decrementing a reference
	 * count that was 1, it invokes the release callback function,
	 * but it still left the reference count as 1 (not actually
	 * performs the last decrementation). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
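/* Illustrative note: the usual pairing is to take a reference with
 * lpfc_nlp_get() whenever an ndlp pointer is stashed somewhere that
 * outlives the current call (for example an iocb or mailbox context) and
 * to drop it with lpfc_nlp_put() from the matching completion path, as the
 * FDMI REG_LOGIN completion above does for the reference held for its
 * callback.
 */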
/* This routine free's the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_VALID)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(phba->pport);
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	return 0;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. This function
 * also tries to find another FCF for discovery by rescanning the HBA FCF
 * table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode or if HBA does not support
	 * FCoE or if FCF is not registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region 23.
 *
 * This function creates driver data structures for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates driver data structures with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates driver data structures with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t  *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}