/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
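/*
 * Note (added for clarity, not in the original source): the 126 entries
 * above are the valid FC-AL arbitrated-loop physical addresses (AL_PAs)
 * in loop-priority order, so a scan-down bind can derive a stable SCSI
 * id directly from the table index.  For example, AL_PA 0xEF sits at
 * index 0 and AL_PA 0x01 at index 125, so a target arbitrating with
 * AL_PA 0xEF would bind to the lowest scsid.
 */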
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_hba *phba;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                               " to terminate I/O Data x%x\n",
                               rport->port_id);
                return;
        }

        phba = ndlp->phba;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport terminate: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        if (ndlp->nlp_sid != NLP_NO_SID) {
                lpfc_sli_abort_iocb(ndlp->vport,
                        &phba->sli.ring[phba->sli.fcp_ring],
                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        struct lpfc_work_evt *evtp;
        int put_node;
        int put_rport;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
                return;

        vport = ndlp->vport;
        phba = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosscb: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will clean up the node
         * appropriately; we just need to clean up the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;

        evtp = &ndlp->dev_loss_evt;

        if (!list_empty(&evtp->evt_listp))
                return;

        spin_lock_irq(&phba->hbalock);
        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
        evtp->evt_arg1 = lpfc_nlp_get(ndlp);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
                lpfc_worker_wake_up(phba);
        }
        spin_unlock_irq(&phba->hbalock);

        return;
}
/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;
        struct fc_rport *rport;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        uint8_t *name;
        int put_node;
        int put_rport;
        int warn_on = 0;

        rport = ndlp->rport;

        if (!rport)
                return;

        rdata = rport->dd_data;
        name = (uint8_t *) &ndlp->nlp_portname;
        vport = ndlp->vport;
        phba = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will clean up the node
         * appropriately; we just need to clean up the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
                if (ndlp->nlp_sid != NLP_NO_SID) {
                        /* flush the target */
                        lpfc_sli_abort_iocb(vport,
                                &phba->sli.ring[phba->sli.fcp_ring],
                                ndlp->nlp_sid, 0, LPFC_CTX_TGT);
                }
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0284 Devloss timeout Ignored on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID);
                return;
        }

        if (ndlp->nlp_type & NLP_FABRIC) {
                /* We will clean up these Nodes in linkup */
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                /* flush the target */
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0204 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                                 "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }

        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
                lpfc_nlp_put(ndlp);
        if (put_rport)
                put_device(&rport->dev);

        if (!(vport->load_flag & FC_UNLOADING) &&
            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
            (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

        lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
        struct lpfc_fast_path_event *ret;

        /* If there are lot of fast event do not exhaust memory due to this */
        if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
                return NULL;

        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
                      GFP_ATOMIC);
        if (ret) {
                atomic_inc(&phba->fast_event_count);
                INIT_LIST_HEAD(&ret->work_evt.evt_listp);
                ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
        }
        return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
                struct lpfc_fast_path_event *evt) {

        atomic_dec(&phba->fast_event_count);
        kfree(evt);
}
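/*
 * Illustrative sketch (added for clarity, not part of the original
 * source): the intended life cycle of a fast-path event.  A caller in
 * interrupt context allocates the event, fills in its payload, and
 * queues the embedded work_evt (whose evt field the allocator preset to
 * LPFC_EVT_FASTPATH_MGMT_EVT) onto the worker list; the worker thread
 * later hands it to lpfc_send_fastpath_evt(), which frees it:
 *
 *      struct lpfc_fast_path_event *evt = lpfc_alloc_fast_evt(phba);
 *      if (evt) {
 *              evt->vport = vport;
 *              evt->un.fabric_evt.event_type = FC_REG_FABRIC_EVENT;
 *              spin_lock_irqsave(&phba->hbalock, flags);
 *              list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
 *              spin_unlock_irqrestore(&phba->hbalock, flags);
 *              lpfc_worker_wake_up(phba);
 *      }
 *
 * lpfc_free_fast_evt() is the matching release path; the atomic
 * fast_event_count keeps the allocator bounded under event storms.
 */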
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
                struct lpfc_work_evt *evtp)
{
        unsigned long evt_category, evt_sub_category;
        struct lpfc_fast_path_event *fast_evt_data;
        char *evt_data;
        uint32_t evt_data_size;
        struct Scsi_Host *shost;

        fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
                work_evt);

        evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
        evt_sub_category = (unsigned long) fast_evt_data->un.
                        fabric_evt.subcategory;
        shost = lpfc_shost_from_vport(fast_evt_data->vport);
        if (evt_category == FC_REG_FABRIC_EVENT) {
                if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
                        evt_data = (char *) &fast_evt_data->un.read_check_error;
                        evt_data_size = sizeof(fast_evt_data->un.
                                read_check_error);
                } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
                        (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
                        evt_data = (char *) &fast_evt_data->un.fabric_evt;
                        evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
                } else {
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else if (evt_category == FC_REG_SCSI_EVENT) {
                switch (evt_sub_category) {
                case LPFC_EVENT_QFULL:
                case LPFC_EVENT_DEVBSY:
                        evt_data = (char *) &fast_evt_data->un.scsi_evt;
                        evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
                        break;
                case LPFC_EVENT_CHECK_COND:
                        evt_data = (char *) &fast_evt_data->un.check_cond_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                check_cond_evt);
                        break;
                case LPFC_EVENT_VARQUEDEPTH:
                        evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
                        evt_data_size = sizeof(fast_evt_data->un.
                                queue_depth_evt);
                        break;
                default:
                        lpfc_free_fast_evt(phba, fast_evt_data);
                        return;
                }
        } else {
                lpfc_free_fast_evt(phba, fast_evt_data);
                return;
        }

        fc_host_post_vendor_event(shost,
                fc_get_event_number(),
                evt_data_size,
                evt_data,
                LPFC_NL_VENDOR_ID);

        lpfc_free_fast_evt(phba, fast_evt_data);
        return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt *evtp = NULL;
        struct lpfc_nodelist *ndlp;
        int free_evt;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0; /* evt is part of ndlp */
                        /* decrement the node reference count held
                         * for this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        /* decrement the node reference count held for
                         * this queued work
                         */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                        ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_FASTPATH_MGMT_EVT:
                        lpfc_send_fastpath_evt(phba, evtp);
                        free_evt = 0;
                        break;
                case LPFC_EVT_RESET_HBA:
                        if (!(phba->pport->load_flag & FC_UNLOADING))
                                lpfc_reset_hba(phba);
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);

}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        uint32_t ha_copy, status, control, work_port_events;
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);

        /* First, try to post the next mailbox command to SLI4 device */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
                lpfc_sli4_post_async_mbox(phba);

        if (ha_copy & HA_ERATT)
                /* Handle the error attention event */
                lpfc_handle_eratt(phba);

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);

        /* Process SLI4 events */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
                        lpfc_sli4_async_event_proc(phba);
                if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
                        spin_lock_irq(&phba->hbalock);
                        phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
                }
                if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
                        lpfc_sli4_fcf_redisc_event_proc(phba);
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
                         */
                        if (vports[i] == NULL && i == 0)
                                vport = phba->pport;
                        else
                                vport = vports[i];
                        if (vport == NULL)
                                break;
                        spin_lock_irq(&vport->work_port_lock);
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
                                lpfc_els_timeout_handler(vport);
                        if (work_port_events & WORKER_HB_TMO)
                                lpfc_hb_timeout_handler(phba);
                        if (work_port_events & WORKER_MBOX_TMO)
                                lpfc_mbox_timeout_handler(phba);
                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                                lpfc_unblock_fabric_iocbs(phba);
                        if (work_port_events & WORKER_FDMI_TMO)
                                lpfc_fdmi_timeout_handler(vport);
                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_RAMP_UP_QUEUE)
                                lpfc_ramp_up_queue_handler(phba);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        pring = &phba->sli.ring[LPFC_ELS_RING];
        status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
        if ((status & HA_RXMASK) ||
            (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
            (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Set the lpfc data pending flag */
                        set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                        lpfc_sli_handle_slow_ring_event(phba, pring,
                                                        (status &
                                                         HA_RXMASK));
                }
                if (pring->txq_cnt)
                        lpfc_drain_txq(phba);
                /*
                 * Turn on Ring interrupts
                 */
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        spin_lock_irq(&phba->hbalock);
                        control = readl(phba->HCregaddr);
                        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Enable ring: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);

                                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                        } else {
                                lpfc_debugfs_slow_ring_trc(phba,
                                        "WRK Ring ok: cntl:x%x hacopy:x%x",
                                        control, ha_copy, 0);
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }
        lpfc_work_list_done(phba);
}
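/*
 * Worked example (added for clarity, not in the original source): the
 * host-attention word ha_copy packs a 4-bit event field per ring, so
 * ring n's bits are extracted with a mask-then-shift:
 *
 *      status = (ha_copy & (HA_RXMASK << (4 * n))) >> (4 * n);
 *
 * For example, if the ELS ring is ring 2, its field occupies bits 8-11
 * of ha_copy, and the result is then tested against HA_RXMASK to see
 * whether a receive event is pending on that ring.
 */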
int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;

        set_user_nice(current, -20);
        phba->data_flags = 0;

        while (!kthread_should_stop()) {
                /* wait and check worker queue activities */
                rc = wait_event_interruptible(phba->work_waitq,
                                        (test_and_clear_bit(LPFC_DATA_READY,
                                                            &phba->data_flags)
                                         || kthread_should_stop()));
                /* Signal wakeup shall terminate the worker thread */
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                                        "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
                }

                /* Attend pending lpfc data processing */
                lpfc_work_done(phba);
        }
        phba->worker_thread = NULL;
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0432 Worker thread stopped.\n");
        return 0;
}
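/*
 * Illustrative sketch (added for clarity; the actual start-up code lives
 * in driver initialization, not in this file): lpfc_do_work() is the body
 * of the driver's worker kthread, started roughly like this with the
 * standard kthread API:
 *
 *      phba->worker_thread = kthread_run(lpfc_do_work, phba,
 *                                        "lpfc_worker_%d", phba->brd_no);
 *      if (IS_ERR(phba->worker_thread))
 *              error = PTR_ERR(phba->worker_thread);
 *
 * Interrupt handlers then set LPFC_DATA_READY in phba->data_flags and
 * call lpfc_worker_wake_up() to kick the wait_event_interruptible() loop.
 */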
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt *evtp;
        unsigned long flags;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to worker thread for processing
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;

        evtp->evt_arg1 = arg1;
        evtp->evt_arg2 = arg2;
        evtp->evt = evt;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        lpfc_worker_wake_up(phba);

        return 1;
}
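/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that needs a synchronous answer pairs the posted event with a
 * completion, matching the LPFC_EVT_* cases handled in
 * lpfc_work_list_done() above:
 *
 *      DECLARE_COMPLETION_ONSTACK(online_compl);
 *      int status = 0;
 *
 *      lpfc_workq_post_event(phba, &status, &online_compl,
 *                            LPFC_EVT_OFFLINE_PREP);
 *      wait_for_completion(&online_compl);
 *
 * The worker thread fills *evt_arg1 with the result and completes
 * *evt_arg2 once the event has been processed.
 */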
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int rc;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                    ((vport->port_type == LPFC_NPIV_PORT) &&
                     (ndlp->nlp_DID == NameServer_DID)))
                        lpfc_unreg_rpi(vport, ndlp);

                /* Leave Fabric nodes alone on link down */
                if ((phba->sli_rev < LPFC_SLI_REV4) &&
                    (!remove && ndlp->nlp_type & NLP_FABRIC))
                        continue;
                rc = lpfc_disc_state_machine(vport, ndlp, NULL,
                                             remove
                                             ? NLP_EVT_DEVICE_RM
                                             : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                spin_unlock_irq(shost->host_lock);
        }
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
        lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

        /* Cleanup any outstanding received buffers */
        lpfc_cleanup_rcv_buffers(vport);

        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        lpfc_cleanup_rpis(vport, 0);

        /* Turn off discovery timer if it is running */
        lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Down: state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        lpfc_port_link_failure(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t *mb;
        int i;

        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;

        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);

        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
        spin_unlock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~FC_LBIT;
                spin_unlock_irq(shost->host_lock);
        }
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);
                }
        lpfc_destroy_vport_work_array(phba, vports);
        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

        /* Setup myDID for link up if we are in pt2pt mode */
        if (phba->pport->fc_flag & FC_PT2PT) {
                phba->pport->fc_myDID = 0;
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->vport = vport;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
                spin_unlock_irq(shost->host_lock);
        }

        return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                if (ndlp->nlp_type & NLP_FABRIC) {
                        /* On Linkup its safe to clean up the ndlp
                         * from Fabric connections.
                         */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        /* Fail outstanding IO now since device is
                         * marked for PLOGI.
                         */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;

        if ((vport->load_flag & FC_UNLOADING) != 0)
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Up: top:x%x speed:x%x flg:x%x",
                phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

        /* If NPIV is not enabled, only bring the physical port up */
        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            (vport != phba->pport))
                return;

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);

        if (vport->fc_flag & FC_LBIT)
                lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        phba->link_state = LPFC_LINK_UP;

        /* Unblock fabric iocbs if they are blocked */
        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        del_timer_sync(&phba->fabric_block_timer);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            (phba->sli_rev < LPFC_SLI_REV4))
                lpfc_issue_clear_la(phba, phba->pport);

        return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->u.mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0320 CLEAR_LA mbxStatus error x%x hba "
                                 "state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->port_type == LPFC_PHYSICAL_PORT)
                phba->link_state = LPFC_HBA_READY;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0225 Device Discovery completes\n");
        mempool_free(pmb, phba->mbox_mem_pool);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_ABORT_DISCOVERY;
        spin_unlock_irq(shost->host_lock);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;

        if (pmb->u.mb.mbxStatus)
                goto out;

        mempool_free(pmb, phba->mbox_mem_pool);

        if (phba->fc_topology == TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                /* Need to wait for FAN - use discovery timer
                 * for timeout. port_state is identically
                 * LPFC_LOCAL_CFG_LINK while waiting for FAN
                 */
                lpfc_set_disctmo(vport);
                return;
        }

        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
        if (vport->port_state != LPFC_FLOGI) {
                lpfc_initial_flogi(vport);
        }
        return;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0306 CONFIG_LINK mbxStatus error x%x "
                         "HBA state x%x\n",
                         pmb->u.mb.mbxStatus, vport->port_state);
        mempool_free(pmb, phba->mbox_mem_pool);

        lpfc_linkdown(phba);

        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                         "0200 CONFIG_LINK bad hba state x%x\n",
                         vport->port_state);

        lpfc_issue_clear_la(phba, vport);
        return;
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct lpfc_vport *vport = mboxq->vport;
        unsigned long flags;

        if (mboxq->u.mb.mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "2017 REG_FCFI mbxStatus error x%x "
                                 "HBA state x%x\n",
                                 mboxq->u.mb.mbxStatus, vport->port_state);
                mempool_free(mboxq, phba->mbox_mem_pool);
                return;
        }

        /* Start FCoE discovery by sending a FLOGI. */
        phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
        /* Set the FCFI registered flag */
        spin_lock_irqsave(&phba->hbalock, flags);
        phba->fcf.fcf_flag |= FCF_REGISTERED;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        /* If there is a pending FCoE event, restart FCF table scan. */
        if (lpfc_check_pending_fcoe_event(phba, 1)) {
                mempool_free(mboxq, phba->mbox_mem_pool);
                return;
        }
        spin_lock_irqsave(&phba->hbalock, flags);
        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
        phba->hba_flag &= ~FCF_DISC_INPROGRESS;
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (vport->port_state != LPFC_FLOGI)
                lpfc_initial_flogi(vport);

        mempool_free(mboxq, phba->mbox_mem_pool);
        return;
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
        if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
                return 0;
        if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
                return 0;
        if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
                return 0;
        if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
                return 0;
        if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
                return 0;
        if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
                return 0;
        if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
                return 0;
        if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
                return 0;
        return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
        if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
                return 0;
        if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
                return 0;
        if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
                return 0;
        if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
                return 0;
        if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
                return 0;
        if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
                return 0;
        if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
                return 0;
        if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
                return 0;
        return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
        if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
                return 0;
        if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
                return 0;
        if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
                return 0;
        if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
                return 0;
        if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
                return 0;
        if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
                return 0;
        return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
        return (curr_vlan_id == new_vlan_id);
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
                     struct fcf_record *new_fcf_record)
{
        /* Fabric name */
        fcf_rec->fabric_name[0] =
                bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
        fcf_rec->fabric_name[1] =
                bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
        fcf_rec->fabric_name[2] =
                bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
        fcf_rec->fabric_name[3] =
                bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
        fcf_rec->fabric_name[4] =
                bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
        fcf_rec->fabric_name[5] =
                bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
        fcf_rec->fabric_name[6] =
                bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
        fcf_rec->fabric_name[7] =
                bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
        /* Mac address */
        fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
        fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
        fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
        fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
        fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
        fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
        /* FCF record index */
        fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
        /* FCF record priority */
        fcf_rec->priority = new_fcf_record->fip_priority;
        /* Switch name */
        fcf_rec->switch_name[0] =
                bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
        fcf_rec->switch_name[1] =
                bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
        fcf_rec->switch_name[2] =
                bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
        fcf_rec->switch_name[3] =
                bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
        fcf_rec->switch_name[4] =
                bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
        fcf_rec->switch_name[5] =
                bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
        fcf_rec->switch_name[6] =
                bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
        fcf_rec->switch_name[7] =
                bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
                       struct fcf_record *new_fcf_record, uint32_t addr_mode,
                       uint16_t vlan_id, uint32_t flag)
{
        /* Copy the fields from the HBA's FCF record */
        lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
        /* Update other fields of driver FCF record */
        fcf_rec->addr_mode = addr_mode;
        fcf_rec->vlan_id = vlan_id;
        fcf_rec->flag |= (flag | RECORD_VALID);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *fcf_mbxq;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);

        /* If the FCF is not available do nothing. */
        if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        /* The FCF is already registered, start discovery */
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irqrestore(&phba->hbalock, flags);
                if (phba->pport->port_state != LPFC_FLOGI)
                        lpfc_initial_flogi(phba->pport);
                return;
        }
        spin_unlock_irqrestore(&phba->hbalock, flags);

        fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
                GFP_KERNEL);
        if (!fcf_mbxq) {
                spin_lock_irqsave(&phba->hbalock, flags);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        lpfc_reg_fcfi(phba, fcf_mbxq);
        fcf_mbxq->vport = phba->pport;
        fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
        rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                spin_lock_irqsave(&phba->hbalock, flags);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                spin_unlock_irqrestore(&phba->hbalock, flags);
                mempool_free(fcf_mbxq, phba->mbox_mem_pool);
        }

        return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery, else it returns zero. If this
 * FCF record can be used for SAN discovery, boot_flag will indicate if this FCF
 * is used by the boot bios and addr_mode will indicate the addressing mode to
 * be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return from the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
                        struct fcf_record *new_fcf_record,
                        uint32_t *boot_flag, uint32_t *addr_mode,
                        uint16_t *vlan_id)
{
        struct lpfc_fcf_conn_entry *conn_entry;
        int i, j, fcf_vlan_id = 0;

        /* Find the lowest VLAN id in the FCF record */
        for (i = 0; i < 512; i++) {
                if (new_fcf_record->vlan_bitmap[i]) {
                        fcf_vlan_id = i * 8;
                        j = 0;
                        while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
                                j++;
                                fcf_vlan_id++;
                        }
                        break;
                }
        }

        /* If FCF not available return 0 */
        if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
            !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
                return 0;

        if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
                *boot_flag = 0;
                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                                new_fcf_record);
                if (phba->valid_vlan)
                        *vlan_id = phba->vlan_id;
                else
                        *vlan_id = LPFC_FCOE_NULL_VID;
                return 1;
        }

        /*
         * If there are no FCF connection table entries, the driver connects to
         * all FCFs.
         */
        if (list_empty(&phba->fcf_conn_rec_list)) {
                *boot_flag = 0;
                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                        new_fcf_record);

                /*
                 * When there are no FCF connect entries, use driver's default
                 * addressing mode - FPMA.
                 */
                if (*addr_mode & LPFC_FCF_FPMA)
                        *addr_mode = LPFC_FCF_FPMA;

                /* If FCF record report a vlan id use that vlan id */
                if (fcf_vlan_id)
                        *vlan_id = fcf_vlan_id;
                else
                        *vlan_id = LPFC_FCOE_NULL_VID;
                return 1;
        }

        list_for_each_entry(conn_entry,
                            &phba->fcf_conn_rec_list, list) {
                if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
                        continue;

                if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
                    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
                                         new_fcf_record))
                        continue;
                if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
                    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
                                        new_fcf_record))
                        continue;
                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
                        /*
                         * If the vlan bit map does not have the bit set for the
                         * vlan id to be used, then it is not a match.
                         */
                        if (!(new_fcf_record->vlan_bitmap
                                [conn_entry->conn_rec.vlan_tag / 8] &
                                (1 << (conn_entry->conn_rec.vlan_tag % 8))))
                                continue;
                }

                /*
                 * If connection record does not support any addressing mode,
                 * skip the FCF record.
                 */
                if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
                        & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
                        continue;

                /*
                 * Check if the connection record specifies a required
                 * addressing mode.
                 */
                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

                        /*
                         * If SPMA is required but the FCF does not support it,
                         * continue.
                         */
                        if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                            !(bf_get(lpfc_fcf_record_mac_addr_prov,
                                     new_fcf_record) & LPFC_FCF_SPMA))
                                continue;

                        /*
                         * If FPMA is required but the FCF does not support it,
                         * continue.
                         */
                        if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                            !(bf_get(lpfc_fcf_record_mac_addr_prov,
                                     new_fcf_record) & LPFC_FCF_FPMA))
                                continue;
                }

                /*
                 * This fcf record matches filtering criteria.
                 */
                if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
                        *boot_flag = 1;
                else
                        *boot_flag = 0;

                /*
                 * If user did not specify any addressing mode, or if the
                 * preferred addressing mode specified by user is not supported
                 * by FCF, allow fabric to pick the addressing mode.
                 */
                *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
                                new_fcf_record);
                /*
                 * If the user specified a required address mode, assign that
                 * address mode
                 */
                if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
                        *addr_mode = (conn_entry->conn_rec.flags &
                                      FCFCNCT_AM_SPMA) ?
                                      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
                /*
                 * If the user specified a preferred address mode, use the
                 * addr mode only if FCF supports the addr_mode.
                 */
                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                         (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
                         (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                         (*addr_mode & LPFC_FCF_SPMA))
                                *addr_mode = LPFC_FCF_SPMA;
                else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
                         (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
                         !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
                         (*addr_mode & LPFC_FCF_FPMA))
                                *addr_mode = LPFC_FCF_FPMA;

                /* If matching connect list has a vlan id, use it */
                if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
                        *vlan_id = conn_entry->conn_rec.vlan_tag;
                /*
                 * If no vlan id is specified in connect list, use the vlan id
                 * of the FCF record
                 */
                else if (fcf_vlan_id)
                        *vlan_id = fcf_vlan_id;
                else
                        *vlan_id = LPFC_FCOE_NULL_VID;

                return 1;
        }

        return 0;
}
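/*
 * Worked example (added for clarity, not in the original source): the
 * 512-byte vlan_bitmap encodes 4096 possible VLAN ids, eight per byte.
 * A connection-table vlan_tag of, say, 100 is tested as byte 100 / 8
 * == 12, bit 100 % 8 == 4:
 *
 *      match = new_fcf_record->vlan_bitmap[100 / 8] &
 *              (1 << (100 % 8));
 *
 * The lowest-VLAN scan at the top of the routine above inverts the same
 * mapping: first non-zero byte i, then lowest set bit j, giving
 * fcf_vlan_id = i * 8 + j.
 */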
/**
 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
        /*
         * If the Link is up and no FCoE events while in the
         * FCF discovery, no need to restart FCF discovery.
         */
        if ((phba->link_state >= LPFC_LINK_UP) &&
            (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
                return 0;

        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                        "2768 Pending link or FCF event during current "
                        "handling of the previous event: link_state:x%x, "
                        "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
                        phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
                        phba->fcoe_eventtag);

        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
        spin_unlock_irq(&phba->hbalock);

        if (phba->link_state >= LPFC_LINK_UP) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2780 Restart FCF table scan due to "
                                "pending FCF event:evt_tag_at_scan:x%x, "
                                "evt_tag_current:x%x\n",
                                phba->fcoe_eventtag_at_fcf_scan,
                                phba->fcoe_eventtag);
                lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
        } else {
                /*
                 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
                 * flag
                 */
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2833 Stop FCF discovery process due to link "
                                "state change (x%x)\n", phba->link_state);
                spin_lock_irq(&phba->hbalock);
                phba->hba_flag &= ~FCF_DISC_INPROGRESS;
                phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
                spin_unlock_irq(&phba->hbalock);
        }

        /* Unregister the currently registered FCF if required */
        if (unreg_fcf) {
                spin_lock_irq(&phba->hbalock);
                phba->fcf.fcf_flag &= ~FCF_REGISTERED;
                spin_unlock_irq(&phba->hbalock);
                lpfc_sli4_unregister_fcf(phba);
        }
        return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on which FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
        uint32_t rand_num;

        /* Get 16-bit uniform random number */
        rand_num = (0xFFFF & random32());

        /* Decision with probability 1/fcf_cnt */
        if ((fcf_cnt * rand_num) < 0xFFFF)
                return true;
        else
                return false;
}
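/*
 * Note on the selection math (added for clarity, not in the original
 * source): this is reservoir sampling with a reservoir of size one.  The
 * nth eligible FCF record read replaces the currently chosen one with
 * probability roughly 1/n (rand_num is uniform on [0, 0xFFFF], so
 * n * rand_num < 0xFFFF holds about 1/n of the time).  After the full
 * pass every record survives with equal probability; e.g. for three
 * records the first survives with approximately 1 * (1/2) * (2/3) = 1/3,
 * the same as the second (1/2 * 2/3) and the third (1/3).
 */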
/**
 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                             uint16_t *next_fcf_index)
{
        void *virt_addr;
        dma_addr_t phys_addr;
        struct lpfc_mbx_sge sge;
        struct lpfc_mbx_read_fcf_tbl *read_fcf;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        struct fcf_record *new_fcf_record;

        /* Get the first SGE entry from the non-embedded DMA memory. This
         * routine only uses a single SGE.
         */
        lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
        phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
        if (unlikely(!mboxq->sge_array)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "2524 Failed to get the non-embedded SGE "
                                "virtual address\n");
                return NULL;
        }
        virt_addr = mboxq->sge_array->addr[0];

        shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status) {
                if (shdr_status == STATUS_FCF_TABLE_EMPTY)
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                        "2726 READ_FCF_RECORD Indicates empty "
                                        "FCF table.\n");
                else
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                        "2521 READ_FCF_RECORD mailbox failed "
                                        "with status x%x add_status x%x, "
                                        "mbx\n", shdr_status, shdr_add_status);
                return NULL;
        }

        /* Interpreting the returned information of the FCF record */
        read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
        lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
                              sizeof(struct lpfc_mbx_read_fcf_tbl));
        *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
        new_fcf_record = (struct fcf_record *)(virt_addr +
                          sizeof(struct lpfc_mbx_read_fcf_tbl));
        lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
                              offsetof(struct fcf_record, vlan_bitmap));
        new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
        new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

        return new_fcf_record;
}
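/*
 * Layout note (added for clarity, not in the original source): the
 * non-embedded mailbox response DMA buffer holds the read_fcf_tbl header
 * immediately followed by the FCF record itself, which is why the record
 * pointer is computed as
 *
 *      new_fcf_record = (struct fcf_record *)(virt_addr +
 *                        sizeof(struct lpfc_mbx_read_fcf_tbl));
 *
 * lpfc_sli_pcimem_bcopy() byte-swaps the little-endian words in place up
 * to the vlan_bitmap; the two trailing words 137 and 138 are converted
 * explicitly with le32_to_cpu() afterwards.
 */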
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
                              struct fcf_record *fcf_record,
                              uint16_t vlan_id,
                              uint16_t next_fcf_index)
{
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                        "2764 READ_FCF_RECORD:\n"
                        "\tFCF_Index     : x%x\n"
                        "\tFCF_Avail     : x%x\n"
                        "\tFCF_Valid     : x%x\n"
                        "\tFIP_Priority  : x%x\n"
                        "\tMAC_Provider  : x%x\n"
                        "\tLowest VLANID : x%x\n"
                        "\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
                        "\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
                        "\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
                        "\tNext_FCF_Index: x%x\n",
                        bf_get(lpfc_fcf_record_fcf_index, fcf_record),
                        bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
                        bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
                        fcf_record->fip_priority,
                        bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
                        vlan_id,
                        bf_get(lpfc_fcf_record_mac_0, fcf_record),
                        bf_get(lpfc_fcf_record_mac_1, fcf_record),
                        bf_get(lpfc_fcf_record_mac_2, fcf_record),
                        bf_get(lpfc_fcf_record_mac_3, fcf_record),
                        bf_get(lpfc_fcf_record_mac_4, fcf_record),
                        bf_get(lpfc_fcf_record_mac_5, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
                        bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
                        bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
                        next_fcf_index);
}
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * the vlan id will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 **/
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
                           struct lpfc_fcf_rec *fcf_rec,
                           struct fcf_record *new_fcf_record,
                           uint16_t new_vlan_id)
{
        if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
                if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
                        return false;
        if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
                return false;
        if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
                return false;
        if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
                return false;
        return true;
}
1734 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1735 * @phba: pointer to lpfc hba data structure.
1736 * @mboxq: pointer to mailbox object.
1738 * This function iterates through all the fcf records available in
1739 * HBA and chooses the optimal FCF record for discovery. After finding
1740 * the FCF for discovery it registers the FCF record and kicks start
1742 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
1743 * use an FCF record which matches fabric name and mac address of the
1744 * currently used FCF record.
1745 * If the driver supports only one FCF, it will try to use the FCF record
1746 * used by BOOT_BIOS.
1749 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
1751 struct fcf_record
*new_fcf_record
;
1752 uint32_t boot_flag
, addr_mode
;
1753 uint16_t fcf_index
, next_fcf_index
;
1754 struct lpfc_fcf_rec
*fcf_rec
= NULL
;
1757 bool select_new_fcf
;
1760 /* If there is pending FCoE event restart FCF table scan */
1761 if (lpfc_check_pending_fcoe_event(phba
, 0)) {
1762 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
1766 /* Parse the FCF record from the non-embedded mailbox command */
1767 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
1769 if (!new_fcf_record
) {
1770 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
1771 "2765 Mailbox command READ_FCF_RECORD "
1772 "failed to retrieve a FCF record.\n");
1773 /* Let next new FCF event trigger fast failover */
1774 spin_lock_irq(&phba
->hbalock
);
1775 phba
->hba_flag
&= ~FCF_DISC_INPROGRESS
;
1776 spin_unlock_irq(&phba
->hbalock
);
1777 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
1781 /* Check the FCF record against the connection list */
1782 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
1783 &addr_mode
, &vlan_id
);
1785 /* Log the FCF record information if turned on */
1786 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
1790 * If the fcf record does not match with connect list entries
1791 * read the next entry; otherwise, this is an eligible FCF
1792 * record for round robin FCF failover.
1795 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
1796 "2781 FCF record (x%x) failed FCF "
1797 "connection list check, fcf_avail:x%x, "
1799 bf_get(lpfc_fcf_record_fcf_index
,
1801 bf_get(lpfc_fcf_record_fcf_avail
,
1803 bf_get(lpfc_fcf_record_fcf_valid
,
1805 if ((phba
->fcf
.fcf_flag
& FCF_IN_USE
) &&
1806 lpfc_sli4_fcf_record_match(phba
, &phba
->fcf
.current_rec
,
1807 new_fcf_record
, LPFC_FCOE_IGNORE_VID
)) {
1809 * In case the current in-use FCF record becomes
1810 * invalid/unavailable during FCF discovery that
1811 * was not triggered by fast FCF failover process,
1812 * treat it as fast FCF failover.
1814 if (!(phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
) &&
1815 !(phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)) {
1816 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
1817 "2835 Invalid in-use FCF "
1818 "record (x%x) reported, "
1819 "entering fast FCF failover "
1821 phba
->fcf
.current_rec
.fcf_indx
);
1822 spin_lock_irq(&phba
->hbalock
);
1823 phba
->fcf
.fcf_flag
|= FCF_REDISC_FOV
;
1824 spin_unlock_irq(&phba
->hbalock
);
1825 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
1826 lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
1827 LPFC_FCOE_FCF_GET_FIRST
);
1833 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
1834 rc
= lpfc_sli4_fcf_rr_index_set(phba
, fcf_index
);

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
					       new_fcf_record, vlan_id)) {
			phba->fcf.fcf_flag |= FCF_AVAILABLE;
			if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
				/* Stop FCF redisc wait timer if pending */
				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
				/* If in fast failover, mark it's completed */
				phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
							FCF_DISCOVERY);
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2836 The new FCF record (x%x) "
					"matches the in-use FCF record "
					"(x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			goto out;
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
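			/*
			 * The equal-priority case below keeps the choice fair
			 * by streaming random selection: the n-th eligible
			 * record replaces the current pick with probability
			 * 1/n (lpfc_sli4_new_fcf_random_select), so each
			 * equal-priority FCF is equally likely to be chosen
			 * without buffering the whole table.
			 */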
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update current FCF record "
				"with initial FCF record (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
		/* Seeding the random number generator for random selection */
		seed = (uint32_t)(0xFFFFFFFF & jiffies);
		srandom32(seed);
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF record "
					       "found during this round of "
					       "post FCF rediscovery scan: "
					       "fcf_evt_tag:x%x, fcf_index: "
					       "x%x\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				/*
				 * Let next new FCF event trigger fast
				 * failover
				 */
				spin_lock_irq(&phba->hbalock);
				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace the current in-use "
					"FCF record (x%x) with failover FCF "
					"record (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/* mark the FCF fast failover completed */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Set up the initial registered FCF index for FLOGI
			 * round robin FCF failover.
			 */
			phba->fcf.fcf_rr_init_indx =
					phba->fcf.failover_rec.fcf_indx;
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/*
			 * Otherwise, initial scan or post linkdown rescan,
			 * register with the best FCF record found so far
			 * through the FCF scanning process.
			 */

			/* mark the initial FCF discovery completed */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Set up the initial registered FCF index for FLOGI
			 * round robin FCF failover
			 */
			phba->fcf.fcf_rr_init_indx =
					phba->fcf.current_rec.fcf_indx;
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
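
/*
 * Note on the FCF bookkeeping flags used by the scan handlers in this
 * file: FCF_INIT_DISC marks the initial post-linkup table scan,
 * FCF_REDISC_FOV marks a fast FCF failover rescan, and FCF_REDISC_EVT/
 * FCF_REDISC_PEND track a rediscovery event that is still waiting to be
 * processed or is pending on the rediscovery wait timer. FCF_AVAILABLE
 * means a usable record has been selected, while FCF_IN_USE refers to
 * the record currently registered with the firmware.
 */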

/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure round robin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;

	/* If link state is not up, stop the round robin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Get the needed parameters from FCF record */
	lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				 &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update the current FCF record (x%x) "
			"with the next FCF record (x%x)\n",
			phba->fcf.failover_rec.fcf_indx,
			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 FLOGI round robin FCF failover from FCF "
			"(x%x) to FCF (x%x).\n",
			current_fcf_index,
			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);
}

/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure round robin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for round robin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (phba->fcf.fcf_flag & FCF_DISCOVERY)
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2607 Failed to allocate "
				 "init_vpi mailbox\n");
		return;
	}

	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}

/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
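
/*
 * Completion handler for the REG_VFI mailbox command. On failure in loop
 * topology it falls back to building a discovery list from the loop map;
 * on success it marks the VPI/VFI registered and, if the port was waiting
 * in LPFC_FABRIC_CFG_LINK, kicks off vport FDISCs and the SCR/NameServer
 * PLOGI.
 */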
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
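
/*
 * Completion handler for READ_SPARAM. Copies the service parameters
 * returned by the firmware into the vport, applies any configured soft
 * WWNN/WWPN overrides, and derives the node and port names from them.
 */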
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
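
/*
 * Process a link-up attention event: latch the negotiated link speed and
 * topology, assign the local DID (from the granted AL_PA in loop mode),
 * then issue READ_SPARAM followed by either CONFIG_LINK (native FC) or an
 * FCF table scan (FCoE) to continue port bring-up.
 */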
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	case LA_10GHZ_LINK:
		phba->fc_linkspeed = LA_10GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that supports FCF index 0 and
		 * driver defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");

		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
		"0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
		vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
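
/*
 * Re-arm Link Attention processing. On SLI-3 and earlier hbas this also
 * sets HC_LAINT_ENA in the Host Control register so the adapter raises
 * link attention interrupts again.
 */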
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if ((phba->fc_eventTag  < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	spin_lock_irq(&phba->hbalock);
	if (la->mm)
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
	spin_unlock_irq(&phba->hbalock);

	phba->link_events++;
	if (la->attType == AT_LINK_UP && (!la->mm)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0],
					la->mm, la->fa,
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (la->attType == AT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				la->mm, la->fa);
		}
		lpfc_mbx_issue_link_down(phba);
	}
	if (la->mm && la->attType == AT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if (la->fa) {
		if (la->mm)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n", la->fa);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;

	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba,
				pmb->u.mb.un.varRegLogin.rpi);

	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, pmb,
				NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
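
/*
 * Issue an UNREG_VPI mailbox command for the vport. Returns zero on
 * successful issue and nonzero if the mailbox could not be allocated or
 * issued; completion is handled by lpfc_mbx_cmpl_unreg_vpi above.
 */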
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
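
/*
 * Completion handler for REG_VPI. On error the vport is placed in the
 * FC_VPORT_FAILED state; on success discovery continues with ELS PLOGIs
 * to any nodes on the NPR list and the vport is marked ready.
 */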
void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 */
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *) pmb->context2;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (rc != MBX_TIMEOUT) {
		if (pmb->context2) {
			mp = (struct lpfc_dmabuf *) pmb->context2;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when physical port receive logo do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
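
/*
 * Register (or re-register) a discovered node with the FC transport as a
 * remote port. A reference on the ndlp is taken via lpfc_nlp_get() and
 * parked in rport->dd_data for the life of the registration.
 */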
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
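
/*
 * Adjust the per-state node counters on the vport by @count (typically
 * +1 or -1) under the SCSI host lock; these counters drive discovery
 * progress decisions elsewhere in the driver.
 */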
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
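
/*
 * Put the node on the vport's fc_nodes list if it is not already queued;
 * counterpart of lpfc_dequeue_node below.
 */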
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of @ndlp might go beyond the existence of @vport: the final
 * release of the ndlp is determined by its reference count, and operations
 * on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}

struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:     did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_VALID;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}

/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_VALID) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
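
/*
 * Unregister every RPI logged in on this vport in a single mailbox
 * command by passing the 0xffff wildcard rpi to UNREG_LOGIN.
 */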
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0280 lpfc_cleanup_node: ndlp:x%p "
				 "usgmap:x%x refcnt:%d\n",
				 (void *)ndlp, ndlp->nlp_usg_map,
				 atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0281 lpfc_cleanup_node: ndlp:x%p "
				 "usgmap:x%x refcnt:%d\n",
				 (void *)ndlp, ndlp->nlp_usg_map,
				 atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_free_rpi(phba,
						mb->u.mb.un.varRegLogin.rpi);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
					  (uint8_t *) &vport->fc_sparam,
					  mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox,
						     phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
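#if 0
/* Illustrative sketch (not part of the driver): how the D_ID union above
 * decomposes a 24-bit FC address into domain/area/id bytes. For example, a
 * DID of 0x010200 has domain 0x01, area 0x02, id 0x00; lpfc_matchdid()
 * treats two DIDs as a loose match when the relevant bytes agree.
 */
static int example_same_domain_area(uint32_t did_a, uint32_t did_b)
{
	D_ID a, b;

	a.un.word = did_a;
	b.un.word = did_b;
	return (a.un.b.domain == b.un.b.domain) &&
	       (a.un.b.area == b.un.b.area);
}
#endif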
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
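#if 0
/* Illustrative sketch (not part of the driver): lpfc_findnode_did() is the
 * locked wrapper; __lpfc_findnode_did() is the lock-free worker for callers
 * that already hold host_lock. A hypothetical caller from process context
 * could look like this:
 */
static void example_lookup(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* No locks held here; the wrapper takes host_lock internally */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (ndlp)
		/* node found; take a reference before long-term use */
		lpfc_nlp_get(ndlp);
}
#endif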
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
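#if 0
/* Illustrative sketch (not part of the driver): the scan-down indexing used
 * above. With cfg_scan_down set, iteration walks lpfcAlpaArray from entry 0
 * (ALPA 0xEF, highest priority) downward; otherwise it walks from the last
 * entry (ALPA 0x01) upward.
 */
static uint32_t example_alpa_for_pass(struct lpfc_vport *vport, int j)
{
	uint32_t index;

	if (vport->cfg_scan_down)
		index = j;			/* 0xEF first */
	else
		index = FC_MAXLOOP - j - 1;	/* 0x01 first */
	return lpfcAlpaArray[index];
}
#endif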
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * If it's not a physical port or if we have already sent
	 * clear_la, then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
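#if 0
/* Illustrative sketch (not part of the driver): lpfc_disc_timeout() runs in
 * timer (interrupt) context, so it only posts WORKER_DISC_TMO and wakes the
 * worker thread; the sleepable work happens later in
 * lpfc_disc_timeout_handler(). The same post-and-wake pattern, reduced to
 * its core:
 */
static void example_post_event(struct lpfc_vport *vport, uint32_t event)
{
	unsigned long flags;
	uint32_t already_posted;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	already_posted = vport->work_port_events & event;
	vport->work_port_events |= event;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!already_posted)
		lpfc_worker_wake_up(vport->phba);
}
#endif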
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}
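#if 0
/* Illustrative sketch (not part of the driver): __lpfc_find_node() takes a
 * node_filter callback, so a new lookup key only needs a small predicate.
 * A hypothetical filter matching on nlp_sid (field type assumed here) could
 * look like this:
 */
static int example_filter_by_sid(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *sid = param;

	return ndlp->nlp_sid == *sid;
}
/* ... used as:
 *	ndlp = __lpfc_find_node(vport, example_filter_by_sid, &sid);
 * with host_lock held, like the rpi and wwpn filters above.
 */
#endif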
/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: kref_put returns 1 when it decrements a reference count
	 * that was 1; it invokes the release callback function, but still
	 * leaves the reference count at 1 (it does not actually perform
	 * the final decrement). Otherwise, it actually decrements the
	 * reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
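#if 0
/* Illustrative sketch (not part of the driver): the get/put discipline the
 * comments above describe. Each thread that stores or passes an ndlp pointer
 * takes its own reference; the final put triggers lpfc_nlp_release().
 */
static void example_refcount_usage(struct lpfc_nodelist *ndlp)
{
	if (!lpfc_nlp_get(ndlp))	/* NULL if node is being released */
		return;
	/* ... safely use ndlp here ... */
	lpfc_nlp_put(ndlp);		/* returns 1 only on final release */
}
#endif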
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 **/
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_VALID)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 **/
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(phba->pport);
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	return 0;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 **/
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 **/
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 **/
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery.
 **/
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode or if HBA does not support
	 * FCoE or if FCF is not registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 **/
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
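/* Worked example of the record_count arithmetic above (illustrative only):
 * conn_hdr->length counts 32-bit words of record payload, so
 *
 *	record_count = (length * sizeof(uint32_t)) /
 *		       sizeof(struct lpfc_fcf_conn_rec);
 *
 * For illustration, if each conn_rec were 32 bytes (8 words), a header
 * length of 24 words would yield 3 connection records.
 */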
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 **/
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 **/
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 **/
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}