/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which need to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
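
/*
 * Typical call pattern (a sketch, not code from this file): synchronous
 * callers pass a completion as arg2 and wait for the worker thread to run
 * the event, e.g. when bringing the HBA online:
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 *
 * This mirrors how lpfc_work_list_done() completes evt_arg2 for the
 * LPFC_EVT_ONLINE/OFFLINE family of events above.
 */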
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
			((vport->port_type == LPFC_NPIV_PORT) &&
			(ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	unsigned long flags;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	/* If there is a pending FCoE event, restart FCF table scan. */
	if (lpfc_check_pending_fcoe_event(phba, 1)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1 else returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1 else returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1 else returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
		       uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (phba->pport->port_state != LPFC_FLOGI)
			lpfc_initial_flogi(phba->pport);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
		GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery else returns zero.
 * If this FCF record can be used for SAN discovery, the boot_flag will
 * indicate if this FCF is used by boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to
	 * all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record report a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					     new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					    new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = 0xFFFF;

		return 1;
	}

	return 0;
}
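
/*
 * Worked example for the vlan bitmap scan at the top of
 * lpfc_match_fcf_conn_list(): the 512-byte bitmap covers vlan ids 0-4095,
 * eight per byte, lowest id first.  If vlan_bitmap[2] == 0x10 and all
 * earlier bytes are zero, the scan sets fcf_vlan_id = 2 * 8 = 16 and then
 * advances past the four clear low-order bits, yielding 20 (bit 4 of
 * byte 2) as the lowest vlan id advertised by the FCF record.
 */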
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1 else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
		 * flag
		 */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf record seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the generated random number.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks start
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id;
	int rc;
	bool select_new_fcf;
	uint32_t seed;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, 0)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for round robin FCF failover.
	 */
	if (!rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF record fcf_index:x%x failed FCF "
				"connection list check, fcf_avail:x%x, "
				"fcf_valid:x%x\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record));
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
		if (rc)
			goto read_next_fcf;
	}

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
					new_fcf_record) &&
		    lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
				       new_fcf_record) &&
		    lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
					new_fcf_record) &&
		    lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
				       vlan_id)) {
			phba->fcf.fcf_flag |= FCF_AVAILABLE;
			if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
				/* Stop FCF redisc wait timer if pending */
				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
				/* If in fast failover, mark it's completed */
				phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
							FCF_DISCOVERY);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf)
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
		/* Seeding the random number generator for random selection */
		seed = (uint32_t)(0xFFFFFFFF & jiffies);
		srandom32(seed);
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF record "
					       "found during this round of "
					       "post FCF rediscovery scan: "
					       "fcf_evt_tag:x%x, fcf_index: "
					       "x%x\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				/*
				 * Let next new FCF event trigger fast
				 * failover
				 */
				spin_lock_irq(&phba->hbalock);
				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/* mark the FCF fast failover completed */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Set up the initial registered FCF index for FLOGI
			 * round robin FCF failover.
			 */
			phba->fcf.fcf_rr_init_indx =
					phba->fcf.failover_rec.fcf_indx;
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;
			/*
			 * Otherwise, initial scan or post linkdown rescan,
			 * register with the best FCF record found so far
			 * through the FCF scanning process.
			 */

			/* mark the initial FCF discovery completed */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Set up the initial registered FCF index for FLOGI
			 * round robin FCF failover
			 */
			phba->fcf.fcf_rr_init_indx =
					phba->fcf.current_rec.fcf_indx;
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure round robin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * fails through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister the currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;

	/* If link state is not up, stop the round robin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Get the needed parameters from FCF record */
	lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				 &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/* Upload new FCF record to the failover FCF record */
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 FLOGI round robin FCF failover from FCF "
			"(index:x%x) to FCF (index:x%x).\n",
			current_fcf_index,
			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);
}
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure round robin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for round robin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (phba->fcf.fcf_flag & FCF_DISCOVERY)
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					LOG_DISCOVERY,
					"2731 Cannot find fabric "
					"controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
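
/**
 * lpfc_mbx_cmpl_reg_vfi - Completion handler for the REG_VFI mailbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * On REG_VFI failure, either fall back to loop-map discovery (loop topology)
 * or mark the vport failed. On success, mark the VPI/VFI registered and, if
 * the port is still in fabric config-link state, kick off the per-vport
 * FDISCs and the SCR/NameServer PLOGI.
 */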
static void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
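
/**
 * lpfc_mbx_cmpl_read_sparam - Completion handler for READ_SPARAM.
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to mailbox object.
 *
 * Copies the service parameters returned by the HBA into the vport and,
 * for the physical port, caches the WWNN/WWPN. On mailbox error the link
 * is taken down and a CLEAR_LA is issued.
 */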
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
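
/**
 * lpfc_mbx_process_link_up - Process a link-up attention from READ_LA.
 * @phba: pointer to lpfc hba data structure.
 * @la: pointer to the READ_LA mailbox variant carrying the link attention.
 *
 * Records link speed and topology, assigns the local DID (from the granted
 * AL_PA in loop topology), then issues READ_SPARAM and either CONFIG_LINK
 * (FC) or the initial FCF table scan (FCoE) to continue discovery.
 */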
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	case LA_10GHZ_LINK:
		phba->fc_linkspeed = LA_10GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");

		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
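
/*
 * Handle a link-down transition reported by READ_LA: take the link down,
 * re-enable Link Attention interrupts, and release any unused FCF.
 */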
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if ((phba->fc_eventTag  < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	spin_lock_irq(&phba->hbalock);
	if (la->mm)
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
	spin_unlock_irq(&phba->hbalock);

	phba->link_events++;
	if (la->attType == AT_LINK_UP && (!la->mm)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0],
					la->mm, la->fa,
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (la->attType == AT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				la->mm, la->fa);
		}
		lpfc_mbx_issue_link_down(phba);
	}
	if (la->mm && la->attType == AT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if (la->fa) {
		if (la->mm)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n", la->fa);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;

	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba,
				pmb->u.mb.un.varRegLogin.rpi);

	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, pmb,
				NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
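
/*
 * This routine handles processing an UNREG_VPI mailbox
 * command upon completion. A busy VPI triggers an HBA
 * reset unless the driver is unloading.
 */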
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
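
/*
 * This routine handles processing a REG_VPI mailbox
 * command upon completion. On success it restarts NPR PLOGI
 * discovery and marks the vport ready; on failure it marks
 * the vport failed.
 */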
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 */
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *) pmb->context2;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (rc != MBX_TIMEOUT) {
		if (pmb->context2) {
			mp = (struct lpfc_dmabuf *) pmb->context2;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when physical port receives a LOGO, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
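
/*
 * Register the remote port represented by @ndlp with the FC transport,
 * then report its FCP roles and capture the assigned SCSI target id.
 */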
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
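
/*
 * Adjust the per-state node counters kept on the vport by @count
 * (+1 on entering a state, -1 on leaving it), under the host lock.
 */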
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state ==  NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state ==  NLP_STE_MAPPED_NODE) &&
		(vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
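
/*
 * Transition @ndlp to a new discovery state: log the transition, cancel
 * any NPR retry timer, update the per-state counters, and let
 * lpfc_nlp_state_cleanup() sync the FC transport with the new state.
 */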
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int  old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of the @ndlp might go beyond the existence of @vport, as
 * the final release of ndlp is determined by its reference count. And, the
 * operation on @ndlp needs the reference to phba.
 */
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}
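
/*
 * Reactivate an inactive ndlp: reject nodes being freed or already
 * active, then reinitialize the node (preserving its DID and list
 * linkage) and move it to the requested state.
 */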
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:       did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
	 * list until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport    *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_VALID;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 */
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_VALID) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}

void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_free_rpi(phba,
					 mb->u.mb.un.varRegLogin.rpi);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			}
			else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
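/*
 * D_ID decomposes the 24-bit FC address into domain, area and ALPA fields.
 * The fallback above lets a node cached with an ALPA-only address match a
 * full lookup in the local domain/area: for example, with
 * fc_myDID = 0x010200, a node whose nlp_DID is 0x0000EF still matches a
 * search for did = 0x0102EF.
 */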
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
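/*
 * Locking convention: the __lpfc_findnode_did() variant assumes the caller
 * already holds shost->host_lock; lpfc_findnode_did() above is the
 * self-locking wrapper for callers that do not.
 */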
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
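/*
 * alpa_map[] layout, as consumed above: alpa_map[0] holds the count of
 * valid entries reported by the adapter and alpa_map[1..n] hold the
 * discovered ALPAs, which is why the indexed loop starts at 1. When no
 * map is available, lpfcAlpaArray supplies the 126 architecturally valid
 * loop addresses in priority order.
 */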
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
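/*
 * Ordering note: ADISC is tried before PLOGI because nodes that were
 * previously logged in can be revalidated with the lighter-weight ADISC
 * exchange; only nodes that fail or never qualified for ADISC fall
 * through to a fresh PLOGI pass.
 */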
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues for the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
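/*
 * The two queues need different treatment: iocbs still on the txq have not
 * been handed to the adapter, so they can simply be moved to a local list
 * and completed with IOSTAT_LOCAL_REJECT; iocbs on the txcmplq are owned by
 * the firmware and must be individually aborted via
 * lpfc_sli_issue_abort_iotag() instead.
 */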
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
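/*
 * lpfc_disc_timeout() runs in timer (softirq) context, so it only marks
 * WORKER_DISC_TMO and wakes the worker thread; the actual processing in
 * lpfc_disc_timeout_handler() below runs in the worker's process context.
 * The tmo_posted test keeps an already-pending event from being posted
 * twice.
 */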
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* Check ndlp usage to prevent incrementing the
		 * reference count of an ndlp that is in the process
		 * of being released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: kref_put returns 1 when decrementing a reference
	 * count that was 1; it invokes the release callback function
	 * but leaves the reference count at 1 (it does not actually
	 * perform the final decrement). Otherwise, it decrements the
	 * reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
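/*
 * Minimal usage sketch (illustrative only, not part of the driver): a
 * discovery path brackets any use of an ndlp between lpfc_nlp_get() and
 * lpfc_nlp_put() so the node cannot be released underneath it. The
 * function name below is hypothetical.
 */
static inline void lpfc_example_use_ndlp(struct lpfc_nodelist *ndlp)
{
	/* take a reference; returns NULL if the node is being released */
	ndlp = lpfc_nlp_get(ndlp);
	if (!ndlp)
		return;
	/* ... safely use ndlp here ... */
	lpfc_nlp_put(ndlp);	/* last put triggers lpfc_nlp_release() */
}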
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with an
 * fc_rport associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);
	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_VALID)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(phba->pport);
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	return 0;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there is any connected remote port for the FCF and,
 * if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode or if HBA does not support
	 * FCoE or if FCF is not registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
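/*
 * record_count above follows from the header convention: conn_hdr->length
 * counts 32-bit words of payload, so multiplying by sizeof(uint32_t) and
 * dividing by the record size in bytes yields the number of connection
 * records that follow the header.
 */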
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found,
 * this function returns a pointer to it; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
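/*
 * Worked example of the TLV walk above: a record with rec_length = 2
 * occupies 3 words total (one header word with the type in byte 0 and the
 * length in byte 1, plus two data words), so the next record begins at
 * offset + 2 * sizeof(uint32_t) + sizeof(uint32_t) = offset + 12 bytes.
 */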
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}