/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
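/* Note: this is the fixed arbitrated-loop physical address (AL_PA)
 * priority table from the FC-AL specification, listed from highest to
 * lowest loop priority; its 126 entries cover every valid NL_Port
 * AL_PA, letting the driver map a loop position to a stable SCSI
 * target id for scan-down binding.
 */
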
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
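/* Note: LPFC_CTX_TGT scopes the abort above to every outstanding FCP
 * IOCB on the ring that targets this SCSI id (ndlp->nlp_sid), rather
 * than a single LUN or the whole host.
 */
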
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately; we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* If the WWPN of the rport and ndlp don't match, ignore it */
		if (rport->port_name !=
		    wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
			put_device(&rport->dev);
			return;
		}
	}

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
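/* Note: the transport devloss callback does not tear the node down
 * directly; it queues an LPFC_EVT_DEV_LOSS work item and takes a node
 * reference (lpfc_nlp_get) that the worker thread drops after
 * lpfc_dev_loss_tmo_handler() runs, so the ndlp cannot be freed while
 * the event still sits on phba->work_list.
 */
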
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise it
 * returns 0 to indicate that no remote node was using the FCF when the
 * devloss timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately; we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node which had been using the FCF, when this
 * routine is invoked it is guaranteed that no remote node is still using
 * the FCF. In that case, if the FIP engine is neither in the FCF table
 * scan process nor the roundrobin failover process, the in-use FCF is
 * unregistered. If the FIP engine is in an FCF discovery process, the
 * devloss timeout state is set so that either the FCF table scan process
 * or the roundrobin failover process will unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when
 * there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, do not exhaust memory */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
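/* Note: the LPFC_MAX_EVT_COUNT check above is the storm throttle the
 * header comment describes; once that many events are outstanding,
 * allocation requests from interrupt context simply fail until the
 * worker thread posts the events and lpfc_free_fast_evt() below drops
 * the count again.
 */
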
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
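/* Note: hbalock is held only around the work_list manipulation above;
 * it is dropped before each event is handled so that handlers such as
 * lpfc_dev_loss_tmo_handler() may block or retake the lock themselves,
 * then reacquired before the list is tested again.
 */
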
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

static int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
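/* Note: producers signal this thread by setting LPFC_DATA_READY in
 * phba->data_flags and waking phba->work_waitq (lpfc_worker_wake_up);
 * the test_and_clear_bit() in the wait condition consumes that signal,
 * so a wakeup racing with an in-progress pass is never lost.
 */
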
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
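/* Note: GFP_ATOMIC is required above because posters may run in
 * interrupt context (mailbox completions and ELS ring receive events,
 * per the comment in the function), where sleeping allocations are not
 * allowed.
 */
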
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t      *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
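/* Note: the readl() immediately after each writel() above is the usual
 * PCI posted-write flush: reading the Host Control register back forces
 * the interrupt-enable update out to the adapter before the lock is
 * dropped.
 */
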
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the roundrobin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by the boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, otherwise it
 * returns zero. If this FCF record can be used for SAN discovery, boot_flag
 * will indicate whether this FCF is used by the boot bios and addr_mode will
 * indicate the addressing mode to be used for this FCF when the function
 * returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return from the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver connects
	 * to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
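/* Note: the vlan id reported back by the match above follows a fixed
 * precedence: a vlan tag from the matching connect-table entry wins,
 * then the lowest vlan id set in the FCF record's bitmap, and finally
 * LPFC_FCOE_NULL_VID when neither specifies one.
 */
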
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should
 * be chosen; otherwise, returns false when the outcome is to keep the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
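/* Note: this is reservoir sampling with a reservoir of one. The k-th
 * eligible record replaces the current choice with probability ~1/k:
 * rand_num is uniform over [0, 0xFFFF], so fcf_cnt * rand_num < 0xFFFF
 * roughly once in fcf_cnt trials (e.g. the 3rd record wins with
 * probability ~1/3), leaving every record seen so far equally likely
 * to be the final pick.
 */
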
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
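/* Note: as used here, lpfc_sli_pcimem_bcopy() byte-swaps the record in
 * place (source and destination are the same buffer); the record body
 * is swapped only up to the vlan_bitmap, and words 137/138 beyond it
 * are converted individually with le32_to_cpu() before the record is
 * handed back.
 */
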
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * the vlan id will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
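/*
 * The comparison above deliberately excludes the FCF table index
 * itself: two table entries at different indexes can describe the same
 * fabric (same fabric name, switch name, MAC address and priority),
 * and callers treat such a match as "same FCF" for failover purposes.
 * Callers that do not care about the VLAN, such as the table-scan
 * completion handler below, pass LPFC_FCOE_IGNORE_VID to skip the
 * vlan_id check.
 */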
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 *
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 */
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	int rc = 0;
	struct lpfc_hba *phba = vport->phba;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
			"2794 Try FLOGI roundrobin FCF failover to "
			"(x%x)\n", fcf_index);
	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
				"2761 FLOGI roundrobin FCF failover "
				"failed (rc:x%x) to read FCF (x%x)\n",
				rc, phba->fcf.current_rec.fcf_indx);
		goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
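/*
 * Sketch of the caller pattern, as used by the roundrobin mailbox
 * completion path later in this file: pull the next eligible index
 * from the roundrobin bmask and hand it to this routine, treating a
 * non-zero return as "stop issuing FLOGI to the currently registered
 * FCF":
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
 */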
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the new fcf record.
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 *
 * Returns:
 * 0=success 1=failure
 **/
int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
			       struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
			       sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
		    fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					 fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
			   next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
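/*
 * Invariant maintained by the add/del routines above: fcf_pri_list is
 * kept sorted by fip_priority (a lower value is preferred), and the
 * roundrobin bitmask fcf_rr_bmask only carries indexes belonging to
 * the best priority level seen so far.  For example, if the list head
 * holds priority 2 and a record with priority 1 arrives, the bmask is
 * wiped and reseeded with just the new index; a second priority-1
 * record would then simply have its bit set alongside it.
 */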
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id;
	uint32_t seed;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_sli4_fcf_pri_list_del(phba,
					   bf_get(lpfc_fcf_record_fcf_index,
						  new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"mode scanning available "
						"FCF.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x)\n",
						phba->fcf.current_rec.fcf_indx);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
		/* Seeding the random number generator for random selection */
		seed = (uint32_t)(0xFFFFFFFF & jiffies);
		srandom32(seed);
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * FCF failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			    phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
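/*
 * Selection policy implemented by the scan handler above, in order of
 * precedence: a record carrying the boot BIOS flag wins over one that
 * does not; otherwise the lower fip_priority value wins; and records
 * of equal priority go through lpfc_sli4_new_fcf_random_select() with
 * the running eligible_fcf_cnt, a reservoir-style pick intended to
 * leave each of N equal candidates equally likely to be the survivor
 * once the whole table has been walked.
 */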
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls back to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto error_out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_issue_init_vfi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
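/*
 * When the roundrobin walk lands back on the FCF that is already
 * registered, the handler above re-registers nothing: it simply backs
 * off briefly and re-drives discovery through lpfc_issue_init_vfi(),
 * giving the fabric time to settle before the next FLOGI attempt.
 */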
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
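/*
 * Note the division of labor among the three READ_FCF_RECORD
 * completion handlers above: the scan handler walks the whole table to
 * pick and register an FCF, the roundrobin handler performs an actual
 * failover to one pre-selected index, and this last handler only
 * refreshes the eligibility bmask/priority list when a new FCF event
 * arrives in the middle of discovery.
 */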
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 */
void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2891 Init VFI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 */
void
lpfc_issue_init_vfi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX, "2892 Failed to allocate "
				"init_vfi mailbox\n");
		return;
	}
	lpfc_init_vfi(mboxq, vport);
	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
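/*
 * lpfc_issue_init_vfi() is fire-and-forget: completion handling,
 * including the follow-on FLOGI and the vport failure state, lives
 * entirely in lpfc_init_vfi_cmpl() above, so callers such as the
 * roundrobin failover path simply invoke it and return.
 */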
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					LOG_DISCOVERY,
					"2731 Cannot find fabric "
					"controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX, "2607 Failed to allocate "
				"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
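/*
 * The physical port is skipped above on purpose: only NPIV vports
 * issue FDISC.  A vport that still needs INIT_VPI is deferred to
 * lpfc_issue_init_vpi(), whose completion handler re-enters the FDISC
 * path once the VPI has been initialized.
 */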
static void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x>\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	lpfc_update_vport_wwn(vport);
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
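/*
 * Both exit paths above must release the service-parameter DMA buffer
 * as well as the mailbox: pmb->context1 carries the lpfc_dmabuf that
 * READ_SPARAM DMA'd into, and nothing downstream keeps a reference to
 * it once the WWNs have been copied out of it.
 */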
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;

	spin_lock_irq(&phba->hbalock);
	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
	case LPFC_LINK_SPEED_1GHZ:
	case LPFC_LINK_SPEED_2GHZ:
	case LPFC_LINK_SPEED_4GHZ:
	case LPFC_LINK_SPEED_8GHZ:
	case LPFC_LINK_SPEED_10GHZ:
	case LPFC_LINK_SPEED_16GHZ:
		phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
		break;
	default:
		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la)) {
			spin_lock(shost->host_lock);
			vport->fc_flag |= FC_LBIT;
			spin_unlock(shost->host_lock);
		}

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		spin_lock(shost->host_lock);
		vport->fc_flag |= FC_LBIT;
		spin_unlock(shost->host_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
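/*
 * The ALPA-map loop above only runs at LOG_LINK_EVENT verbosity: it
 * walks phba->alpa_map[] (byte 0 holds the count of granted ALPAs) in
 * 16-byte chunks and dumps each chunk as four words, purely as a
 * discovery debugging aid on loop topologies.
 */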
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_mbx_read_top *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
	}

	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (bf_get(lpfc_mbx_read_top_pb, la))
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if ((phba->fc_eventTag  < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	spin_lock_irq(&phba->hbalock);
	if (bf_get(lpfc_mbx_read_top_mm, la))
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
	spin_unlock_irq(&phba->hbalock);

	phba->link_events++;
	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					bf_get(lpfc_mbx_read_top_alpa_granted,
					       la),
					bf_get(lpfc_mbx_read_top_link_spd, la),
					phba->alpa_map[0],
					bf_get(lpfc_mbx_read_top_mm, la),
					bf_get(lpfc_mbx_read_top_fa, la),
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
		   LPFC_ATT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				bf_get(lpfc_mbx_read_top_mm, la),
				bf_get(lpfc_mbx_read_top_fa, la));
		lpfc_mbx_issue_link_down(phba);
	}
	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if (bf_get(lpfc_mbx_read_top_fa, la)) {
		if (bf_get(lpfc_mbx_read_top_mm, la))
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));
	}

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
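/*
 * Menlo maintenance mode explains the apparent contradiction handled
 * above: the adapter can report attention type "link up" while the mm
 * bit is set, in which case the driver treats the event as a link-down
 * for FC discovery purposes but still wakes any MENLO_SET_MODE or
 * MENLO_RESET issuer waiting on wait_4_mlo_m_q.
 */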
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * progress.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);
	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, pmb,
					NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
					      LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		/* free dma buffer from previous round */
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
						       LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0544 lpfc_create_static_vport failed to"
					" issue dump mailbox command ret 0x%x "
					"status 0x%x\n",
					mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->context1;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
			    sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
					      vport_buff + offset,
					      byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		 offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
	    ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0545 lpfc_create_static_vport bad"
				" information header 0x%x 0x%x\n",
				le32_to_cpu(vport_info->signature),
				le32_to_cpu(vport_info->rev) &
				VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0546 lpfc_create_static_vport failed to"
					" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->context1) {
			mp = (struct lpfc_dmabuf *)pmb->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}
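/*
 * Config region 22 is read above in bounded chunks: each DUMP round
 * appends at most the remaining size of struct static_vport_info, and
 * the loop stops on a short or empty transfer, so a malformed region
 * cannot overrun vport_buff.  The signature/revision check then guards
 * against stale or foreign region contents before any vport is
 * created.
 */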
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * reference to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the reference
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when the physical port receives a LOGO do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else {
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current reference to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport %p role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport %p\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
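/* Note (added commentary): lpfc_nlp_counters takes and releases
 * shost->host_lock itself, so callers such as lpfc_nlp_set_state and
 * lpfc_dequeue_node below invoke it without the host lock already held.
 */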
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
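/*
 * Illustrative usage sketch (assumed, not from the driver): format a node
 * state for logging. The buffer must hold the longest state name
 * ("REGLOGIN") plus the terminating NUL; callers in this file use char[16].
 *
 *	char name[16];
 *	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "state: %s\n",
 *		lpfc_nlp_state_name(name, sizeof(name), ndlp->nlp_state));
 */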
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int  old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
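/*
 * Illustrative sketch (assumed): the discovery state machine drives a node
 * through these states; for example, a caller issuing a PLOGI would first do
 *
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *
 * and on a completed login sequence move the node to NLP_STE_UNMAPPED_NODE
 * or NLP_STE_MAPPED_NODE, which registers the remote port with the FC
 * transport via lpfc_nlp_state_cleanup above.
 */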
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of the @ndlp may go beyond the existence of @vport, as
 * the final release of ndlp is determined by its reference count, and the
 * operation on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:     did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
	 * list until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
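/*
 * Worked example (added commentary): with fc_ratov = 10 (seconds) the
 * normal discovery timeout is 10 * 3 + 3 = 33 seconds. In the FAN case,
 * with fc_edtov = 2000 (milliseconds), the timeout is
 * ((2000 + 999) / 1000) + 1 = 3 seconds, i.e. just above E_D_TOV.
 */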
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
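/* Note (added commentary): the ELS-ring switch above intentionally has no
 * break statements, so a CMD_GEN_REQUEST64_CR iocb that does not match by
 * context_un.ndlp is also checked against the ELS request and ELS response
 * match rules before the switch falls out.
 */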
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we
				 * are looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so dequeue and call
					 * compl with an error */
					list_move_tail(&iocb->list,
						       &completions);
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->context1);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
				mbox->context1 = ndlp;
				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
			} else {
				mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			}

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);

		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2884 Vport array allocation failed\n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
			(ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we aren't leaving any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
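/*
 * Worked example (added commentary): on a private loop the driver may have
 * recorded a node by its bare ALPA. With fc_myDID = 0x010201, an ndlp whose
 * nlp_DID is 0x000003 matches an incoming did of 0x010203: the low bytes
 * agree, the did carries our domain/area, and the stored DID has domain == 0
 * and area == 0 with a non-zero ALPA, so the first branch above returns 1.
 */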
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}
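/* Note (added commentary): the double-underscore variant above assumes the
 * caller already holds shost->host_lock; this wrapper takes the lock itself.
 * The same __lpfc_findnode_rpi / lpfc_findnode_rpi pairing appears later in
 * this file.
 */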
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
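/*
 * Worked example (added commentary): FC_MAXLOOP entries are walked in one
 * of two directions. With cfg_scan_down set, j == 0 selects
 * lpfcAlpaArray[0] (ALPA 0xEF, the highest); otherwise j == 0 selects
 * lpfcAlpaArray[FC_MAXLOOP - 1] (ALPA 0x01, the lowest), so discovery
 * proceeds from the opposite end of the loop map.
 */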
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int  rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
		(phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NON-NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "Data: x%p x%x x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return NULL;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p did %x "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, kref_put returns 1 when decrementing a reference count
	 * that was 1: it invokes the release callback function, but
	 * still leaves the reference count at 1 (it does not actually
	 * perform the final decrement). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
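/*
 * Illustrative usage sketch (assumed, not from the driver): discovery
 * threads bracket any use of an ndlp they did not allocate with a get/put
 * pair so the node cannot be released underneath them:
 *
 *	ndlp = lpfc_nlp_get(ndlp);
 *	if (ndlp) {
 *		... use the node ...
 *		lpfc_nlp_put(ndlp);
 *	}
 */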
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * IF the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flag %x "
					"still logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
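/* Both unreg completion handlers here follow the same mailbox contract:
 * the SLI layer invokes mbox_cmpl when the command completes, and the
 * handler owns the LPFC_MBOXQ_t and must return it to mbox_mem_pool.
 * Sketch of wiring a command to such a handler (mirroring
 * lpfc_sli4_unregister_fcf() below):
 *
 *	mbox->vport = phba->pport;
 *	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 */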
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 **/
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * the VFI.
 **/
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}
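/* Editor's note: the teardown order above appears to mirror the SLI-4
 * resource hierarchy as used in this file - RPIs (remote-port logins)
 * first, then VPIs (virtual ports), then the physical port's VFI that
 * the other registrations hang off. Reversing it would leave
 * registrations referencing an already-released parent object.
 */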
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues a non-blocking (MBX_NOWAIT) unregister FCF mailbox
 * command to the HBA to unregister the currently registered FCF record.
 * The driver does not reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 **/
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
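/* Note that MBX_NOWAIT queues the command and returns immediately, so a
 * 0 return here means "issued", not "completed"; the completion handler
 * wired above frees the mailbox when the HBA responds. Sketch of the
 * same issue-and-check pattern with a hypothetical handler:
 *
 *	mbox->mbox_cmpl = my_unreg_cmpl;	// hypothetical
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		...	// never queued; report and bail
 */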
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 **/
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 **/
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 **/
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
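/* Summary of the gate above: unregistration is attempted only on an
 * FCoE/FIP-capable port with a registered FCF, no FCF discovery in
 * flight, and no FLOGI outstanding; evaluating all five flags under a
 * single hbalock hold keeps the decision atomic with respect to the
 * FCF state machine.
 */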
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 **/
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}
}
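/* Worked example of the record_count arithmetic above (sizes here are
 * illustrative, not taken from the real headers): the header length
 * field counts 32-bit words of record data, so with length = 16 and a
 * hypothetical 32-byte struct lpfc_fcf_conn_rec:
 *
 *	record_count = 16 * sizeof(uint32_t) / 32 = 2
 *
 * i.e. two connection records follow the lpfc_fcf_conn_hdr in buff.
 */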
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 **/
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}
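/* The "& 0xFFF" masks keep only the low 12 bits of the 16-bit 802.1Q
 * tag field, i.e. the VLAN ID; the upper bits carry priority/CFI and
 * must not leak into vlan_id. Example: le16_to_cpu() yielding 0xA0C8
 * masks down to VLAN ID 0x0C8 (200).
 */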
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data to find the beginning
 * of the record specified by record_type. If found, it returns a
 * pointer to the record; otherwise it returns NULL.
 **/
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
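/* Worked example of the TLV walk above: a record whose length byte
 * (buff[offset + 1]) is 3 spans one header word plus three data words,
 * so the stride to the next header is 3 * 4 + 4 = 16 bytes. The scan
 * ends on an LPFC_REGION23_LAST_REC type byte or as soon as the next
 * whole record would run past size.
 */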
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 **/
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
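/* Editor's sketch of the overall region 23 layout as implied by the
 * three routines above (inferred from the parsing code, not a formal
 * spec):
 *
 *	bytes 0..3	LPFC_REGION23_SIGNATURE
 *	byte  4		LPFC_REGION23_VERSION
 *	byte  8 on	TLV records: [type][length][data words ...],
 *			terminated by LPFC_REGION23_LAST_REC
 *
 * lpfc_parse_fcoe_conf() validates the signature and version, then uses
 * lpfc_get_rec_conf23() to locate the FCOE_PARAM_TYPE and
 * FCOE_CONN_TBL_TYPE records consumed by the two readers above.
 */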