/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_disc.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* If the WWPN of the rport and ndlp don't match, ignore it */
		if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
			put_device(&rport->dev);
			return;
		}
	}

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine shall return 1 when at least
 * one remote node, including this @ndlp, is still in use of the FCF;
 * otherwise, this routine shall return 0 when no remote node is still in use
 * of the FCF when devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are in use of the FCF. When devloss timeout happens to the last remote
 * node using the FCF, if the FIP engine is neither in FCF table scan
 * process nor roundrobin failover process, the in-use FCF shall be
 * unregistered. If the FIP engine is in FCF discovery process, the devloss
 * timeout state shall be set for either the FCF table scan process or
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
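/* Drain the HBA work_list: dequeue each queued lpfc_work_evt, dispatch it
 * by event type, and free the event structure unless it is embedded in an
 * ndlp or owned elsewhere.
 */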
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
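/* Service one pass of deferred worker-thread processing: handle host
 * attention bits (error, mailbox, link attention), SLI4 asynchronous
 * events, per-vport timer events, and the ELS slow ring, then run
 * lpfc_work_list_done() to complete queued work events.
 */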
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
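/* Worker thread entry point: loop until kthread_should_stop(), sleeping on
 * work_waitq and calling lpfc_work_done() whenever LPFC_DATA_READY is set.
 */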
static int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
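/* Walk the vport's node list after a link failure: unregister RPIs where
 * required and run each node through DEVICE_RM or DEVICE_RECOVERY in the
 * discovery state machine, then unregister the VPI if the HBA requires a
 * full vport teardown.
 */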
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
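/* Per-vport link-down processing: post an FCH_EVT_LINKDOWN event to the
 * FC transport, run the common link-failure cleanup, and cancel any
 * delayed Nport discovery.
 */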
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
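/* HBA-wide link-down handling: block the SCSI stack, mark the link down,
 * notify every vport, clean up firmware default RPIs, and reset pt2pt
 * state if the port was running point-to-point.
 */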
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->vport = vport;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
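/* At link-up, move fabric nodes back to NPR state and unregister RPIs for
 * nodes that will not be rediscovered via ADISC, so their outstanding I/O
 * fails early.
 */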
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
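/* HBA-wide link-up handling: mark the link up, unblock fabric IOCBs, run
 * the per-vport link-up processing, and issue CLEAR_LA when NPIV is
 * enabled on a pre-SLI4 HBA.
 */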
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
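/* Completion handler for the CONFIG_LINK mailbox command: on success it
 * either waits for FAN on a public loop or starts discovery with FLOGI;
 * on failure it logs the error, takes the link down and issues CLEAR_LA.
 */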
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout. port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
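/* Completion handler for the REG_FCFI mailbox command: records the
 * registered FCFI, marks the FCF registered, and either restarts the FCF
 * table scan on a pending FCoE event or proceeds with FCoE discovery.
 */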
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1 else return 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1 else return 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1 else return 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * lpfc_update_fcf_record - Update driver fcf record
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery else return zero. If this FCF
 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID;
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entry, driver connect to all
	 * FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record report a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table need to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1 else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf record seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is to keep the previously
 * chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * the vlan id will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 **/
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 **/
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;
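
	/*
	 * Reached when there is no eligible FCF left or when the read of the
	 * next roundrobin FCF record has been issued; in either case stop
	 * retrying FLOGI on the currently registered FCF.
	 */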
stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
			uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
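		/*
		 * If the record being removed sits at the in-use FCF's
		 * priority level, there is now one less FCF eligible for
		 * roundrobin selection at that level.
		 */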
		if (phba->fcf.current_rec.priority ==
					new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 *
 * Returns:
 * 0 = success, 1 = failure
 **/
int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
	struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
				sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}
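	/*
	 * The new record's priority value is larger than the current head's,
	 * so walk the priority-sorted list to find its insertion point.
	 */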
	list_for_each_entry_safe(fcf_pri, next_fcf_pri, &phba->fcf.fcf_pri_list,
				list) {
		if (new_fcf_pri->fcf_rec.priority <=
				fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
						&phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					&((struct lpfc_fcf_pri *)
					fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
				next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;
	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * the HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks off
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 **/
static void
2141 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2143 struct fcf_record
*new_fcf_record
;
2144 uint32_t boot_flag
, addr_mode
;
2145 uint16_t fcf_index
, next_fcf_index
;
2146 struct lpfc_fcf_rec
*fcf_rec
= NULL
;
2149 bool select_new_fcf
;
2152 /* If there is pending FCoE event restart FCF table scan */
2153 if (lpfc_check_pending_fcoe_event(phba
, LPFC_SKIP_UNREG_FCF
)) {
2154 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2158 /* Parse the FCF record from the non-embedded mailbox command */
2159 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2161 if (!new_fcf_record
) {
2162 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
2163 "2765 Mailbox command READ_FCF_RECORD "
2164 "failed to retrieve a FCF record.\n");
2165 /* Let next new FCF event trigger fast failover */
2166 spin_lock_irq(&phba
->hbalock
);
2167 phba
->hba_flag
&= ~FCF_TS_INPROG
;
2168 spin_unlock_irq(&phba
->hbalock
);
2169 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2173 /* Check the FCF record against the connection list */
2174 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2175 &addr_mode
, &vlan_id
);
2177 /* Log the FCF record information if turned on */
2178 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2182 * If the fcf record does not match with connect list entries
2183 * read the next entry; otherwise, this is an eligible FCF
2184 * record for roundrobin FCF failover.
2187 lpfc_sli4_fcf_pri_list_del(phba
,
2188 bf_get(lpfc_fcf_record_fcf_index
,
2190 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2191 "2781 FCF (x%x) failed connection "
2192 "list check: (x%x/x%x/%x)\n",
2193 bf_get(lpfc_fcf_record_fcf_index
,
2195 bf_get(lpfc_fcf_record_fcf_avail
,
2197 bf_get(lpfc_fcf_record_fcf_valid
,
2199 bf_get(lpfc_fcf_record_fcf_sol
,
2201 if ((phba
->fcf
.fcf_flag
& FCF_IN_USE
) &&
2202 lpfc_sli4_fcf_record_match(phba
, &phba
->fcf
.current_rec
,
2203 new_fcf_record
, LPFC_FCOE_IGNORE_VID
)) {
2204 if (bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
) !=
2205 phba
->fcf
.current_rec
.fcf_indx
) {
2206 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
2207 "2862 FCF (x%x) matches property "
2208 "of in-use FCF (x%x)\n",
2209 bf_get(lpfc_fcf_record_fcf_index
,
2211 phba
->fcf
.current_rec
.fcf_indx
);
2215 * In case the current in-use FCF record becomes
2216 * invalid/unavailable during FCF discovery that
2217 * was not triggered by fast FCF failover process,
2218 * treat it as fast FCF failover.
2220 if (!(phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
) &&
2221 !(phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)) {
2222 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2223 "2835 Invalid in-use FCF "
2224 "(x%x), enter FCF failover "
2226 phba
->fcf
.current_rec
.fcf_indx
);
2227 spin_lock_irq(&phba
->hbalock
);
2228 phba
->fcf
.fcf_flag
|= FCF_REDISC_FOV
;
2229 spin_unlock_irq(&phba
->hbalock
);
2230 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2231 lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
2232 LPFC_FCOE_FCF_GET_FIRST
);
2238 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2239 rc
= lpfc_sli4_fcf_pri_list_add(phba
, fcf_index
,
2246 * If this is not the first FCF discovery of the HBA, use last
2247 * FCF record for the discovery. The condition that a rescan
2248 * matches the in-use FCF record: fabric name, switch name, mac
2249 * address, and vlan_id.
2251 spin_lock_irq(&phba
->hbalock
);
2252 if (phba
->fcf
.fcf_flag
& FCF_IN_USE
) {
2253 if (phba
->cfg_fcf_failover_policy
== LPFC_FCF_FOV
&&
2254 lpfc_sli4_fcf_record_match(phba
, &phba
->fcf
.current_rec
,
2255 new_fcf_record
, vlan_id
)) {
2256 if (bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
) ==
2257 phba
->fcf
.current_rec
.fcf_indx
) {
2258 phba
->fcf
.fcf_flag
|= FCF_AVAILABLE
;
2259 if (phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
)
2260 /* Stop FCF redisc wait timer */
2261 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2263 else if (phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)
2264 /* Fast failover, mark completed */
2265 phba
->fcf
.fcf_flag
&= ~FCF_REDISC_FOV
;
2266 spin_unlock_irq(&phba
->hbalock
);
2267 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2268 "2836 New FCF matches in-use "
2270 phba
->fcf
.current_rec
.fcf_indx
);
2273 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
2274 "2863 New FCF (x%x) matches "
2275 "property of in-use FCF (x%x)\n",
2276 bf_get(lpfc_fcf_record_fcf_index
,
2278 phba
->fcf
.current_rec
.fcf_indx
);
2281 * Read next FCF record from HBA searching for the matching
2282 * with in-use record only if not during the fast failover
2283 * period. In case of fast failover period, it shall try to
2284 * determine whether the FCF record just read should be the
2287 if (!(phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)) {
2288 spin_unlock_irq(&phba
->hbalock
);
2293 * Update on failover FCF record only if it's in FCF fast-failover
2294 * period; otherwise, update on current FCF record.
2296 if (phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
)
2297 fcf_rec
= &phba
->fcf
.failover_rec
;
2299 fcf_rec
= &phba
->fcf
.current_rec
;
2301 if (phba
->fcf
.fcf_flag
& FCF_AVAILABLE
) {
2303 * If the driver FCF record does not have boot flag
2304 * set and new hba fcf record has boot flag set, use
2305 * the new hba fcf record.
2307 if (boot_flag
&& !(fcf_rec
->flag
& BOOT_ENABLE
)) {
2308 /* Choose this FCF record */
2309 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2310 "2837 Update current FCF record "
2311 "(x%x) with new FCF record (x%x)\n",
2313 bf_get(lpfc_fcf_record_fcf_index
,
2315 __lpfc_update_fcf_record(phba
, fcf_rec
, new_fcf_record
,
2316 addr_mode
, vlan_id
, BOOT_ENABLE
);
2317 spin_unlock_irq(&phba
->hbalock
);
2321 * If the driver FCF record has boot flag set and the
2322 * new hba FCF record does not have boot flag, read
2323 * the next FCF record.
2325 if (!boot_flag
&& (fcf_rec
->flag
& BOOT_ENABLE
)) {
2326 spin_unlock_irq(&phba
->hbalock
);
2330 * If the new hba FCF record has lower priority value
2331 * than the driver FCF record, use the new record.
2333 if (new_fcf_record
->fip_priority
< fcf_rec
->priority
) {
2334 /* Choose the new FCF record with lower priority */
2335 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2336 "2838 Update current FCF record "
2337 "(x%x) with new FCF record (x%x)\n",
2339 bf_get(lpfc_fcf_record_fcf_index
,
2341 __lpfc_update_fcf_record(phba
, fcf_rec
, new_fcf_record
,
2342 addr_mode
, vlan_id
, 0);
2343 /* Reset running random FCF selection count */
2344 phba
->fcf
.eligible_fcf_cnt
= 1;
2345 } else if (new_fcf_record
->fip_priority
== fcf_rec
->priority
) {
2346 /* Update running random FCF selection count */
2347 phba
->fcf
.eligible_fcf_cnt
++;
2348 select_new_fcf
= lpfc_sli4_new_fcf_random_select(phba
,
2349 phba
->fcf
.eligible_fcf_cnt
);
2350 if (select_new_fcf
) {
2351 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2352 "2839 Update current FCF record "
2353 "(x%x) with new FCF record (x%x)\n",
2355 bf_get(lpfc_fcf_record_fcf_index
,
2357 /* Choose the new FCF by random selection */
2358 __lpfc_update_fcf_record(phba
, fcf_rec
,
2360 addr_mode
, vlan_id
, 0);
2363 spin_unlock_irq(&phba
->hbalock
);
2367 * This is the first suitable FCF record, choose this record for
2368 * initial best-fit FCF.
2371 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2372 "2840 Update initial FCF candidate "
2374 bf_get(lpfc_fcf_record_fcf_index
,
2376 __lpfc_update_fcf_record(phba
, fcf_rec
, new_fcf_record
,
2377 addr_mode
, vlan_id
, (boot_flag
?
2379 phba
->fcf
.fcf_flag
|= FCF_AVAILABLE
;
2380 /* Setup initial running random FCF selection count */
2381 phba
->fcf
.eligible_fcf_cnt
= 1;
2382 /* Seeding the random number generator for random selection */
2383 seed
= (uint32_t)(0xFFFFFFFF & jiffies
);
2386 spin_unlock_irq(&phba
->hbalock
);
2390 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2391 if (next_fcf_index
== LPFC_FCOE_FCF_NEXT_NONE
|| next_fcf_index
== 0) {
2392 if (phba
->fcf
.fcf_flag
& FCF_REDISC_FOV
) {
2394 * Case of FCF fast failover scan
2398 * It has not found any suitable FCF record, cancel
2399 * FCF scan inprogress, and do nothing
2401 if (!(phba
->fcf
.failover_rec
.flag
& RECORD_VALID
)) {
2402 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2403 "2782 No suitable FCF found: "
2405 phba
->fcoe_eventtag_at_fcf_scan
,
2406 bf_get(lpfc_fcf_record_fcf_index
,
2408 spin_lock_irq(&phba
->hbalock
);
2409 if (phba
->hba_flag
& HBA_DEVLOSS_TMO
) {
2410 phba
->hba_flag
&= ~FCF_TS_INPROG
;
2411 spin_unlock_irq(&phba
->hbalock
);
2412 /* Unregister in-use FCF and rescan */
2413 lpfc_printf_log(phba
, KERN_INFO
,
2415 "2864 On devloss tmo "
2416 "unreg in-use FCF and "
2417 "rescan FCF table\n");
2418 lpfc_unregister_fcf_rescan(phba
);
2422 * Let next new FCF event trigger fast failover
2424 phba
->hba_flag
&= ~FCF_TS_INPROG
;
2425 spin_unlock_irq(&phba
->hbalock
);
2429 * It has found a suitable FCF record that is not
2430 * the same as in-use FCF record, unregister the
2431 * in-use FCF record, replace the in-use FCF record
2432 * with the new FCF record, mark FCF fast failover
2433 * completed, and then start register the new FCF
2437 /* Unregister the current in-use FCF record */
2438 lpfc_unregister_fcf(phba
);
2440 /* Replace in-use record with the new record */
2441 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2442 "2842 Replace in-use FCF (x%x) "
2443 "with failover FCF (x%x)\n",
2444 phba
->fcf
.current_rec
.fcf_indx
,
2445 phba
->fcf
.failover_rec
.fcf_indx
);
2446 memcpy(&phba
->fcf
.current_rec
,
2447 &phba
->fcf
.failover_rec
,
2448 sizeof(struct lpfc_fcf_rec
));
2450 * Mark the fast FCF failover rediscovery completed
2451 * and the start of the first round of the roundrobin
2454 spin_lock_irq(&phba
->hbalock
);
2455 phba
->fcf
.fcf_flag
&= ~FCF_REDISC_FOV
;
2456 spin_unlock_irq(&phba
->hbalock
);
2457 /* Register to the new FCF record */
2458 lpfc_register_fcf(phba
);
2461 * In case of transaction period to fast FCF failover,
2462 * do nothing when search to the end of the FCF table.
2464 if ((phba
->fcf
.fcf_flag
& FCF_REDISC_EVT
) ||
2465 (phba
->fcf
.fcf_flag
& FCF_REDISC_PEND
))
2468 if (phba
->cfg_fcf_failover_policy
== LPFC_FCF_FOV
&&
2469 phba
->fcf
.fcf_flag
& FCF_IN_USE
) {
2471 * In case the current in-use FCF record no
2472 * longer existed during FCF discovery that
2473 * was not triggered by fast FCF failover
2474 * process, treat it as fast FCF failover.
2476 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2477 "2841 In-use FCF record (x%x) "
2478 "not reported, entering fast "
2479 "FCF failover mode scanning.\n",
2480 phba
->fcf
.current_rec
.fcf_indx
);
2481 spin_lock_irq(&phba
->hbalock
);
2482 phba
->fcf
.fcf_flag
|= FCF_REDISC_FOV
;
2483 spin_unlock_irq(&phba
->hbalock
);
2484 lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
2485 LPFC_FCOE_FCF_GET_FIRST
);
2488 /* Register to the new FCF record */
2489 lpfc_register_fcf(phba
);
2492 lpfc_sli4_fcf_scan_read_fcf_rec(phba
, next_fcf_index
);
2496 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
2497 lpfc_register_fcf(phba
);
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister the currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 **/
static void
2518 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2520 struct fcf_record
*new_fcf_record
;
2521 uint32_t boot_flag
, addr_mode
;
2522 uint16_t next_fcf_index
, fcf_index
;
2523 uint16_t current_fcf_index
;
2527 /* If link state is not up, stop the roundrobin failover process */
2528 if (phba
->link_state
< LPFC_LINK_UP
) {
2529 spin_lock_irq(&phba
->hbalock
);
2530 phba
->fcf
.fcf_flag
&= ~FCF_DISCOVERY
;
2531 phba
->hba_flag
&= ~FCF_RR_INPROG
;
2532 spin_unlock_irq(&phba
->hbalock
);
2536 /* Parse the FCF record from the non-embedded mailbox command */
2537 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2539 if (!new_fcf_record
) {
2540 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
2541 "2766 Mailbox command READ_FCF_RECORD "
2542 "failed to retrieve a FCF record.\n");
2546 /* Get the needed parameters from FCF record */
2547 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2548 &addr_mode
, &vlan_id
);
2550 /* Log the FCF record information if turned on */
2551 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2554 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2556 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2557 "2848 Remove ineligible FCF (x%x) from "
2558 "from roundrobin bmask\n", fcf_index
);
2559 /* Clear roundrobin bmask bit for ineligible FCF */
2560 lpfc_sli4_fcf_rr_index_clear(phba
, fcf_index
);
2561 /* Perform next round of roundrobin FCF failover */
2562 fcf_index
= lpfc_sli4_fcf_rr_next_index_get(phba
);
2563 rc
= lpfc_sli4_fcf_rr_next_proc(phba
->pport
, fcf_index
);
2569 if (fcf_index
== phba
->fcf
.current_rec
.fcf_indx
) {
2570 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2571 "2760 Perform FLOGI roundrobin FCF failover: "
2572 "FCF (x%x) back to FCF (x%x)\n",
2573 phba
->fcf
.current_rec
.fcf_indx
, fcf_index
);
2574 /* Wait 500 ms before retrying FLOGI to current FCF */
2576 lpfc_issue_init_vfi(phba
->pport
);
2580 /* Upload new FCF record to the failover FCF record */
2581 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2582 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2583 phba
->fcf
.failover_rec
.fcf_indx
, fcf_index
);
2584 spin_lock_irq(&phba
->hbalock
);
2585 __lpfc_update_fcf_record(phba
, &phba
->fcf
.failover_rec
,
2586 new_fcf_record
, addr_mode
, vlan_id
,
2587 (boot_flag
? BOOT_ENABLE
: 0));
2588 spin_unlock_irq(&phba
->hbalock
);
2590 current_fcf_index
= phba
->fcf
.current_rec
.fcf_indx
;
2592 /* Unregister the current in-use FCF record */
2593 lpfc_unregister_fcf(phba
);
2595 /* Replace in-use record with the new record */
2596 memcpy(&phba
->fcf
.current_rec
, &phba
->fcf
.failover_rec
,
2597 sizeof(struct lpfc_fcf_rec
));
2599 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2600 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2601 "(x%x) to FCF (x%x)\n", current_fcf_index
, fcf_index
);
2604 lpfc_register_fcf(phba
);
2606 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 **/
static void
2621 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2623 struct fcf_record
*new_fcf_record
;
2624 uint32_t boot_flag
, addr_mode
;
2625 uint16_t fcf_index
, next_fcf_index
;
2629 /* If link state is not up, no need to proceed */
2630 if (phba
->link_state
< LPFC_LINK_UP
)
2633 /* If FCF discovery period is over, no need to proceed */
2634 if (!(phba
->fcf
.fcf_flag
& FCF_DISCOVERY
))
2637 /* Parse the FCF record from the non-embedded mailbox command */
2638 new_fcf_record
= lpfc_sli4_fcf_rec_mbox_parse(phba
, mboxq
,
2640 if (!new_fcf_record
) {
2641 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
2642 "2767 Mailbox command READ_FCF_RECORD "
2643 "failed to retrieve a FCF record.\n");
2647 /* Check the connection list for eligibility */
2648 rc
= lpfc_match_fcf_conn_list(phba
, new_fcf_record
, &boot_flag
,
2649 &addr_mode
, &vlan_id
);
2651 /* Log the FCF record information if turned on */
2652 lpfc_sli4_log_fcf_record_info(phba
, new_fcf_record
, vlan_id
,
2658 /* Update the eligible FCF record index bmask */
2659 fcf_index
= bf_get(lpfc_fcf_record_fcf_index
, new_fcf_record
);
2661 rc
= lpfc_sli4_fcf_pri_list_add(phba
, fcf_index
, new_fcf_record
);
2664 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vfi mailbox command.
 **/
static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	/*
	 * VFI not supported on interface type 0, just do the flogi
	 * Also continue if the VFI is in use - just use the same one.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2891 Init VFI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	lpfc_initial_flogi(vport);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vfi mailbox command to initialize the VFI and
 * VPI for the physical port.
 **/
void
2709 lpfc_issue_init_vfi(struct lpfc_vport
*vport
)
2711 LPFC_MBOXQ_t
*mboxq
;
2713 struct lpfc_hba
*phba
= vport
->phba
;
2715 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2717 lpfc_printf_vlog(vport
, KERN_ERR
,
2718 LOG_MBOX
, "2892 Failed to allocate "
2719 "init_vfi mailbox\n");
2722 lpfc_init_vfi(mboxq
, vport
);
2723 mboxq
->mbox_cmpl
= lpfc_init_vfi_cmpl
;
2724 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
2725 if (rc
== MBX_NOT_FINISHED
) {
2726 lpfc_printf_vlog(vport
, KERN_ERR
,
2727 LOG_MBOX
, "2893 Failed to issue init_vfi mailbox\n");
2728 mempool_free(mboxq
, vport
->phba
->mbox_mem_pool
);
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 **/
static void
2740 lpfc_init_vpi_cmpl(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2742 struct lpfc_vport
*vport
= mboxq
->vport
;
2743 struct lpfc_nodelist
*ndlp
;
2744 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2746 if (mboxq
->u
.mb
.mbxStatus
) {
2747 lpfc_printf_vlog(vport
, KERN_ERR
,
2749 "2609 Init VPI mailbox failed 0x%x\n",
2750 mboxq
->u
.mb
.mbxStatus
);
2751 mempool_free(mboxq
, phba
->mbox_mem_pool
);
2752 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2755 spin_lock_irq(shost
->host_lock
);
2756 vport
->fc_flag
&= ~FC_VPORT_NEEDS_INIT_VPI
;
2757 spin_unlock_irq(shost
->host_lock
);
2759 /* If this port is physical port or FDISC is done, do reg_vpi */
2760 if ((phba
->pport
== vport
) || (vport
->port_state
== LPFC_FDISC
)) {
2761 ndlp
= lpfc_findnode_did(vport
, Fabric_DID
);
2763 lpfc_printf_vlog(vport
, KERN_ERR
,
2765 "2731 Cannot find fabric "
2766 "controller node\n");
2768 lpfc_register_new_vport(phba
, vport
, ndlp
);
2769 mempool_free(mboxq
, phba
->mbox_mem_pool
);
2773 if (phba
->link_flag
& LS_NPIV_FAB_SUPPORTED
)
2774 lpfc_initial_fdisc(vport
);
2776 lpfc_vport_set_state(vport
, FC_VPORT_NO_FABRIC_SUPP
);
2777 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
2778 "2606 No NPIV Fabric support\n");
2780 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize
 * VPI for the vport.
 **/
static void
2792 lpfc_issue_init_vpi(struct lpfc_vport
*vport
)
2794 LPFC_MBOXQ_t
*mboxq
;
2797 mboxq
= mempool_alloc(vport
->phba
->mbox_mem_pool
, GFP_KERNEL
);
2799 lpfc_printf_vlog(vport
, KERN_ERR
,
2800 LOG_MBOX
, "2607 Failed to allocate "
2801 "init_vpi mailbox\n");
2804 lpfc_init_vpi(vport
->phba
, mboxq
, vport
->vpi
);
2805 mboxq
->vport
= vport
;
2806 mboxq
->mbox_cmpl
= lpfc_init_vpi_cmpl
;
2807 rc
= lpfc_sli_issue_mbox(vport
->phba
, mboxq
, MBX_NOWAIT
);
2808 if (rc
== MBX_NOT_FINISHED
) {
2809 lpfc_printf_vlog(vport
, KERN_ERR
,
2810 LOG_MBOX
, "2608 Failed to issue init_vpi mailbox\n");
2811 mempool_free(mboxq
, vport
->phba
->mbox_mem_pool
);
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 **/
void
2823 lpfc_start_fdiscs(struct lpfc_hba
*phba
)
2825 struct lpfc_vport
**vports
;
2828 vports
= lpfc_create_vport_work_array(phba
);
2829 if (vports
!= NULL
) {
2830 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
2831 if (vports
[i
]->port_type
== LPFC_PHYSICAL_PORT
)
2833 /* There are no vpi for this vport */
2834 if (vports
[i
]->vpi
> phba
->max_vpi
) {
2835 lpfc_vport_set_state(vports
[i
],
2839 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
2840 lpfc_vport_set_state(vports
[i
],
2844 if (vports
[i
]->fc_flag
& FC_VPORT_NEEDS_INIT_VPI
) {
2845 lpfc_issue_init_vpi(vports
[i
]);
2848 if (phba
->link_flag
& LS_NPIV_FAB_SUPPORTED
)
2849 lpfc_initial_fdisc(vports
[i
]);
2851 lpfc_vport_set_state(vports
[i
],
2852 FC_VPORT_NO_FABRIC_SUPP
);
2853 lpfc_printf_vlog(vports
[i
], KERN_ERR
,
2856 "Fabric support\n");
2860 lpfc_destroy_vport_work_array(phba
, vports
);
2864 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2866 struct lpfc_dmabuf
*dmabuf
= mboxq
->context1
;
2867 struct lpfc_vport
*vport
= mboxq
->vport
;
2868 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2871 * VFI not supported for interface type 0, so ignore any mailbox
2872 * error (except VFI in use) and continue with the discovery.
2874 if (mboxq
->u
.mb
.mbxStatus
&&
2875 (bf_get(lpfc_sli_intf_if_type
, &phba
->sli4_hba
.sli_intf
) !=
2876 LPFC_SLI_INTF_IF_TYPE_0
) &&
2877 mboxq
->u
.mb
.mbxStatus
!= MBX_VFI_IN_USE
) {
2878 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2879 "2018 REG_VFI mbxStatus error x%x "
2881 mboxq
->u
.mb
.mbxStatus
, vport
->port_state
);
2882 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
2883 /* FLOGI failed, use loop map to make discovery list */
2884 lpfc_disc_list_loopmap(vport
);
2885 /* Start discovery */
2886 lpfc_disc_start(vport
);
2889 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2892 /* The VPI is implicitly registered when the VFI is registered */
2893 spin_lock_irq(shost
->host_lock
);
2894 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
2895 vport
->fc_flag
|= FC_VFI_REGISTERED
;
2896 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
2897 vport
->fc_flag
&= ~FC_VPORT_NEEDS_INIT_VPI
;
2898 spin_unlock_irq(shost
->host_lock
);
2900 /* In case SLI4 FC loopback test, we are ready */
2901 if ((phba
->sli_rev
== LPFC_SLI_REV4
) &&
2902 (phba
->link_flag
& LS_LOOPBACK_MODE
)) {
2903 phba
->link_state
= LPFC_HBA_READY
;
2907 if (vport
->port_state
== LPFC_FABRIC_CFG_LINK
) {
2909 * For private loop or for NPort pt2pt,
2910 * just start discovery and we are done.
2912 if ((vport
->fc_flag
& FC_PT2PT
) ||
2913 ((phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) &&
2914 !(vport
->fc_flag
& FC_PUBLIC_LOOP
))) {
2916 /* Use loop map to make discovery list */
2917 lpfc_disc_list_loopmap(vport
);
2918 /* Start discovery */
2919 lpfc_disc_start(vport
);
2921 lpfc_start_fdiscs(phba
);
2922 lpfc_do_scr_ns_plogi(phba
, vport
);
2927 mempool_free(mboxq
, phba
->mbox_mem_pool
);
2928 lpfc_mbuf_free(phba
, dmabuf
->virt
, dmabuf
->phys
);
2934 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2936 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2937 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) pmb
->context1
;
2938 struct lpfc_vport
*vport
= pmb
->vport
;
2941 /* Check for error */
2942 if (mb
->mbxStatus
) {
2943 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
2944 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2945 "0319 READ_SPARAM mbxStatus error x%x "
2947 mb
->mbxStatus
, vport
->port_state
);
2948 lpfc_linkdown(phba
);
2952 memcpy((uint8_t *) &vport
->fc_sparam
, (uint8_t *) mp
->virt
,
2953 sizeof (struct serv_parm
));
2954 lpfc_update_vport_wwn(vport
);
2955 if (vport
->port_type
== LPFC_PHYSICAL_PORT
) {
2956 memcpy(&phba
->wwnn
, &vport
->fc_nodename
, sizeof(phba
->wwnn
));
2957 memcpy(&phba
->wwpn
, &vport
->fc_portname
, sizeof(phba
->wwnn
));
2960 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2962 mempool_free(pmb
, phba
->mbox_mem_pool
);
2966 pmb
->context1
= NULL
;
2967 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2969 lpfc_issue_clear_la(phba
, vport
);
2970 mempool_free(pmb
, phba
->mbox_mem_pool
);
2975 lpfc_mbx_process_link_up(struct lpfc_hba
*phba
, struct lpfc_mbx_read_top
*la
)
2977 struct lpfc_vport
*vport
= phba
->pport
;
2978 LPFC_MBOXQ_t
*sparam_mbox
, *cfglink_mbox
= NULL
;
2979 struct Scsi_Host
*shost
;
2981 struct lpfc_dmabuf
*mp
;
2983 struct fcf_record
*fcf_record
;
2985 spin_lock_irq(&phba
->hbalock
);
2986 switch (bf_get(lpfc_mbx_read_top_link_spd
, la
)) {
2987 case LPFC_LINK_SPEED_1GHZ
:
2988 case LPFC_LINK_SPEED_2GHZ
:
2989 case LPFC_LINK_SPEED_4GHZ
:
2990 case LPFC_LINK_SPEED_8GHZ
:
2991 case LPFC_LINK_SPEED_10GHZ
:
2992 case LPFC_LINK_SPEED_16GHZ
:
2993 phba
->fc_linkspeed
= bf_get(lpfc_mbx_read_top_link_spd
, la
);
2996 phba
->fc_linkspeed
= LPFC_LINK_SPEED_UNKNOWN
;
3000 phba
->fc_topology
= bf_get(lpfc_mbx_read_top_topology
, la
);
3001 phba
->link_flag
&= ~LS_NPIV_FAB_SUPPORTED
;
3003 shost
= lpfc_shost_from_vport(vport
);
3004 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
3005 phba
->sli3_options
&= ~LPFC_SLI3_NPIV_ENABLED
;
3007 /* if npiv is enabled and this adapter supports npiv log
3008 * a message that npiv is not supported in this topology
3010 if (phba
->cfg_enable_npiv
&& phba
->max_vpi
)
3011 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3012 "1309 Link Up Event npiv not supported in loop "
3014 /* Get Loop Map information */
3015 if (bf_get(lpfc_mbx_read_top_il
, la
)) {
3016 spin_lock(shost
->host_lock
);
3017 vport
->fc_flag
|= FC_LBIT
;
3018 spin_unlock(shost
->host_lock
);
3021 vport
->fc_myDID
= bf_get(lpfc_mbx_read_top_alpa_granted
, la
);
3022 i
= la
->lilpBde64
.tus
.f
.bdeSize
;
3025 phba
->alpa_map
[0] = 0;
3027 if (vport
->cfg_log_verbose
& LOG_LINK_EVENT
) {
3038 numalpa
= phba
->alpa_map
[0];
3040 while (j
< numalpa
) {
3041 memset(un
.pamap
, 0, 16);
3042 for (k
= 1; j
< numalpa
; k
++) {
3044 phba
->alpa_map
[j
+ 1];
3049 /* Link Up Event ALPA map */
3050 lpfc_printf_log(phba
,
3053 "1304 Link Up Event "
3054 "ALPA map Data: x%x "
3056 un
.pa
.wd1
, un
.pa
.wd2
,
3057 un
.pa
.wd3
, un
.pa
.wd4
);
3062 if (!(phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)) {
3063 if (phba
->max_vpi
&& phba
->cfg_enable_npiv
&&
3064 (phba
->sli_rev
>= LPFC_SLI_REV3
))
3065 phba
->sli3_options
|= LPFC_SLI3_NPIV_ENABLED
;
3067 vport
->fc_myDID
= phba
->fc_pref_DID
;
3068 spin_lock(shost
->host_lock
);
3069 vport
->fc_flag
|= FC_LBIT
;
3070 spin_unlock(shost
->host_lock
);
3072 spin_unlock_irq(&phba
->hbalock
);
3075 sparam_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3079 rc
= lpfc_read_sparam(phba
, sparam_mbox
, 0);
3081 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
3084 sparam_mbox
->vport
= vport
;
3085 sparam_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_read_sparam
;
3086 rc
= lpfc_sli_issue_mbox(phba
, sparam_mbox
, MBX_NOWAIT
);
3087 if (rc
== MBX_NOT_FINISHED
) {
3088 mp
= (struct lpfc_dmabuf
*) sparam_mbox
->context1
;
3089 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3091 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
3095 if (!(phba
->hba_flag
& HBA_FCOE_MODE
)) {
3096 cfglink_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3099 vport
->port_state
= LPFC_LOCAL_CFG_LINK
;
3100 lpfc_config_link(phba
, cfglink_mbox
);
3101 cfglink_mbox
->vport
= vport
;
3102 cfglink_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_local_config_link
;
3103 rc
= lpfc_sli_issue_mbox(phba
, cfglink_mbox
, MBX_NOWAIT
);
3104 if (rc
== MBX_NOT_FINISHED
) {
3105 mempool_free(cfglink_mbox
, phba
->mbox_mem_pool
);
3109 vport
->port_state
= LPFC_VPORT_UNKNOWN
;
3111 * Add the driver's default FCF record at FCF index 0 now. This
3112 * is phase 1 implementation that support FCF index 0 and driver
3115 if (!(phba
->hba_flag
& HBA_FIP_SUPPORT
)) {
3116 fcf_record
= kzalloc(sizeof(struct fcf_record
),
3118 if (unlikely(!fcf_record
)) {
3119 lpfc_printf_log(phba
, KERN_ERR
,
3121 "2554 Could not allocate memory for "
3127 lpfc_sli4_build_dflt_fcf_record(phba
, fcf_record
,
3128 LPFC_FCOE_FCF_DEF_INDEX
);
3129 rc
= lpfc_sli4_add_fcf_record(phba
, fcf_record
);
3131 lpfc_printf_log(phba
, KERN_ERR
,
3133 "2013 Could not manually add FCF "
3134 "record 0, status %d\n", rc
);
3142 * The driver is expected to do FIP/FCF. Call the port
3143 * and get the FCF Table.
3145 spin_lock_irq(&phba
->hbalock
);
3146 if (phba
->hba_flag
& FCF_TS_INPROG
) {
3147 spin_unlock_irq(&phba
->hbalock
);
3150 /* This is the initial FCF discovery scan */
3151 phba
->fcf
.fcf_flag
|= FCF_INIT_DISC
;
3152 spin_unlock_irq(&phba
->hbalock
);
3153 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
| LOG_DISCOVERY
,
3154 "2778 Start FCF table scan at linkup\n");
3155 rc
= lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
3156 LPFC_FCOE_FCF_GET_FIRST
);
3158 spin_lock_irq(&phba
->hbalock
);
3159 phba
->fcf
.fcf_flag
&= ~FCF_INIT_DISC
;
3160 spin_unlock_irq(&phba
->hbalock
);
3163 /* Reset FCF roundrobin bmask for new discovery */
3164 lpfc_sli4_clear_fcf_rr_bmask(phba
);
3169 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3170 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
3171 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3172 vport
->port_state
, sparam_mbox
, cfglink_mbox
);
3173 lpfc_issue_clear_la(phba
, vport
);
3178 lpfc_enable_la(struct lpfc_hba
*phba
)
3181 struct lpfc_sli
*psli
= &phba
->sli
;
3182 spin_lock_irq(&phba
->hbalock
);
3183 psli
->sli_flag
|= LPFC_PROCESS_LA
;
3184 if (phba
->sli_rev
<= LPFC_SLI_REV3
) {
3185 control
= readl(phba
->HCregaddr
);
3186 control
|= HC_LAINT_ENA
;
3187 writel(control
, phba
->HCregaddr
);
3188 readl(phba
->HCregaddr
); /* flush */
3190 spin_unlock_irq(&phba
->hbalock
);
3194 lpfc_mbx_issue_link_down(struct lpfc_hba
*phba
)
3196 lpfc_linkdown(phba
);
3197 lpfc_enable_la(phba
);
3198 lpfc_unregister_unused_fcf(phba
);
3199 /* turn on Link Attention interrupts - no CLEAR_LA needed */
3204 * This routine handles processing a READ_TOPOLOGY mailbox
3205 * command upon completion. It is setup in the LPFC_MBOXQ
3206 * as the completion routine when the command is
3207 * handed off to the SLI layer.
3210 lpfc_mbx_cmpl_read_topology(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3212 struct lpfc_vport
*vport
= pmb
->vport
;
3213 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3214 struct lpfc_mbx_read_top
*la
;
3215 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3216 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3218 /* Unblock ELS traffic */
3219 phba
->sli
.ring
[LPFC_ELS_RING
].flag
&= ~LPFC_STOP_IOCB_EVENT
;
3220 /* Check for error */
3221 if (mb
->mbxStatus
) {
3222 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
3223 "1307 READ_LA mbox error x%x state x%x\n",
3224 mb
->mbxStatus
, vport
->port_state
);
3225 lpfc_mbx_issue_link_down(phba
);
3226 phba
->link_state
= LPFC_HBA_ERROR
;
3227 goto lpfc_mbx_cmpl_read_topology_free_mbuf
;
3230 la
= (struct lpfc_mbx_read_top
*) &pmb
->u
.mb
.un
.varReadTop
;
3232 memcpy(&phba
->alpa_map
[0], mp
->virt
, 128);
3234 spin_lock_irq(shost
->host_lock
);
3235 if (bf_get(lpfc_mbx_read_top_pb
, la
))
3236 vport
->fc_flag
|= FC_BYPASSED_MODE
;
3238 vport
->fc_flag
&= ~FC_BYPASSED_MODE
;
3239 spin_unlock_irq(shost
->host_lock
);
3241 if ((phba
->fc_eventTag
< la
->eventTag
) ||
3242 (phba
->fc_eventTag
== la
->eventTag
)) {
3243 phba
->fc_stat
.LinkMultiEvent
++;
3244 if (bf_get(lpfc_mbx_read_top_att_type
, la
) == LPFC_ATT_LINK_UP
)
3245 if (phba
->fc_eventTag
!= 0)
3246 lpfc_linkdown(phba
);
3249 phba
->fc_eventTag
= la
->eventTag
;
3250 spin_lock_irq(&phba
->hbalock
);
3251 if (bf_get(lpfc_mbx_read_top_mm
, la
))
3252 phba
->sli
.sli_flag
|= LPFC_MENLO_MAINT
;
3254 phba
->sli
.sli_flag
&= ~LPFC_MENLO_MAINT
;
3255 spin_unlock_irq(&phba
->hbalock
);
3257 phba
->link_events
++;
3258 if ((bf_get(lpfc_mbx_read_top_att_type
, la
) == LPFC_ATT_LINK_UP
) &&
3259 (!bf_get(lpfc_mbx_read_top_mm
, la
))) {
3260 phba
->fc_stat
.LinkUp
++;
3261 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
3262 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3263 "1306 Link Up Event in loop back mode "
3264 "x%x received Data: x%x x%x x%x x%x\n",
3265 la
->eventTag
, phba
->fc_eventTag
,
3266 bf_get(lpfc_mbx_read_top_alpa_granted
,
3268 bf_get(lpfc_mbx_read_top_link_spd
, la
),
3271 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3272 "1303 Link Up Event x%x received "
3273 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3274 la
->eventTag
, phba
->fc_eventTag
,
3275 bf_get(lpfc_mbx_read_top_alpa_granted
,
3277 bf_get(lpfc_mbx_read_top_link_spd
, la
),
3279 bf_get(lpfc_mbx_read_top_mm
, la
),
3280 bf_get(lpfc_mbx_read_top_fa
, la
),
3281 phba
->wait_4_mlo_maint_flg
);
3283 lpfc_mbx_process_link_up(phba
, la
);
3284 } else if (bf_get(lpfc_mbx_read_top_att_type
, la
) ==
3285 LPFC_ATT_LINK_DOWN
) {
3286 phba
->fc_stat
.LinkDown
++;
3287 if (phba
->link_flag
& LS_LOOPBACK_MODE
)
3288 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3289 "1308 Link Down Event in loop back mode "
3291 "Data: x%x x%x x%x\n",
3292 la
->eventTag
, phba
->fc_eventTag
,
3293 phba
->pport
->port_state
, vport
->fc_flag
);
3295 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3296 "1305 Link Down Event x%x received "
3297 "Data: x%x x%x x%x x%x x%x\n",
3298 la
->eventTag
, phba
->fc_eventTag
,
3299 phba
->pport
->port_state
, vport
->fc_flag
,
3300 bf_get(lpfc_mbx_read_top_mm
, la
),
3301 bf_get(lpfc_mbx_read_top_fa
, la
));
3302 lpfc_mbx_issue_link_down(phba
);
3304 if ((bf_get(lpfc_mbx_read_top_mm
, la
)) &&
3305 (bf_get(lpfc_mbx_read_top_att_type
, la
) == LPFC_ATT_LINK_UP
)) {
3306 if (phba
->link_state
!= LPFC_LINK_DOWN
) {
3307 phba
->fc_stat
.LinkDown
++;
3308 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3309 "1312 Link Down Event x%x received "
3310 "Data: x%x x%x x%x\n",
3311 la
->eventTag
, phba
->fc_eventTag
,
3312 phba
->pport
->port_state
, vport
->fc_flag
);
3313 lpfc_mbx_issue_link_down(phba
);
3315 lpfc_enable_la(phba
);
3317 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
3318 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3319 "Data: x%x x%x x%x\n",
3320 la
->eventTag
, phba
->fc_eventTag
,
3321 phba
->pport
->port_state
, vport
->fc_flag
);
3323 * The cmnd that triggered this will be waiting for this
3326 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3327 if (phba
->wait_4_mlo_maint_flg
) {
3328 phba
->wait_4_mlo_maint_flg
= 0;
3329 wake_up_interruptible(&phba
->wait_4_mlo_m_q
);
3333 if (bf_get(lpfc_mbx_read_top_fa
, la
)) {
3334 if (bf_get(lpfc_mbx_read_top_mm
, la
))
3335 lpfc_issue_clear_la(phba
, vport
);
3336 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
3338 bf_get(lpfc_mbx_read_top_fa
, la
));
3341 lpfc_mbx_cmpl_read_topology_free_mbuf
:
3342 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3344 mempool_free(pmb
, phba
->mbox_mem_pool
);
3349 * This routine handles processing a REG_LOGIN mailbox
3350 * command upon completion. It is setup in the LPFC_MBOXQ
3351 * as the completion routine when the command is
3352 * handed off to the SLI layer.
3355 lpfc_mbx_cmpl_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3357 struct lpfc_vport
*vport
= pmb
->vport
;
3358 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3359 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
3360 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3362 pmb
->context1
= NULL
;
3363 pmb
->context2
= NULL
;
3365 if (ndlp
->nlp_flag
& NLP_REG_LOGIN_SEND
)
3366 ndlp
->nlp_flag
&= ~NLP_REG_LOGIN_SEND
;
3368 if (ndlp
->nlp_flag
& NLP_IGNR_REG_CMPL
||
3369 ndlp
->nlp_state
!= NLP_STE_REG_LOGIN_ISSUE
) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * progress.
		 */
3378 spin_lock_irq(shost
->host_lock
);
3379 ndlp
->nlp_flag
&= ~NLP_IGNR_REG_CMPL
;
3380 spin_unlock_irq(shost
->host_lock
);
3382 /* Good status, call state machine */
3383 lpfc_disc_state_machine(vport
, ndlp
, pmb
,
3384 NLP_EVT_CMPL_REG_LOGIN
);
3386 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3388 mempool_free(pmb
, phba
->mbox_mem_pool
);
3389 /* decrement the node reference count held for this callback
3398 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3400 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3401 struct lpfc_vport
*vport
= pmb
->vport
;
3402 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3404 switch (mb
->mbxStatus
) {
3407 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3408 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3411 /* If VPI is busy, reset the HBA */
3413 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NODE
,
3414 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3415 vport
->vpi
, mb
->mbxStatus
);
3416 if (!(phba
->pport
->load_flag
& FC_UNLOADING
))
3417 lpfc_workq_post_event(phba
, NULL
, NULL
,
3418 LPFC_EVT_RESET_HBA
);
3420 spin_lock_irq(shost
->host_lock
);
3421 vport
->vpi_state
&= ~LPFC_VPI_REGISTERED
;
3422 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
3423 spin_unlock_irq(shost
->host_lock
);
3424 vport
->unreg_vpi_cmpl
= VPORT_OK
;
3425 mempool_free(pmb
, phba
->mbox_mem_pool
);
3426 lpfc_cleanup_vports_rrqs(vport
, NULL
);
3428 * This shost reference might have been taken at the beginning of
3429 * lpfc_vport_delete()
3431 if ((vport
->load_flag
& FC_UNLOADING
) && (vport
!= phba
->pport
))
3432 scsi_host_put(shost
);
3436 lpfc_mbx_unreg_vpi(struct lpfc_vport
*vport
)
3438 struct lpfc_hba
*phba
= vport
->phba
;
3442 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3446 lpfc_unreg_vpi(phba
, vport
->vpi
, mbox
);
3447 mbox
->vport
= vport
;
3448 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_unreg_vpi
;
3449 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
3450 if (rc
== MBX_NOT_FINISHED
) {
3451 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
| LOG_VPORT
,
3452 "1800 Could not issue unreg_vpi\n");
3453 mempool_free(mbox
, phba
->mbox_mem_pool
);
3454 vport
->unreg_vpi_cmpl
= VPORT_ERROR
;
3461 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3463 struct lpfc_vport
*vport
= pmb
->vport
;
3464 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3465 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3467 switch (mb
->mbxStatus
) {
3471 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3472 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3474 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3475 spin_lock_irq(shost
->host_lock
);
3476 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
3477 spin_unlock_irq(shost
->host_lock
);
3478 vport
->fc_myDID
= 0;
3482 spin_lock_irq(shost
->host_lock
);
3483 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
3484 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
3485 spin_unlock_irq(shost
->host_lock
);
3486 vport
->num_disc_nodes
= 0;
3487 /* go thru NPR list and issue ELS PLOGIs */
3488 if (vport
->fc_npr_cnt
)
3489 lpfc_els_disc_plogi(vport
);
3491 if (!vport
->num_disc_nodes
) {
3492 spin_lock_irq(shost
->host_lock
);
3493 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
3494 spin_unlock_irq(shost
->host_lock
);
3495 lpfc_can_disctmo(vport
);
3497 vport
->port_state
= LPFC_VPORT_READY
;
3500 mempool_free(pmb
, phba
->mbox_mem_pool
);
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
3513 lpfc_create_static_vport(struct lpfc_hba
*phba
)
3515 LPFC_MBOXQ_t
*pmb
= NULL
;
3517 struct static_vport_info
*vport_info
;
3518 int mbx_wait_rc
= 0, i
;
3519 struct fc_vport_identifiers vport_id
;
3520 struct fc_vport
*new_fc_vport
;
3521 struct Scsi_Host
*shost
;
3522 struct lpfc_vport
*vport
;
3523 uint16_t offset
= 0;
3524 uint8_t *vport_buff
;
3525 struct lpfc_dmabuf
*mp
;
3526 uint32_t byte_count
= 0;
3528 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3530 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3531 "0542 lpfc_create_static_vport failed to"
3532 " allocate mailbox memory\n");
3535 memset(pmb
, 0, sizeof(LPFC_MBOXQ_t
));
3538 vport_info
= kzalloc(sizeof(struct static_vport_info
), GFP_KERNEL
);
3540 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3541 "0543 lpfc_create_static_vport failed to"
3542 " allocate vport_info\n");
3543 mempool_free(pmb
, phba
->mbox_mem_pool
);
3547 vport_buff
= (uint8_t *) vport_info
;
3549 /* free dma buffer from previous round */
3550 if (pmb
->context1
) {
3551 mp
= (struct lpfc_dmabuf
*)pmb
->context1
;
3552 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3555 if (lpfc_dump_static_vport(phba
, pmb
, offset
))
3558 pmb
->vport
= phba
->pport
;
3559 mbx_wait_rc
= lpfc_sli_issue_mbox_wait(phba
, pmb
,
3562 if ((mbx_wait_rc
!= MBX_SUCCESS
) || mb
->mbxStatus
) {
3563 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
3564 "0544 lpfc_create_static_vport failed to"
3565 " issue dump mailbox command ret 0x%x "
3567 mbx_wait_rc
, mb
->mbxStatus
);
3571 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3572 byte_count
= pmb
->u
.mqe
.un
.mb_words
[5];
3573 mp
= (struct lpfc_dmabuf
*)pmb
->context1
;
3574 if (byte_count
> sizeof(struct static_vport_info
) -
3576 byte_count
= sizeof(struct static_vport_info
)
3578 memcpy(vport_buff
+ offset
, mp
->virt
, byte_count
);
3579 offset
+= byte_count
;
3581 if (mb
->un
.varDmp
.word_cnt
>
3582 sizeof(struct static_vport_info
) - offset
)
3583 mb
->un
.varDmp
.word_cnt
=
3584 sizeof(struct static_vport_info
)
3586 byte_count
= mb
->un
.varDmp
.word_cnt
;
3587 lpfc_sli_pcimem_bcopy(((uint8_t *)mb
) + DMP_RSP_OFFSET
,
3588 vport_buff
+ offset
,
3591 offset
+= byte_count
;
3594 } while (byte_count
&&
3595 offset
< sizeof(struct static_vport_info
));
3598 if ((le32_to_cpu(vport_info
->signature
) != VPORT_INFO_SIG
) ||
3599 ((le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
)
3600 != VPORT_INFO_REV
)) {
3601 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3602 "0545 lpfc_create_static_vport bad"
3603 " information header 0x%x 0x%x\n",
3604 le32_to_cpu(vport_info
->signature
),
3605 le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
);
3610 shost
= lpfc_shost_from_vport(phba
->pport
);
3612 for (i
= 0; i
< MAX_STATIC_VPORT_COUNT
; i
++) {
3613 memset(&vport_id
, 0, sizeof(vport_id
));
3614 vport_id
.port_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwpn
);
3615 vport_id
.node_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwnn
);
3616 if (!vport_id
.port_name
|| !vport_id
.node_name
)
3619 vport_id
.roles
= FC_PORT_ROLE_FCP_INITIATOR
;
3620 vport_id
.vport_type
= FC_PORTTYPE_NPIV
;
3621 vport_id
.disable
= false;
3622 new_fc_vport
= fc_vport_create(shost
, 0, &vport_id
);
3624 if (!new_fc_vport
) {
3625 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
3626 "0546 lpfc_create_static_vport failed to"
3631 vport
= *(struct lpfc_vport
**)new_fc_vport
->dd_data
;
3632 vport
->vport_flag
|= STATIC_VPORT
;
3637 if (mbx_wait_rc
!= MBX_TIMEOUT
) {
3638 if (pmb
->context1
) {
3639 mp
= (struct lpfc_dmabuf
*)pmb
->context1
;
3640 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3643 mempool_free(pmb
, phba
->mbox_mem_pool
);
3650 * This routine handles processing a Fabric REG_LOGIN mailbox
3651 * command upon completion. It is setup in the LPFC_MBOXQ
3652 * as the completion routine when the command is
3653 * handed off to the SLI layer.
3656 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3658 struct lpfc_vport
*vport
= pmb
->vport
;
3659 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3660 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3661 struct lpfc_nodelist
*ndlp
;
3662 struct Scsi_Host
*shost
;
3664 ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
3665 pmb
->context1
= NULL
;
3666 pmb
->context2
= NULL
;
3668 if (mb
->mbxStatus
) {
3669 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
3670 "0258 Register Fabric login error: 0x%x\n",
3672 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3674 mempool_free(pmb
, phba
->mbox_mem_pool
);
3676 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
3677 /* FLOGI failed, use loop map to make discovery list */
3678 lpfc_disc_list_loopmap(vport
);
3680 /* Start discovery */
3681 lpfc_disc_start(vport
);
3682 /* Decrement the reference count to ndlp after the
3683 * reference to the ndlp are done.
3689 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3690 /* Decrement the reference count to ndlp after the reference
3691 * to the ndlp are done.
3697 if (phba
->sli_rev
< LPFC_SLI_REV4
)
3698 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
3699 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
3700 ndlp
->nlp_type
|= NLP_FABRIC
;
3701 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
3703 if (vport
->port_state
== LPFC_FABRIC_CFG_LINK
) {
		/* when the physical port receives a logo, do not start
		 * vport discovery */
3706 if (!(vport
->fc_flag
& FC_LOGO_RCVD_DID_CHNG
))
3707 lpfc_start_fdiscs(phba
);
3709 shost
= lpfc_shost_from_vport(vport
);
3710 spin_lock_irq(shost
->host_lock
);
3711 vport
->fc_flag
&= ~FC_LOGO_RCVD_DID_CHNG
;
3712 spin_unlock_irq(shost
->host_lock
);
3714 lpfc_do_scr_ns_plogi(phba
, vport
);
3717 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3719 mempool_free(pmb
, phba
->mbox_mem_pool
);
3721 /* Drop the reference count from the mbox at the end after
3722 * all the current reference to the ndlp have been done.
3729 * This routine handles processing a NameServer REG_LOGIN mailbox
3730 * command upon completion. It is setup in the LPFC_MBOXQ
3731 * as the completion routine when the command is
3732 * handed off to the SLI layer.
3735 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
3737 MAILBOX_t
*mb
= &pmb
->u
.mb
;
3738 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
3739 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
3740 struct lpfc_vport
*vport
= pmb
->vport
;
3742 pmb
->context1
= NULL
;
3743 pmb
->context2
= NULL
;
3745 if (mb
->mbxStatus
) {
3747 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
3748 "0260 Register NameServer error: 0x%x\n",
3750 /* decrement the node reference count held for this
3751 * callback function.
3754 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3756 mempool_free(pmb
, phba
->mbox_mem_pool
);
3758 /* If no other thread is using the ndlp, free it */
3759 lpfc_nlp_not_used(ndlp
);
3761 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
3763 * RegLogin failed, use loop map to make discovery
3766 lpfc_disc_list_loopmap(vport
);
3768 /* Start discovery */
3769 lpfc_disc_start(vport
);
3772 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3776 if (phba
->sli_rev
< LPFC_SLI_REV4
)
3777 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
3778 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
3779 ndlp
->nlp_type
|= NLP_FABRIC
;
3780 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
3782 if (vport
->port_state
< LPFC_VPORT_READY
) {
3783 /* Link up discovery requires Fabric registration. */
3784 lpfc_ns_cmd(vport
, SLI_CTNS_RFF_ID
, 0, 0); /* Do this first! */
3785 lpfc_ns_cmd(vport
, SLI_CTNS_RNN_ID
, 0, 0);
3786 lpfc_ns_cmd(vport
, SLI_CTNS_RSNN_NN
, 0, 0);
3787 lpfc_ns_cmd(vport
, SLI_CTNS_RSPN_ID
, 0, 0);
3788 lpfc_ns_cmd(vport
, SLI_CTNS_RFT_ID
, 0, 0);
3790 /* Issue SCR just before NameServer GID_FT Query */
3791 lpfc_issue_els_scr(vport
, SCR_DID
, 0);
3794 vport
->fc_ns_retry
= 0;
3795 /* Good status, issue CT Request to NameServer */
3796 if (lpfc_ns_cmd(vport
, SLI_CTNS_GID_FT
, 0, 0)) {
3797 /* Cannot issue NameServer Query, so finish up discovery */
3801 /* decrement the node reference count held for this
3802 * callback function.
3805 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3807 mempool_free(pmb
, phba
->mbox_mem_pool
);
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport %p role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3184 rport unregister x%06x, rport %p\n",
			 ndlp->nlp_DID, rport);

	fc_remote_port_delete(rport);

	return;
}
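
/*
 * Note: lpfc_unregister_remote_port() only removes the FC transport rport.
 * The rport/node linkage (ndlp->rport and rdata->pnode) is deliberately
 * left intact here and is broken later, in lpfc_nlp_remove().
 */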
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
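
/*
 * The per-state counters above stay consistent only when every state change
 * decrements the old state and increments the new one, e.g. (a sketch of
 * what lpfc_nlp_set_state() does further below):
 *
 *	lpfc_nlp_counters(vport, old_state, -1);
 *	ndlp->nlp_state = state;
 *	lpfc_nlp_counters(vport, state, 1);
 */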
3932 lpfc_nlp_state_cleanup(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
3933 int old_state
, int new_state
)
3935 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3937 if (new_state
== NLP_STE_UNMAPPED_NODE
) {
3938 ndlp
->nlp_flag
&= ~NLP_NODEV_REMOVE
;
3939 ndlp
->nlp_type
|= NLP_FC_NODE
;
3941 if (new_state
== NLP_STE_MAPPED_NODE
)
3942 ndlp
->nlp_flag
&= ~NLP_NODEV_REMOVE
;
3943 if (new_state
== NLP_STE_NPR_NODE
)
3944 ndlp
->nlp_flag
&= ~NLP_RCV_PLOGI
;
3946 /* Transport interface */
3947 if (ndlp
->rport
&& (old_state
== NLP_STE_MAPPED_NODE
||
3948 old_state
== NLP_STE_UNMAPPED_NODE
)) {
3949 vport
->phba
->nport_event_cnt
++;
3950 lpfc_unregister_remote_port(ndlp
);
3953 if (new_state
== NLP_STE_MAPPED_NODE
||
3954 new_state
== NLP_STE_UNMAPPED_NODE
) {
3955 vport
->phba
->nport_event_cnt
++;
3957 * Tell the fc transport about the port, if we haven't
3958 * already. If we have, and it's a scsi entity, be
3959 * sure to unblock any attached scsi devices
3961 lpfc_register_remote_port(vport
, ndlp
);
3963 if ((new_state
== NLP_STE_MAPPED_NODE
) &&
3964 (vport
->stat_data_enabled
)) {
3966 * A new target is discovered, if there is no buffer for
3967 * statistical data collection allocate buffer.
3969 ndlp
->lat_data
= kcalloc(LPFC_MAX_BUCKET_COUNT
,
3970 sizeof(struct lpfc_scsicmd_bkt
),
3973 if (!ndlp
->lat_data
)
3974 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NODE
,
3975 "0286 lpfc_nlp_state_cleanup failed to "
3976 "allocate statistical data buffer DID "
3977 "0x%x\n", ndlp
->nlp_DID
);
3980 * if we added to Mapped list, but the remote port
3981 * registration failed or assigned a target id outside
3982 * our presentable range - move the node to the
3985 if (new_state
== NLP_STE_MAPPED_NODE
&&
3987 ndlp
->rport
->scsi_target_id
== -1 ||
3988 ndlp
->rport
->scsi_target_id
>= LPFC_MAX_TARGET
)) {
3989 spin_lock_irq(shost
->host_lock
);
3990 ndlp
->nlp_flag
|= NLP_TGT_NO_SCSIID
;
3991 spin_unlock_irq(shost
->host_lock
);
3992 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_LOGO_ISSUE] = "LOGO",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
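
/*
 * For example, lpfc_nlp_state_name(buf, sizeof(buf), NLP_STE_PLOGI_ISSUE)
 * fills buf with "PLOGI", while an out-of-range state yields "unknown (<n>)".
 */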
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int  old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
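
/*
 * lpfc_dequeue_node() removes the ndlp from the vport's fc_nodes list,
 * while lpfc_disable_node() leaves it on the list; both drop the state
 * counter and run lpfc_nlp_state_cleanup() toward NLP_STE_UNUSED_NODE.
 */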
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of the @ndlp might go beyond the existence of @vport, as the
 * final release of the ndlp is determined by its reference count. And, the
 * operation on @ndlp needs the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
}
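
/*
 * Callers are expected to zero the ndlp before lpfc_initialize_node():
 * lpfc_nlp_init() clears the whole structure and lpfc_enable_node()
 * clears everything past the embedded list_head, so only the fields set
 * above are guaranteed to be (re)initialized here.
 */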
4127 struct lpfc_nodelist
*
4128 lpfc_enable_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
4131 struct lpfc_hba
*phba
= vport
->phba
;
4133 unsigned long flags
;
4138 spin_lock_irqsave(&phba
->ndlp_lock
, flags
);
4139 /* The ndlp should not be in memory free mode */
4140 if (NLP_CHK_FREE_REQ(ndlp
)) {
4141 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
4142 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
4143 "0277 lpfc_enable_node: ndlp:x%p "
4144 "usgmap:x%x refcnt:%d\n",
4145 (void *)ndlp
, ndlp
->nlp_usg_map
,
4146 atomic_read(&ndlp
->kref
.refcount
));
4149 /* The ndlp should not already be in active mode */
4150 if (NLP_CHK_NODE_ACT(ndlp
)) {
4151 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
4152 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
4153 "0278 lpfc_enable_node: ndlp:x%p "
4154 "usgmap:x%x refcnt:%d\n",
4155 (void *)ndlp
, ndlp
->nlp_usg_map
,
4156 atomic_read(&ndlp
->kref
.refcount
));
4160 /* Keep the original DID */
4161 did
= ndlp
->nlp_DID
;
4163 /* re-initialize ndlp except of ndlp linked list pointer */
4164 memset((((char *)ndlp
) + sizeof (struct list_head
)), 0,
4165 sizeof (struct lpfc_nodelist
) - sizeof (struct list_head
));
4166 lpfc_initialize_node(vport
, ndlp
, did
);
4168 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
4170 if (state
!= NLP_STE_UNUSED_NODE
)
4171 lpfc_nlp_set_state(vport
, ndlp
, state
);
4173 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_NODE
,
4174 "node enable: did:x%x",
4175 ndlp
->nlp_DID
, 0, 0);
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is kept as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
4275 * Check specified ring for outstanding IOCB on the SLI queue
4276 * Return true if iocb matches the specified nport
4279 lpfc_check_sli_ndlp(struct lpfc_hba
*phba
,
4280 struct lpfc_sli_ring
*pring
,
4281 struct lpfc_iocbq
*iocb
,
4282 struct lpfc_nodelist
*ndlp
)
4284 struct lpfc_sli
*psli
= &phba
->sli
;
4285 IOCB_t
*icmd
= &iocb
->iocb
;
4286 struct lpfc_vport
*vport
= ndlp
->vport
;
4288 if (iocb
->vport
!= vport
)
4291 if (pring
->ringno
== LPFC_ELS_RING
) {
4292 switch (icmd
->ulpCommand
) {
4293 case CMD_GEN_REQUEST64_CR
:
4294 if (iocb
->context_un
.ndlp
== ndlp
)
4296 case CMD_ELS_REQUEST64_CR
:
4297 if (icmd
->un
.elsreq64
.remoteID
== ndlp
->nlp_DID
)
4299 case CMD_XMIT_ELS_RSP64_CX
:
4300 if (iocb
->context1
== (uint8_t *) ndlp
)
4303 } else if (pring
->ringno
== psli
->extra_ring
) {
4305 } else if (pring
->ringno
== psli
->fcp_ring
) {
4306 /* Skip match check if waiting to relogin to FCP target */
4307 if ((ndlp
->nlp_type
& NLP_FCP_TARGET
) &&
4308 (ndlp
->nlp_flag
& NLP_DELAY_TMO
)) {
4311 if (icmd
->ulpContext
== (volatile ushort
)ndlp
->nlp_rpi
) {
4314 } else if (pring
->ringno
== psli
->next_ring
) {
4321 * Free resources / clean up outstanding I/Os
4322 * associated with nlp_rpi in the LPFC_NODELIST entry.
4325 lpfc_no_rpi(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
4327 LIST_HEAD(completions
);
4328 struct lpfc_sli
*psli
;
4329 struct lpfc_sli_ring
*pring
;
4330 struct lpfc_iocbq
*iocb
, *next_iocb
;
4333 lpfc_fabric_abort_nport(ndlp
);
4336 * Everything that matches on txcmplq will be returned
4337 * by firmware with a no rpi error.
4340 if (ndlp
->nlp_flag
& NLP_RPI_REGISTERED
) {
4341 /* Now process each ring */
4342 for (i
= 0; i
< psli
->num_rings
; i
++) {
4343 pring
= &psli
->ring
[i
];
4345 spin_lock_irq(&phba
->hbalock
);
4346 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txq
,
4349 * Check to see if iocb matches the nport we are
4352 if ((lpfc_check_sli_ndlp(phba
, pring
, iocb
,
4354 /* It matches, so deque and call compl
4356 list_move_tail(&iocb
->list
,
4360 spin_unlock_irq(&phba
->hbalock
);
4364 /* Cancel all the IOCBs from the completions list */
4365 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the unreg_rpi.
 **/
static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->context1);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
}
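
/*
 * lpfc_nlp_logo_unreg() is installed as the mailbox completion handler in
 * lpfc_unreg_rpi() below when NLP_ISSUE_LOGO is set, so the LOGO is only
 * sent once the UNREG_LOGIN mailbox command has completed.
 */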
4392 * Free rpi associated with LPFC_NODELIST entry.
4393 * This routine is called from lpfc_freenode(), when we are removing
4394 * a LPFC_NODELIST entry. It is also called if the driver initiates a
4395 * LOGO that completes successfully, and we are waiting to PLOGI back
4396 * to the remote NPort. In addition, it is called after we receive
4397 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
4398 * we are waiting to PLOGI back to the remote NPort.
4401 lpfc_unreg_rpi(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
4403 struct lpfc_hba
*phba
= vport
->phba
;
4408 if (ndlp
->nlp_flag
& NLP_RPI_REGISTERED
) {
4409 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4411 /* SLI4 ports require the physical rpi value. */
4412 rpi
= ndlp
->nlp_rpi
;
4413 if (phba
->sli_rev
== LPFC_SLI_REV4
)
4414 rpi
= phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
];
4416 lpfc_unreg_login(phba
, vport
->vpi
, rpi
, mbox
);
4417 mbox
->vport
= vport
;
4418 if (ndlp
->nlp_flag
& NLP_ISSUE_LOGO
) {
4419 mbox
->context1
= ndlp
;
4420 mbox
->mbox_cmpl
= lpfc_nlp_logo_unreg
;
4422 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4425 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
4426 if (rc
== MBX_NOT_FINISHED
)
4427 mempool_free(mbox
, phba
->mbox_mem_pool
);
4429 lpfc_no_rpi(phba
, ndlp
);
4431 if (phba
->sli_rev
!= LPFC_SLI_REV4
)
4433 ndlp
->nlp_flag
&= ~NLP_RPI_REGISTERED
;
4434 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
4441 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
4442 * @phba: pointer to lpfc hba data structure.
4444 * This routine is invoked to unregister all the currently registered RPIs
4448 lpfc_unreg_hba_rpis(struct lpfc_hba
*phba
)
4450 struct lpfc_vport
**vports
;
4451 struct lpfc_nodelist
*ndlp
;
4452 struct Scsi_Host
*shost
;
4455 vports
= lpfc_create_vport_work_array(phba
);
4457 lpfc_printf_log(phba
, KERN_ERR
, LOG_DISCOVERY
,
4458 "2884 Vport array allocation failed \n");
4461 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
4462 shost
= lpfc_shost_from_vport(vports
[i
]);
4463 spin_lock_irq(shost
->host_lock
);
4464 list_for_each_entry(ndlp
, &vports
[i
]->fc_nodes
, nlp_listp
) {
4465 if (ndlp
->nlp_flag
& NLP_RPI_REGISTERED
) {
4466 /* The mempool_alloc might sleep */
4467 spin_unlock_irq(shost
->host_lock
);
4468 lpfc_unreg_rpi(vports
[i
], ndlp
);
4469 spin_lock_irq(shost
->host_lock
);
4472 spin_unlock_irq(shost
->host_lock
);
4474 lpfc_destroy_vport_work_array(phba
, vports
);
4478 lpfc_unreg_all_rpis(struct lpfc_vport
*vport
)
4480 struct lpfc_hba
*phba
= vport
->phba
;
4484 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
4485 lpfc_sli4_unreg_all_rpis(vport
);
4489 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4491 lpfc_unreg_login(phba
, vport
->vpi
, LPFC_UNREG_ALL_RPIS_VPORT
,
4493 mbox
->vport
= vport
;
4494 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4495 mbox
->context1
= NULL
;
4496 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
4497 if (rc
!= MBX_TIMEOUT
)
4498 mempool_free(mbox
, phba
->mbox_mem_pool
);
4500 if ((rc
== MBX_TIMEOUT
) || (rc
== MBX_NOT_FINISHED
))
4501 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
| LOG_VPORT
,
4502 "1836 Could not issue "
4503 "unreg_login(all_rpis) status %d\n", rc
);
4508 lpfc_unreg_default_rpis(struct lpfc_vport
*vport
)
4510 struct lpfc_hba
*phba
= vport
->phba
;
4514 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4516 lpfc_unreg_did(phba
, vport
->vpi
, LPFC_UNREG_ALL_DFLT_RPIS
,
4518 mbox
->vport
= vport
;
4519 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4520 mbox
->context1
= NULL
;
4521 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
4522 if (rc
!= MBX_TIMEOUT
)
4523 mempool_free(mbox
, phba
->mbox_mem_pool
);
4525 if ((rc
== MBX_TIMEOUT
) || (rc
== MBX_NOT_FINISHED
))
4526 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
| LOG_VPORT
,
4527 "1815 Could not issue "
4528 "unreg_did (default rpis) status %d\n",
4534 * Free resources associated with LPFC_NODELIST entry
4535 * so it can be freed.
4538 lpfc_cleanup_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
4540 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4541 struct lpfc_hba
*phba
= vport
->phba
;
4542 LPFC_MBOXQ_t
*mb
, *nextmb
;
4543 struct lpfc_dmabuf
*mp
;
4545 /* Cleanup node for NPort <nlp_DID> */
4546 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
4547 "0900 Cleanup node for NPort x%x "
4548 "Data: x%x x%x x%x\n",
4549 ndlp
->nlp_DID
, ndlp
->nlp_flag
,
4550 ndlp
->nlp_state
, ndlp
->nlp_rpi
);
4551 if (NLP_CHK_FREE_REQ(ndlp
)) {
4552 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
4553 "0280 lpfc_cleanup_node: ndlp:x%p "
4554 "usgmap:x%x refcnt:%d\n",
4555 (void *)ndlp
, ndlp
->nlp_usg_map
,
4556 atomic_read(&ndlp
->kref
.refcount
));
4557 lpfc_dequeue_node(vport
, ndlp
);
4559 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
4560 "0281 lpfc_cleanup_node: ndlp:x%p "
4561 "usgmap:x%x refcnt:%d\n",
4562 (void *)ndlp
, ndlp
->nlp_usg_map
,
4563 atomic_read(&ndlp
->kref
.refcount
));
4564 lpfc_disable_node(vport
, ndlp
);
4568 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
4570 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
4571 if ((mb
= phba
->sli
.mbox_active
)) {
4572 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) &&
4573 !(mb
->mbox_flag
& LPFC_MBX_IMED_UNREG
) &&
4574 (ndlp
== (struct lpfc_nodelist
*) mb
->context2
)) {
4575 mb
->context2
= NULL
;
4576 mb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4580 spin_lock_irq(&phba
->hbalock
);
4581 /* Cleanup REG_LOGIN completions which are not yet processed */
4582 list_for_each_entry(mb
, &phba
->sli
.mboxq_cmpl
, list
) {
4583 if ((mb
->u
.mb
.mbxCommand
!= MBX_REG_LOGIN64
) ||
4584 (mb
->mbox_flag
& LPFC_MBX_IMED_UNREG
) ||
4585 (ndlp
!= (struct lpfc_nodelist
*) mb
->context2
))
4588 mb
->context2
= NULL
;
4589 mb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4592 list_for_each_entry_safe(mb
, nextmb
, &phba
->sli
.mboxq
, list
) {
4593 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) &&
4594 !(mb
->mbox_flag
& LPFC_MBX_IMED_UNREG
) &&
4595 (ndlp
== (struct lpfc_nodelist
*) mb
->context2
)) {
4596 mp
= (struct lpfc_dmabuf
*) (mb
->context1
);
4598 __lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
4601 list_del(&mb
->list
);
4602 mempool_free(mb
, phba
->mbox_mem_pool
);
4603 /* We shall not invoke the lpfc_nlp_put to decrement
4604 * the ndlp reference count as we are in the process
4605 * of lpfc_nlp_release.
4609 spin_unlock_irq(&phba
->hbalock
);
4611 lpfc_els_abort(phba
, ndlp
);
4613 spin_lock_irq(shost
->host_lock
);
4614 ndlp
->nlp_flag
&= ~NLP_DELAY_TMO
;
4615 spin_unlock_irq(shost
->host_lock
);
4617 ndlp
->nlp_last_elscmd
= 0;
4618 del_timer_sync(&ndlp
->nlp_delayfunc
);
4620 list_del_init(&ndlp
->els_retry_evt
.evt_listp
);
4621 list_del_init(&ndlp
->dev_loss_evt
.evt_listp
);
4622 lpfc_cleanup_vports_rrqs(vport
, ndlp
);
4623 lpfc_unreg_rpi(vport
, ndlp
);
4629 * Check to see if we can free the nlp back to the freelist.
4630 * If we are in the middle of using the nlp in the discovery state
4631 * machine, defer the free till we reach the end of the state machine.
4634 lpfc_nlp_remove(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
4636 struct lpfc_hba
*phba
= vport
->phba
;
4637 struct lpfc_rport_data
*rdata
;
4641 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
4642 if ((ndlp
->nlp_flag
& NLP_DEFER_RM
) &&
4643 !(ndlp
->nlp_flag
& NLP_REG_LOGIN_SEND
) &&
4644 !(ndlp
->nlp_flag
& NLP_RPI_REGISTERED
)) {
4645 /* For this case we need to cleanup the default rpi
4646 * allocated by the firmware.
4648 if ((mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
))
4650 rc
= lpfc_reg_rpi(phba
, vport
->vpi
, ndlp
->nlp_DID
,
4651 (uint8_t *) &vport
->fc_sparam
, mbox
, ndlp
->nlp_rpi
);
4653 mempool_free(mbox
, phba
->mbox_mem_pool
);
4656 mbox
->mbox_flag
|= LPFC_MBX_IMED_UNREG
;
4657 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_dflt_rpi
;
4658 mbox
->vport
= vport
;
4659 mbox
->context2
= ndlp
;
4660 rc
=lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
4661 if (rc
== MBX_NOT_FINISHED
) {
4662 mempool_free(mbox
, phba
->mbox_mem_pool
);
4667 lpfc_cleanup_node(vport
, ndlp
);
4670 * We can get here with a non-NULL ndlp->rport because when we
4671 * unregister a rport we don't break the rport/node linkage. So if we
4672 * do, make sure we don't leaving any dangling pointers behind.
4675 rdata
= ndlp
->rport
->dd_data
;
4676 rdata
->pnode
= NULL
;
4682 lpfc_matchdid(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
4685 D_ID mydid
, ndlpdid
, matchdid
;
4687 if (did
== Bcast_DID
)
4690 /* First check for Direct match */
4691 if (ndlp
->nlp_DID
== did
)
4694 /* Next check for area/domain identically equals 0 match */
4695 mydid
.un
.word
= vport
->fc_myDID
;
4696 if ((mydid
.un
.b
.domain
== 0) && (mydid
.un
.b
.area
== 0)) {
4700 matchdid
.un
.word
= did
;
4701 ndlpdid
.un
.word
= ndlp
->nlp_DID
;
4702 if (matchdid
.un
.b
.id
== ndlpdid
.un
.b
.id
) {
4703 if ((mydid
.un
.b
.domain
== matchdid
.un
.b
.domain
) &&
4704 (mydid
.un
.b
.area
== matchdid
.un
.b
.area
)) {
4705 if ((ndlpdid
.un
.b
.domain
== 0) &&
4706 (ndlpdid
.un
.b
.area
== 0)) {
4707 if (ndlpdid
.un
.b
.id
)
4713 matchdid
.un
.word
= ndlp
->nlp_DID
;
4714 if ((mydid
.un
.b
.domain
== ndlpdid
.un
.b
.domain
) &&
4715 (mydid
.un
.b
.area
== ndlpdid
.un
.b
.area
)) {
4716 if ((matchdid
.un
.b
.domain
== 0) &&
4717 (matchdid
.un
.b
.area
== 0)) {
4718 if (matchdid
.un
.b
.id
)
4726 /* Search for a nodelist entry */
4727 static struct lpfc_nodelist
*
4728 __lpfc_findnode_did(struct lpfc_vport
*vport
, uint32_t did
)
4730 struct lpfc_nodelist
*ndlp
;
4733 list_for_each_entry(ndlp
, &vport
->fc_nodes
, nlp_listp
) {
4734 if (lpfc_matchdid(vport
, ndlp
, did
)) {
4735 data1
= (((uint32_t) ndlp
->nlp_state
<< 24) |
4736 ((uint32_t) ndlp
->nlp_xri
<< 16) |
4737 ((uint32_t) ndlp
->nlp_type
<< 8) |
4738 ((uint32_t) ndlp
->nlp_rpi
& 0xff));
4739 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
4740 "0929 FIND node DID "
4741 "Data: x%p x%x x%x x%x\n",
4742 ndlp
, ndlp
->nlp_DID
,
4743 ndlp
->nlp_flag
, data1
);
4748 /* FIND node did <did> NOT FOUND */
4749 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
4750 "0932 FIND node did x%x NOT FOUND.\n", did
);
4754 struct lpfc_nodelist
*
4755 lpfc_findnode_did(struct lpfc_vport
*vport
, uint32_t did
)
4757 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4758 struct lpfc_nodelist
*ndlp
;
4759 unsigned long iflags
;
4761 spin_lock_irqsave(shost
->host_lock
, iflags
);
4762 ndlp
= __lpfc_findnode_did(vport
, did
);
4763 spin_unlock_irqrestore(shost
->host_lock
, iflags
);
4767 struct lpfc_nodelist
*
4768 lpfc_setup_disc_node(struct lpfc_vport
*vport
, uint32_t did
)
4770 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4771 struct lpfc_nodelist
*ndlp
;
4773 ndlp
= lpfc_findnode_did(vport
, did
);
4775 if ((vport
->fc_flag
& FC_RSCN_MODE
) != 0 &&
4776 lpfc_rscn_payload_check(vport
, did
) == 0)
4778 ndlp
= (struct lpfc_nodelist
*)
4779 mempool_alloc(vport
->phba
->nlp_mem_pool
, GFP_KERNEL
);
4782 lpfc_nlp_init(vport
, ndlp
, did
);
4783 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
4784 spin_lock_irq(shost
->host_lock
);
4785 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
4786 spin_unlock_irq(shost
->host_lock
);
4788 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
4789 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_NPR_NODE
);
4792 spin_lock_irq(shost
->host_lock
);
4793 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
4794 spin_unlock_irq(shost
->host_lock
);
4798 if ((vport
->fc_flag
& FC_RSCN_MODE
) &&
4799 !(vport
->fc_flag
& FC_NDISC_ACTIVE
)) {
4800 if (lpfc_rscn_payload_check(vport
, did
)) {
4801 /* If we've already received a PLOGI from this NPort
4802 * we don't need to try to discover it again.
4804 if (ndlp
->nlp_flag
& NLP_RCV_PLOGI
)
4807 /* Since this node is marked for discovery,
4808 * delay timeout is not needed.
4810 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
4811 spin_lock_irq(shost
->host_lock
);
4812 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
4813 spin_unlock_irq(shost
->host_lock
);
4817 /* If we've already received a PLOGI from this NPort,
4818 * or we are already in the process of discovery on it,
4819 * we don't need to try to discover it again.
4821 if (ndlp
->nlp_state
== NLP_STE_ADISC_ISSUE
||
4822 ndlp
->nlp_state
== NLP_STE_PLOGI_ISSUE
||
4823 ndlp
->nlp_flag
& NLP_RCV_PLOGI
)
4825 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
4826 spin_lock_irq(shost
->host_lock
);
4827 ndlp
->nlp_flag
|= NLP_NPR_2B_DISC
;
4828 spin_unlock_irq(shost
->host_lock
);
4833 /* Build a list of nodes to discover based on the loopmap */
4835 lpfc_disc_list_loopmap(struct lpfc_vport
*vport
)
4837 struct lpfc_hba
*phba
= vport
->phba
;
4839 uint32_t alpa
, index
;
4841 if (!lpfc_is_link_up(phba
))
4844 if (phba
->fc_topology
!= LPFC_TOPOLOGY_LOOP
)
4847 /* Check for loop map present or not */
4848 if (phba
->alpa_map
[0]) {
4849 for (j
= 1; j
<= phba
->alpa_map
[0]; j
++) {
4850 alpa
= phba
->alpa_map
[j
];
4851 if (((vport
->fc_myDID
& 0xff) == alpa
) || (alpa
== 0))
4853 lpfc_setup_disc_node(vport
, alpa
);
4856 /* No alpamap, so try all alpa's */
4857 for (j
= 0; j
< FC_MAXLOOP
; j
++) {
4858 /* If cfg_scan_down is set, start from highest
4859 * ALPA (0xef) to lowest (0x1).
4861 if (vport
->cfg_scan_down
)
4864 index
= FC_MAXLOOP
- j
- 1;
4865 alpa
= lpfcAlpaArray
[index
];
4866 if ((vport
->fc_myDID
& 0xff) == alpa
)
4868 lpfc_setup_disc_node(vport
, alpa
);
4875 lpfc_issue_clear_la(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
)
4878 struct lpfc_sli
*psli
= &phba
->sli
;
4879 struct lpfc_sli_ring
*extra_ring
= &psli
->ring
[psli
->extra_ring
];
4880 struct lpfc_sli_ring
*fcp_ring
= &psli
->ring
[psli
->fcp_ring
];
4881 struct lpfc_sli_ring
*next_ring
= &psli
->ring
[psli
->next_ring
];
4885 * if it's not a physical port or if we already send
4886 * clear_la then don't send it.
4888 if ((phba
->link_state
>= LPFC_CLEAR_LA
) ||
4889 (vport
->port_type
!= LPFC_PHYSICAL_PORT
) ||
4890 (phba
->sli_rev
== LPFC_SLI_REV4
))
4893 /* Link up discovery */
4894 if ((mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
)) != NULL
) {
4895 phba
->link_state
= LPFC_CLEAR_LA
;
4896 lpfc_clear_la(phba
, mbox
);
4897 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_clear_la
;
4898 mbox
->vport
= vport
;
4899 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
4900 if (rc
== MBX_NOT_FINISHED
) {
4901 mempool_free(mbox
, phba
->mbox_mem_pool
);
4902 lpfc_disc_flush_list(vport
);
4903 extra_ring
->flag
&= ~LPFC_STOP_IOCB_EVENT
;
4904 fcp_ring
->flag
&= ~LPFC_STOP_IOCB_EVENT
;
4905 next_ring
->flag
&= ~LPFC_STOP_IOCB_EVENT
;
4906 phba
->link_state
= LPFC_HBA_ERROR
;
4911 /* Reg_vpi to tell firmware to resume normal operations */
4913 lpfc_issue_reg_vpi(struct lpfc_hba
*phba
, struct lpfc_vport
*vport
)
4915 LPFC_MBOXQ_t
*regvpimbox
;
4917 regvpimbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4919 lpfc_reg_vpi(vport
, regvpimbox
);
4920 regvpimbox
->mbox_cmpl
= lpfc_mbx_cmpl_reg_vpi
;
4921 regvpimbox
->vport
= vport
;
4922 if (lpfc_sli_issue_mbox(phba
, regvpimbox
, MBX_NOWAIT
)
4923 == MBX_NOT_FINISHED
) {
4924 mempool_free(regvpimbox
, phba
->mbox_mem_pool
);
4929 /* Start Link up / RSCN discovery on NPR nodes */
4931 lpfc_disc_start(struct lpfc_vport
*vport
)
4933 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
4934 struct lpfc_hba
*phba
= vport
->phba
;
4936 uint32_t clear_la_pending
;
4939 if (!lpfc_is_link_up(phba
))
4942 if (phba
->link_state
== LPFC_CLEAR_LA
)
4943 clear_la_pending
= 1;
4945 clear_la_pending
= 0;
4947 if (vport
->port_state
< LPFC_VPORT_READY
)
4948 vport
->port_state
= LPFC_DISC_AUTH
;
4950 lpfc_set_disctmo(vport
);
4952 if (vport
->fc_prevDID
== vport
->fc_myDID
)
4957 vport
->fc_prevDID
= vport
->fc_myDID
;
4958 vport
->num_disc_nodes
= 0;
4960 /* Start Discovery state <hba_state> */
4961 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
4962 "0202 Start Discovery hba state x%x "
4963 "Data: x%x x%x x%x\n",
4964 vport
->port_state
, vport
->fc_flag
, vport
->fc_plogi_cnt
,
4965 vport
->fc_adisc_cnt
);
4967 /* First do ADISCs - if any */
4968 num_sent
= lpfc_els_disc_adisc(vport
);
4973 /* Register the VPI for SLI3, NON-NPIV only. */
4974 if ((phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
) &&
4975 !(vport
->fc_flag
& FC_PT2PT
) &&
4976 !(vport
->fc_flag
& FC_RSCN_MODE
) &&
4977 (phba
->sli_rev
< LPFC_SLI_REV4
)) {
4978 lpfc_issue_reg_vpi(phba
, vport
);
4983 * For SLI2, we need to set port_state to READY and continue
4986 if (vport
->port_state
< LPFC_VPORT_READY
&& !clear_la_pending
) {
4987 /* If we get here, there is nothing to ADISC */
4988 if (vport
->port_type
== LPFC_PHYSICAL_PORT
)
4989 lpfc_issue_clear_la(phba
, vport
);
4991 if (!(vport
->fc_flag
& FC_ABORT_DISCOVERY
)) {
4992 vport
->num_disc_nodes
= 0;
4993 /* go thru NPR nodes and issue ELS PLOGIs */
4994 if (vport
->fc_npr_cnt
)
4995 lpfc_els_disc_plogi(vport
);
4997 if (!vport
->num_disc_nodes
) {
4998 spin_lock_irq(shost
->host_lock
);
4999 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
5000 spin_unlock_irq(shost
->host_lock
);
5001 lpfc_can_disctmo(vport
);
5004 vport
->port_state
= LPFC_VPORT_READY
;
5006 /* Next do PLOGIs - if any */
5007 num_sent
= lpfc_els_disc_plogi(vport
);
5012 if (vport
->fc_flag
& FC_RSCN_MODE
) {
5013 /* Check to see if more RSCNs came in while we
5014 * were processing this one.
5016 if ((vport
->fc_rscn_id_cnt
== 0) &&
5017 (!(vport
->fc_flag
& FC_RSCN_DISCOVERY
))) {
5018 spin_lock_irq(shost
->host_lock
);
5019 vport
->fc_flag
&= ~FC_RSCN_MODE
;
5020 spin_unlock_irq(shost
->host_lock
);
5021 lpfc_can_disctmo(vport
);
5023 lpfc_els_handle_rscn(vport
);
5030 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
5031 * ring the match the sppecified nodelist.
5034 lpfc_free_tx(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
5036 LIST_HEAD(completions
);
5037 struct lpfc_sli
*psli
;
5039 struct lpfc_iocbq
*iocb
, *next_iocb
;
5040 struct lpfc_sli_ring
*pring
;
5043 pring
= &psli
->ring
[LPFC_ELS_RING
];
5045 /* Error matching iocb on txq or txcmplq
5046 * First check the txq.
5048 spin_lock_irq(&phba
->hbalock
);
5049 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txq
, list
) {
5050 if (iocb
->context1
!= ndlp
) {
5054 if ((icmd
->ulpCommand
== CMD_ELS_REQUEST64_CR
) ||
5055 (icmd
->ulpCommand
== CMD_XMIT_ELS_RSP64_CX
)) {
5057 list_move_tail(&iocb
->list
, &completions
);
5061 /* Next check the txcmplq */
5062 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txcmplq
, list
) {
5063 if (iocb
->context1
!= ndlp
) {
5067 if (icmd
->ulpCommand
== CMD_ELS_REQUEST64_CR
||
5068 icmd
->ulpCommand
== CMD_XMIT_ELS_RSP64_CX
) {
5069 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
);
5072 spin_unlock_irq(&phba
->hbalock
);
5074 /* Cancel all the IOCBs from the completions list */
5075 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
5080 lpfc_disc_flush_list(struct lpfc_vport
*vport
)
5082 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
5083 struct lpfc_hba
*phba
= vport
->phba
;
5085 if (vport
->fc_plogi_cnt
|| vport
->fc_adisc_cnt
) {
5086 list_for_each_entry_safe(ndlp
, next_ndlp
, &vport
->fc_nodes
,
5088 if (!NLP_CHK_NODE_ACT(ndlp
))
5090 if (ndlp
->nlp_state
== NLP_STE_PLOGI_ISSUE
||
5091 ndlp
->nlp_state
== NLP_STE_ADISC_ISSUE
) {
5092 lpfc_free_tx(phba
, ndlp
);
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
5144 lpfc_disc_timeout_handler(struct lpfc_vport
*vport
)
5146 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
5147 struct lpfc_hba
*phba
= vport
->phba
;
5148 struct lpfc_sli
*psli
= &phba
->sli
;
5149 struct lpfc_nodelist
*ndlp
, *next_ndlp
;
5150 LPFC_MBOXQ_t
*initlinkmbox
;
5151 int rc
, clrlaerr
= 0;
5153 if (!(vport
->fc_flag
& FC_DISC_TMO
))
5156 spin_lock_irq(shost
->host_lock
);
5157 vport
->fc_flag
&= ~FC_DISC_TMO
;
5158 spin_unlock_irq(shost
->host_lock
);
5160 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
5161 "disc timeout: state:x%x rtry:x%x flg:x%x",
5162 vport
->port_state
, vport
->fc_ns_retry
, vport
->fc_flag
);
5164 switch (vport
->port_state
) {
5166 case LPFC_LOCAL_CFG_LINK
:
5167 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
5171 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_DISCOVERY
,
5172 "0221 FAN timeout\n");
5173 /* Start discovery by sending FLOGI, clean up old rpis */
5174 list_for_each_entry_safe(ndlp
, next_ndlp
, &vport
->fc_nodes
,
5176 if (!NLP_CHK_NODE_ACT(ndlp
))
5178 if (ndlp
->nlp_state
!= NLP_STE_NPR_NODE
)
5180 if (ndlp
->nlp_type
& NLP_FABRIC
) {
5181 /* Clean up the ndlp on Fabric connections */
5182 lpfc_drop_node(vport
, ndlp
);
5184 } else if (!(ndlp
->nlp_flag
& NLP_NPR_ADISC
)) {
5185 /* Fail outstanding IO now since device
5186 * is marked for PLOGI.
5188 lpfc_unreg_rpi(vport
, ndlp
);
5191 if (vport
->port_state
!= LPFC_FLOGI
) {
5192 if (phba
->sli_rev
<= LPFC_SLI_REV3
)
5193 lpfc_initial_flogi(vport
);
5195 lpfc_issue_init_vfi(vport
);
5202 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5203 /* Initial FLOGI timeout */
5204 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5205 "0222 Initial %s timeout\n",
5206 vport
->vpi
? "FDISC" : "FLOGI");
5208 /* Assume no Fabric and go on with discovery.
5209 * Check for outstanding ELS FLOGI to abort.
5212 /* FLOGI failed, so just use loop map to make discovery list */
5213 lpfc_disc_list_loopmap(vport
);
5215 /* Start discovery */
5216 lpfc_disc_start(vport
);
5219 case LPFC_FABRIC_CFG_LINK
:
5220 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5222 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5223 "0223 Timeout while waiting for "
5224 "NameServer login\n");
5225 /* Next look for NameServer ndlp */
5226 ndlp
= lpfc_findnode_did(vport
, NameServer_DID
);
5227 if (ndlp
&& NLP_CHK_NODE_ACT(ndlp
))
5228 lpfc_els_abort(phba
, ndlp
);
5230 /* ReStart discovery */
5234 /* Check for wait for NameServer Rsp timeout */
5235 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5236 "0224 NameServer Query timeout "
5238 vport
->fc_ns_retry
, LPFC_MAX_NS_RETRY
);
5240 if (vport
->fc_ns_retry
< LPFC_MAX_NS_RETRY
) {
5241 /* Try it one more time */
5242 vport
->fc_ns_retry
++;
5243 rc
= lpfc_ns_cmd(vport
, SLI_CTNS_GID_FT
,
5244 vport
->fc_ns_retry
, 0);
5248 vport
->fc_ns_retry
= 0;
5252 * Discovery is over.
5253 * set port_state to PORT_READY if SLI2.
5254 * cmpl_reg_vpi will set port_state to READY for SLI3.
5256 if (phba
->sli_rev
< LPFC_SLI_REV4
) {
5257 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)
5258 lpfc_issue_reg_vpi(phba
, vport
);
5260 lpfc_issue_clear_la(phba
, vport
);
5261 vport
->port_state
= LPFC_VPORT_READY
;
5265 /* Setup and issue mailbox INITIALIZE LINK command */
5266 initlinkmbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5267 if (!initlinkmbox
) {
5268 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5269 "0206 Device Discovery "
5270 "completion error\n");
5271 phba
->link_state
= LPFC_HBA_ERROR
;
5275 lpfc_linkdown(phba
);
5276 lpfc_init_link(phba
, initlinkmbox
, phba
->cfg_topology
,
5277 phba
->cfg_link_speed
);
5278 initlinkmbox
->u
.mb
.un
.varInitLnk
.lipsr_AL_PA
= 0;
5279 initlinkmbox
->vport
= vport
;
5280 initlinkmbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
5281 rc
= lpfc_sli_issue_mbox(phba
, initlinkmbox
, MBX_NOWAIT
);
5282 lpfc_set_loopback_flag(phba
);
5283 if (rc
== MBX_NOT_FINISHED
)
5284 mempool_free(initlinkmbox
, phba
->mbox_mem_pool
);
5288 case LPFC_DISC_AUTH
:
5289 /* Node Authentication timeout */
5290 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5291 "0227 Node Authentication timeout\n");
5292 lpfc_disc_flush_list(vport
);
5295 * set port_state to PORT_READY if SLI2.
5296 * cmpl_reg_vpi will set port_state to READY for SLI3.
5298 if (phba
->sli_rev
< LPFC_SLI_REV4
) {
5299 if (phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)
5300 lpfc_issue_reg_vpi(phba
, vport
);
5301 else { /* NPIV Not enabled */
5302 lpfc_issue_clear_la(phba
, vport
);
5303 vport
->port_state
= LPFC_VPORT_READY
;
5308 case LPFC_VPORT_READY
:
5309 if (vport
->fc_flag
& FC_RSCN_MODE
) {
5310 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5311 "0231 RSCN timeout Data: x%x "
5313 vport
->fc_ns_retry
, LPFC_MAX_NS_RETRY
);
5315 /* Cleanup any outstanding ELS commands */
5316 lpfc_els_flush_cmd(vport
);
5318 lpfc_els_flush_rscn(vport
);
5319 lpfc_disc_flush_list(vport
);
5324 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5325 "0273 Unexpected discovery timeout, "
5326 "vport State x%x\n", vport
->port_state
);
5330 switch (phba
->link_state
) {
5332 /* CLEAR LA timeout */
5333 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5334 "0228 CLEAR LA timeout\n");
5339 lpfc_issue_clear_la(phba
, vport
);
5341 case LPFC_LINK_UNKNOWN
:
5342 case LPFC_WARM_START
:
5343 case LPFC_INIT_START
:
5344 case LPFC_INIT_MBX_CMDS
:
5345 case LPFC_LINK_DOWN
:
5346 case LPFC_HBA_ERROR
:
5347 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
5348 "0230 Unexpected timeout, hba link "
5349 "state x%x\n", phba
->link_state
);
5353 case LPFC_HBA_READY
:
5358 lpfc_disc_flush_list(vport
);
5359 psli
->ring
[(psli
->extra_ring
)].flag
&= ~LPFC_STOP_IOCB_EVENT
;
5360 psli
->ring
[(psli
->fcp_ring
)].flag
&= ~LPFC_STOP_IOCB_EVENT
;
5361 psli
->ring
[(psli
->next_ring
)].flag
&= ~LPFC_STOP_IOCB_EVENT
;
5362 vport
->port_state
= LPFC_VPORT_READY
;
5369 * This routine handles processing a NameServer REG_LOGIN mailbox
5370 * command upon completion. It is setup in the LPFC_MBOXQ
5371 * as the completion routine when the command is
5372 * handed off to the SLI layer.
5375 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
5377 MAILBOX_t
*mb
= &pmb
->u
.mb
;
5378 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
5379 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
5380 struct lpfc_vport
*vport
= pmb
->vport
;
5382 pmb
->context1
= NULL
;
5383 pmb
->context2
= NULL
;
5385 if (phba
->sli_rev
< LPFC_SLI_REV4
)
5386 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
5387 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
5388 ndlp
->nlp_type
|= NLP_FABRIC
;
5389 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
5392 * Start issuing Fabric-Device Management Interface (FDMI) command to
5393 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
5394 * fdmi-on=2 (supporting RPA/hostnmae)
5397 if (vport
->cfg_fdmi_on
== 1)
5398 lpfc_fdmi_cmd(vport
, ndlp
, SLI_MGMT_DHBA
);
5400 mod_timer(&vport
->fc_fdmitmo
, jiffies
+ HZ
* 60);
5402 /* decrement the node reference count held for this callback
5406 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5408 mempool_free(pmb
, phba
->mbox_mem_pool
);
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "Data: x%p x%x x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
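
/*
 * The double-underscore lookup helpers above assume the caller already holds
 * shost->host_lock; lpfc_findnode_wwpn() and lpfc_findnode_rpi() are the
 * locking wrappers around them.
 */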
5496 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5497 * @phba: pointer to lpfc hba data structure.
5498 * @vpi: the physical host virtual N_Port identifier.
5500 * This routine finds a vport on a HBA (referred by @phba) through a
5501 * @vpi. The function walks the HBA's vport list and returns the address
5502 * of the vport with the matching @vpi.
5505 * NULL - No vport with the matching @vpi found
5506 * Otherwise - Address to the vport with the matching @vpi.
5509 lpfc_find_vport_by_vpid(struct lpfc_hba
*phba
, uint16_t vpi
)
5511 struct lpfc_vport
*vport
;
5512 unsigned long flags
;
5515 /* The physical ports are always vpi 0 - translate is unnecessary. */
5518 * Translate the physical vpi to the logical vpi. The
5519 * vport stores the logical vpi.
5521 for (i
= 0; i
< phba
->max_vpi
; i
++) {
5522 if (vpi
== phba
->vpi_ids
[i
])
5526 if (i
>= phba
->max_vpi
) {
5527 lpfc_printf_log(phba
, KERN_ERR
, LOG_ELS
,
5528 "2936 Could not find Vport mapped "
5529 "to vpi %d\n", vpi
);
5534 spin_lock_irqsave(&phba
->hbalock
, flags
);
5535 list_for_each_entry(vport
, &phba
->port_list
, listentry
) {
5536 if (vport
->vpi
== i
) {
5537 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
5541 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
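
/*
 * Unlike lpfc_enable_node(), which preserves the nlp_listp linkage of a
 * recycled ndlp, lpfc_nlp_init() clears the whole object and therefore has
 * to re-initialize nlp_listp after lpfc_initialize_node().
 */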
5561 /* This routine releases all resources associated with a specifc NPort's ndlp
5562 * and mempool_free's the nodelist.
5565 lpfc_nlp_release(struct kref
*kref
)
5567 struct lpfc_hba
*phba
;
5568 unsigned long flags
;
5569 struct lpfc_nodelist
*ndlp
= container_of(kref
, struct lpfc_nodelist
,
5572 lpfc_debugfs_disc_trc(ndlp
->vport
, LPFC_DISC_TRC_NODE
,
5573 "node release: did:x%x flg:x%x type:x%x",
5574 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_type
);
5576 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
, LOG_NODE
,
5577 "0279 lpfc_nlp_release: ndlp:x%p did %x "
5578 "usgmap:x%x refcnt:%d\n",
5579 (void *)ndlp
, ndlp
->nlp_DID
, ndlp
->nlp_usg_map
,
5580 atomic_read(&ndlp
->kref
.refcount
));
5582 /* remove ndlp from action. */
5583 lpfc_nlp_remove(ndlp
->vport
, ndlp
);
5585 /* clear the ndlp active flag for all release cases */
5587 spin_lock_irqsave(&phba
->ndlp_lock
, flags
);
5588 NLP_CLR_NODE_ACT(ndlp
);
5589 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
5590 if (phba
->sli_rev
== LPFC_SLI_REV4
)
5591 lpfc_sli4_free_rpi(phba
, ndlp
->nlp_rpi
);
5593 /* free ndlp memory for final ndlp release */
5594 if (NLP_CHK_FREE_REQ(ndlp
)) {
5595 kfree(ndlp
->lat_data
);
5596 mempool_free(ndlp
, ndlp
->phba
->nlp_mem_pool
);
5600 /* This routine bumps the reference count for a ndlp structure to ensure
5601 * that one discovery thread won't free a ndlp while another discovery thread
5604 struct lpfc_nodelist
*
5605 lpfc_nlp_get(struct lpfc_nodelist
*ndlp
)
5607 struct lpfc_hba
*phba
;
5608 unsigned long flags
;
5611 lpfc_debugfs_disc_trc(ndlp
->vport
, LPFC_DISC_TRC_NODE
,
5612 "node get: did:x%x flg:x%x refcnt:x%x",
5613 ndlp
->nlp_DID
, ndlp
->nlp_flag
,
5614 atomic_read(&ndlp
->kref
.refcount
));
5615 /* The check of ndlp usage to prevent incrementing the
5616 * ndlp reference count that is in the process of being
5620 spin_lock_irqsave(&phba
->ndlp_lock
, flags
);
5621 if (!NLP_CHK_NODE_ACT(ndlp
) || NLP_CHK_FREE_ACK(ndlp
)) {
5622 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
5623 lpfc_printf_vlog(ndlp
->vport
, KERN_WARNING
, LOG_NODE
,
5624 "0276 lpfc_nlp_get: ndlp:x%p "
5625 "usgmap:x%x refcnt:%d\n",
5626 (void *)ndlp
, ndlp
->nlp_usg_map
,
5627 atomic_read(&ndlp
->kref
.refcount
));
5630 kref_get(&ndlp
->kref
);
5631 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
5636 /* This routine decrements the reference count for a ndlp structure. If the
5637 * count goes to 0, this indicates the the associated nodelist should be
5638 * freed. Returning 1 indicates the ndlp resource has been released; on the
5639 * other hand, returning 0 indicates the ndlp resource has not been released
5643 lpfc_nlp_put(struct lpfc_nodelist
*ndlp
)
5645 struct lpfc_hba
*phba
;
5646 unsigned long flags
;
5651 lpfc_debugfs_disc_trc(ndlp
->vport
, LPFC_DISC_TRC_NODE
,
5652 "node put: did:x%x flg:x%x refcnt:x%x",
5653 ndlp
->nlp_DID
, ndlp
->nlp_flag
,
5654 atomic_read(&ndlp
->kref
.refcount
));
5656 spin_lock_irqsave(&phba
->ndlp_lock
, flags
);
5657 /* Check the ndlp memory free acknowledge flag to avoid the
5658 * possible race condition that kref_put got invoked again
5659 * after previous one has done ndlp memory free.
5661 if (NLP_CHK_FREE_ACK(ndlp
)) {
5662 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
5663 lpfc_printf_vlog(ndlp
->vport
, KERN_WARNING
, LOG_NODE
,
5664 "0274 lpfc_nlp_put: ndlp:x%p "
5665 "usgmap:x%x refcnt:%d\n",
5666 (void *)ndlp
, ndlp
->nlp_usg_map
,
5667 atomic_read(&ndlp
->kref
.refcount
));
5670 /* Check the ndlp inactivate log flag to avoid the possible
5671 * race condition that kref_put got invoked again after ndlp
5672 * is already in inactivating state.
5674 if (NLP_CHK_IACT_REQ(ndlp
)) {
5675 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
5676 lpfc_printf_vlog(ndlp
->vport
, KERN_WARNING
, LOG_NODE
,
5677 "0275 lpfc_nlp_put: ndlp:x%p "
5678 "usgmap:x%x refcnt:%d\n",
5679 (void *)ndlp
, ndlp
->nlp_usg_map
,
5680 atomic_read(&ndlp
->kref
.refcount
));
5683 /* For last put, mark the ndlp usage flags to make sure no
5684 * other kref_get and kref_put on the same ndlp shall get
5685 * in between the process when the final kref_put has been
5686 * invoked on this ndlp.
5688 if (atomic_read(&ndlp
->kref
.refcount
) == 1) {
5689 /* Indicate ndlp is put to inactive state. */
5690 NLP_SET_IACT_REQ(ndlp
);
5691 /* Acknowledge ndlp memory free has been seen. */
5692 if (NLP_CHK_FREE_REQ(ndlp
))
5693 NLP_SET_FREE_ACK(ndlp
);
5695 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
5696 /* Note, the kref_put returns 1 when decrementing a reference
5697 * count that was 1, it invokes the release callback function,
5698 * but it still left the reference count as 1 (not actually
5699 * performs the last decrementation). Otherwise, it actually
5700 * decrements the reference count and returns 0.
5702 return kref_put(&ndlp
->kref
, lpfc_nlp_release
);
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
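
/*
 * lpfc_nlp_not_used() is only an opportunistic release: it drops the last
 * reference when the refcount is already down to 1, so a return of 0 simply
 * means some other discovery thread still holds the ndlp.
 */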
5724 * lpfc_fcf_inuse - Check if FCF can be unregistered.
5725 * @phba: Pointer to hba context object.
5727 * This function iterate through all FC nodes associated
5728 * will all vports to check if there is any node with
5729 * fc_rports associated with it. If there is an fc_rport
5730 * associated with the node, then the node is either in
5731 * discovered state or its devloss_timer is pending.
5734 lpfc_fcf_inuse(struct lpfc_hba
*phba
)
5736 struct lpfc_vport
**vports
;
5738 struct lpfc_nodelist
*ndlp
;
5739 struct Scsi_Host
*shost
;
5741 vports
= lpfc_create_vport_work_array(phba
);
5743 /* If driver cannot allocate memory, indicate fcf is in use */
5747 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
5748 shost
= lpfc_shost_from_vport(vports
[i
]);
5749 spin_lock_irq(shost
->host_lock
);
5751 * IF the CVL_RCVD bit is not set then we have sent the
5753 * If dev_loss fires while we are waiting we do not want to
5756 if (!(vports
[i
]->fc_flag
& FC_VPORT_CVL_RCVD
)) {
5757 spin_unlock_irq(shost
->host_lock
);
5761 list_for_each_entry(ndlp
, &vports
[i
]->fc_nodes
, nlp_listp
) {
5762 if (NLP_CHK_NODE_ACT(ndlp
) && ndlp
->rport
&&
5763 (ndlp
->rport
->roles
& FC_RPORT_ROLE_FCP_TARGET
)) {
5765 spin_unlock_irq(shost
->host_lock
);
5767 } else if (ndlp
->nlp_flag
& NLP_RPI_REGISTERED
) {
5769 lpfc_printf_log(phba
, KERN_INFO
, LOG_ELS
,
5770 "2624 RPI %x DID %x flag %x "
5771 "still logged in\n",
5772 ndlp
->nlp_rpi
, ndlp
->nlp_DID
,
5776 spin_unlock_irq(shost
->host_lock
);
5779 lpfc_destroy_vport_work_array(phba
, vports
);
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It unregisters, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues a synchronous unregister FCF mailbox command to the
 * HBA to unregister the currently registered FCF record. The driver does not
 * reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there is any connected remote port for the FCF and,
 * if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
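/*
 * Illustrative sketch, compiled out: the record_count arithmetic used above.
 * conn_hdr->length counts 32-bit words of record data, so the number of
 * records is (length * sizeof(uint32_t)) / sizeof(struct lpfc_fcf_conn_rec).
 * The numeric values below are invented for the example; the record size is
 * not taken from the driver headers.
 */
#if 0
	uint32_t length_in_words = 16;		/* e.g. conn_hdr->length */
	uint32_t bytes = length_in_words * sizeof(uint32_t);	/* 64 bytes */
	uint32_t nrecs = bytes / sizeof(struct lpfc_fcf_conn_rec);
	/* e.g. if the record were 32 bytes, nrecs would be 2 */
#endif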
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
				0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}
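/*
 * Illustrative sketch, compiled out: why the VLAN tag is masked with 0xFFF
 * above.  In an 802.1Q tag the top four bits carry priority/CFI, and only
 * the low 12 bits are the VLAN ID.  The sample value is made up.
 */
#if 0
	uint16_t tag = 0x6064;			/* sample tag in CPU byte order */
	uint16_t vlan_id = tag & 0xFFF;		/* 0x064, i.e. VLAN 100 */
#endif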
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
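/*
 * Illustrative sketch, compiled out: how the TLV walk above advances through
 * config region 23.  Each record is one header word (byte 0 = type, byte 1 =
 * length in 32-bit data words) followed by that many data words, so the next
 * record starts (rec_length + 1) words further on.  The buffer contents and
 * the 0xFF terminator value are assumptions made for this example.
 */
#if 0
	uint8_t region[] = {
		0xA0, 0x01, 0x00, 0x00,		/* record type 0xA0, 1 data word */
		0x11, 0x22, 0x33, 0x44,
		0xA1, 0x02, 0x00, 0x00,		/* record type 0xA1, 2 data words */
		0x55, 0x66, 0x77, 0x88,
		0x99, 0xAA, 0xBB, 0xCC,
		0xFF,				/* assumed LPFC_REGION23_LAST_REC */
	};
	uint8_t *rec = lpfc_get_rec_conf23(region, sizeof(region), 0xA1);
	/* rec now points at &region[8], the header word of the 0xA1 record */
#endif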
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}