/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_disc.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
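/*
 * Illustrative sketch (not part of the original driver source): with
 * scan-down binding on an arbitrated loop, the driver walks
 * lpfcAlpaArray so that the lowest SCSI target id maps to the
 * highest-priority AL_PA (0xEF). Resolving a target id to an ALPA is
 * then a simple bounds-checked table lookup; the helper name below is
 * hypothetical.
 *
 *	static uint8_t example_scsid_to_alpa(uint32_t scsid)
 *	{
 *		if (scsid >= sizeof(lpfcAlpaArray))
 *			return 0;
 *		return lpfcAlpaArray[scsid];
 *	}
 */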
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fire.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);
	return;
}
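/*
 * Illustrative sketch (assumption, not taken verbatim from this file):
 * the routines above are invoked by the FC transport class, which
 * discovers them through the driver's fc_function_template. The wiring
 * looks roughly like the fragment below; the field names come from
 * <scsi/scsi_transport_fc.h>, and the actual template lives elsewhere
 * in the driver (lpfc_attr.c).
 *
 *	struct fc_function_template lpfc_transport_functions = {
 *		...
 *		.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
 *		.terminate_rport_io = lpfc_terminate_rport_io,
 *		...
 *	};
 */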
/*
 * This function is called from the worker thread when dev_loss_tmo
 * fire.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates data
 * structure required for posting event. It also keeps track of
 * number of events pending and prevent event storm when there are
 * too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which need to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from worker thread, when the interrupt
 * context need to post an event. This function posts the event
 * to fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
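/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * code running in interrupt context allocates a throttled event with
 * lpfc_alloc_fast_evt(), fills in the union and the event category,
 * then queues the embedded work_evt on phba->work_list so the worker
 * thread later delivers it through lpfc_send_fastpath_evt() above. If
 * the allocation fails because of the event-storm throttle, the
 * notification is simply dropped.
 *
 *	struct lpfc_fast_path_event *evt;
 *	unsigned long flags;
 *
 *	evt = lpfc_alloc_fast_evt(phba);
 *	if (!evt)
 *		return;
 *	evt->un.fabric_evt.event_type = FC_REG_FABRIC_EVENT;
 *	evt->un.fabric_evt.subcategory = LPFC_EVENT_PORT_BUSY;
 *	evt->vport = vport;
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
 *	lpfc_worker_wake_up(phba);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */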
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
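/*
 * Illustrative sketch (hypothetical caller): management code typically
 * pairs lpfc_workq_post_event() with a completion so it can hand work
 * to the worker thread and block for the result, e.g. to bring the
 * HBA online via the LPFC_EVT_ONLINE case in lpfc_work_list_done():
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl,
 *			      LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 */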
static void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t      *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->vport = vport;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout. port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	unsigned long flags;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	/* If there is a pending FCoE event, restart FCF table scan. */
	if (lpfc_check_pending_fcoe_event(phba, 1)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
static int
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
		       uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
}
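/*
 * Illustrative sketch (for documentation only): the leading
 * underscores mark __lpfc_update_fcf_record() as a lock-context
 * helper, so a caller is expected to hold phba->hbalock across the
 * update, roughly as below (flag bits of 0 leave only RECORD_VALID
 * set):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	__lpfc_update_fcf_record(phba, &phba->fcf.current_rec,
 *				 new_fcf_record, addr_mode, vlan_id, 0);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */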
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the FCF with the HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (phba->pport->port_state != LPFC_FLOGI)
			lpfc_initial_flogi(phba->pport);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
		GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, boot_flag will
 * indicate if this FCF is used by boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return of the function. If no VLAN tagging needs to be
 * used with the FCF, vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	/*
	 * If there are no FCF connection table entry, driver connect to all
	 * FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record report a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * prefered addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a prefered address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = 0xFFFF;

		return 1;
	}

	return 0;
}
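/*
 * Worked example (not in the original source): the lowest-VLAN-id scan
 * at the top of lpfc_match_fcf_conn_list() treats vlan_bitmap as 512
 * bytes covering VLAN ids 0-4095, eight ids per byte, with bit j of
 * byte i standing for VLAN id (i * 8 + j). If vlan_bitmap[0] == 0 and
 * vlan_bitmap[1] == 0x10, the loop stops at i == 1, sets
 * fcf_vlan_id = 8, then shifts past the four clear low bits, ending
 * with fcf_vlan_id = 12.
 */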
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table need to be re-scanned.
 *
 * This function checks whether there is any fcoe event pending while the
 * driver scans FCF entries. If there is any pending event, it will restart
 * the FCF scanning and return 1; otherwise it returns 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP)
		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
	else {
		/*
		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
		 * flag
		 */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
		spin_unlock_irq(&phba->hbalock);
	}

	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_DISCOVERY|LOG_MBOX,
					"2610 UNREG_FCFI mbox allocation failed\n");
			return 1;
		}
		lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
		mbox->vport = phba->pport;
		mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
					"2611 UNREG_FCFI issue mbox failed\n");
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}

	return 1;
}
/**
 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the FCF records available in the
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks off
 * discovery.
 * If FCF_IN_USE flag is set in the currently used FCF, the routine tries to
 * use an FCF record which matches the fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint32_t next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	unsigned long iflags;
	uint16_t vlan_id;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, 0)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		goto out;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &shdr->response);
	/*
	 * The FCF Record was read and there is no reason for the driver
	 * to maintain the FCF record data or memory. Instead, just need
	 * to book keeping the FCFIs can be used.
	 */
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, mbx\n",
					shdr_status, shdr_add_status);
		goto out;
	}

	/* Interpreting the returned information of FCF records */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);

	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      sizeof(struct fcf_record));
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);

	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);
	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry.
	 */
	if (!rc)
		goto read_next_fcf;
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
					new_fcf_record) &&
		    lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
				       new_fcf_record) &&
		    lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
					new_fcf_record) &&
		    lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
				       vlan_id)) {
			phba->fcf.fcf_flag |= FCF_AVAILABLE;
			if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
				/* Stop FCF redisc wait timer if pending */
				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
				/* If in fast failover, mark it's completed */
				phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
		/* Fast FCF failover only to the same fabric name */
		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
					new_fcf_record))
			fcf_rec = &phba->fcf.failover_rec;
		else
			goto read_next_fcf;
	} else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
		    (new_fcf_record->fip_priority < fcf_rec->priority)) {
			/* Choose this FCF record */
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);
			/* replace in-use record with the new record */
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/* mark the FCF fast failover completed */
			spin_lock_irqsave(&phba->hbalock, iflags);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;
			/*
			 * Otherwise, initial scan or post linkdown rescan,
			 * register with the best fit FCF record found so
			 * far through the scanning process.
			 */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_read_fcf_record(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				LOG_MBOX,
				"2609 Init VPI mailbox failed 0x%x\n",
				mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(&phba->hbalock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(&phba->hbalock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					LOG_DISCOVERY,
					"2731 Cannot find fabric "
					"controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the
 * VPI for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS,
						 "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* The VPI is implicitly registered when the VFI is registered */
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;

	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
        int i;
        struct lpfc_dmabuf *mp;
        int rc;
        struct fcf_record *fcf_record;

        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

        spin_lock_irq(&phba->hbalock);
        switch (la->UlnkSpeed) {
        case LA_1GHZ_LINK:
                phba->fc_linkspeed = LA_1GHZ_LINK;
                break;
        case LA_2GHZ_LINK:
                phba->fc_linkspeed = LA_2GHZ_LINK;
                break;
        case LA_4GHZ_LINK:
                phba->fc_linkspeed = LA_4GHZ_LINK;
                break;
        case LA_8GHZ_LINK:
                phba->fc_linkspeed = LA_8GHZ_LINK;
                break;
        case LA_10GHZ_LINK:
                phba->fc_linkspeed = LA_10GHZ_LINK;
                break;
        default:
                phba->fc_linkspeed = LA_UNKNW_LINK;
                break;
        }

        phba->fc_topology = la->topology;
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

        if (phba->fc_topology == TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

                /* if npiv is enabled and this adapter supports npiv log
                 * a message that npiv is not supported in this topology
                 */
                if (phba->cfg_enable_npiv && phba->max_vpi)
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1309 Link Up Event npiv not supported in loop "
                                "topology\n");
                /* Get Loop Map information */
                if (la->il)
                        vport->fc_flag |= FC_LBIT;

                vport->fc_myDID = la->granted_AL_PA;
                i = la->un.lilpBde64.tus.f.bdeSize;

                if (i == 0) {
                        phba->alpa_map[0] = 0;
                } else {
                        if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
                                int numalpa, j, k;
                                union {
                                        uint8_t pamap[16];
                                        struct {
                                                uint32_t wd1;
                                                uint32_t wd2;
                                                uint32_t wd3;
                                                uint32_t wd4;
                                        } pa;
                                } un;
                                numalpa = phba->alpa_map[0];
                                j = 0;
                                while (j < numalpa) {
                                        memset(un.pamap, 0, 16);
                                        for (k = 1; j < numalpa; k++) {
                                                un.pamap[k - 1] =
                                                        phba->alpa_map[j + 1];
                                                j++;
                                                if (k == 16)
                                                        break;
                                        }
                                        /* Link Up Event ALPA map */
                                        lpfc_printf_log(phba,
                                                        KERN_WARNING,
                                                        LOG_LINK_EVENT,
                                                        "1304 Link Up Event "
                                                        "ALPA map Data: x%x "
                                                        "x%x x%x x%x\n",
                                                        un.pa.wd1, un.pa.wd2,
                                                        un.pa.wd3, un.pa.wd4);
                                }
                        }
                }
        } else {
                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
                        if (phba->max_vpi && phba->cfg_enable_npiv &&
                            (phba->sli_rev == 3))
                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
                }
                vport->fc_myDID = phba->fc_pref_DID;
                vport->fc_flag |= FC_LBIT;
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_linkup(phba);
        if (sparam_mbox) {
                lpfc_read_sparam(phba, sparam_mbox, 0);
                sparam_mbox->vport = vport;
                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
                rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
                        goto out;
                }
        }

        if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
                cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!cfglink_mbox)
                        goto out;
                vport->port_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
                cfglink_mbox->vport = vport;
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(cfglink_mbox, phba->mbox_mem_pool);
                        goto out;
                }
        } else {
                vport->port_state = LPFC_VPORT_UNKNOWN;
                /*
                 * Add the driver's default FCF record at FCF index 0 now. This
                 * is phase 1 implementation that support FCF index 0 and driver
                 * defaults.
                 */
                if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
                        fcf_record = kzalloc(sizeof(struct fcf_record),
                                        GFP_KERNEL);
                        if (unlikely(!fcf_record)) {
                                lpfc_printf_log(phba, KERN_ERR,
                                        LOG_MBOX | LOG_SLI,
                                        "2554 Could not allocate memory for "
                                        "fcf record\n");
                                rc = -ENODEV;
                                goto out;
                        }

                        lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
                                                LPFC_FCOE_FCF_DEF_INDEX);
                        rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
                        if (unlikely(rc)) {
                                lpfc_printf_log(phba, KERN_ERR,
                                        LOG_MBOX | LOG_SLI,
                                        "2013 Could not manually add FCF "
                                        "record 0, status %d\n", rc);
                                rc = -ENODEV;
                                kfree(fcf_record);
                                goto out;
                        }
                        kfree(fcf_record);
                }
                /*
                 * The driver is expected to do FIP/FCF. Call the port
                 * and get the FCF Table.
                 */
                spin_lock_irq(&phba->hbalock);
                if (phba->hba_flag & FCF_DISC_INPROGRESS) {
                        spin_unlock_irq(&phba->hbalock);
                        return;
                }
                spin_unlock_irq(&phba->hbalock);
                rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
                if (rc)
                        goto out;
        }

        return;
out:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
                vport->port_state, sparam_mbox, cfglink_mbox);
        lpfc_issue_clear_la(phba, vport);
        return;
}
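
/*
 * Note the locking pattern above: topology, link speed, and DID bookkeeping
 * are updated under hbalock, but the READ_SPARAM and CONFIG_LINK mailboxes
 * are only issued after the lock is dropped, since lpfc_sli_issue_mbox takes
 * hbalock itself. All mailbox failures funnel to the out: label, which fails
 * the vport and issues CLEAR_LA.
 */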
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
        uint32_t control;
        struct lpfc_sli *psli = &phba->sli;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        if (phba->sli_rev <= LPFC_SLI_REV3) {
                control = readl(phba->HCregaddr);
                control |= HC_LAINT_ENA;
                writel(control, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }
        spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
        lpfc_linkdown(phba);
        lpfc_enable_la(phba);
        lpfc_unregister_unused_fcf(phba);
        /* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        READ_LA_VAR *la;
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

        /* Unblock ELS traffic */
        phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "1307 READ_LA mbox error x%x state x%x\n",
                                mb->mbxStatus, vport->port_state);
                lpfc_mbx_issue_link_down(phba);
                phba->link_state = LPFC_HBA_ERROR;
                goto lpfc_mbx_cmpl_read_la_free_mbuf;
        }

        la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;

        memcpy(&phba->alpa_map[0], mp->virt, 128);

        spin_lock_irq(shost->host_lock);
        if (la->pb)
                vport->fc_flag |= FC_BYPASSED_MODE;
        else
                vport->fc_flag &= ~FC_BYPASSED_MODE;
        spin_unlock_irq(shost->host_lock);

        if ((phba->fc_eventTag < la->eventTag) ||
            (phba->fc_eventTag == la->eventTag)) {
                phba->fc_stat.LinkMultiEvent++;
                if (la->attType == AT_LINK_UP)
                        if (phba->fc_eventTag != 0)
                                lpfc_linkdown(phba);
        }

        phba->fc_eventTag = la->eventTag;
        if (la->mm)
                phba->sli.sli_flag |= LPFC_MENLO_MAINT;
        else
                phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;

        phba->link_events++;
        if (la->attType == AT_LINK_UP && (!la->mm)) {
                phba->fc_stat.LinkUp++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1306 Link Up Event in loop back mode "
                                        "x%x received Data: x%x x%x x%x x%x\n",
                                        la->eventTag, phba->fc_eventTag,
                                        la->granted_AL_PA, la->UlnkSpeed,
                                        phba->alpa_map[0]);
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1303 Link Up Event x%x received "
                                        "Data: x%x x%x x%x x%x x%x x%x %d\n",
                                        la->eventTag, phba->fc_eventTag,
                                        la->granted_AL_PA, la->UlnkSpeed,
                                        phba->alpa_map[0],
                                        la->mm, la->fa,
                                        phba->wait_4_mlo_maint_flg);
                }
                lpfc_mbx_process_link_up(phba, la);
        } else if (la->attType == AT_LINK_DOWN) {
                phba->fc_stat.LinkDown++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1308 Link Down Event in loop back mode "
                                "x%x received "
                                "Data: x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1305 Link Down Event x%x received "
                                "Data: x%x x%x x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag,
                                la->mm, la->fa);
                }
                lpfc_mbx_issue_link_down(phba);
        }
        if (la->mm && la->attType == AT_LINK_UP) {
                if (phba->link_state != LPFC_LINK_DOWN) {
                        phba->fc_stat.LinkDown++;
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1312 Link Down Event x%x received "
                                "Data: x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
                        lpfc_mbx_issue_link_down(phba);
                } else
                        lpfc_enable_la(phba);

                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1310 Menlo Maint Mode Link up Event x%x rcvd "
                                "Data: x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
                /*
                 * The cmnd that triggered this will be waiting for this
                 * signal.
                 */
                /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
                if (phba->wait_4_mlo_maint_flg) {
                        phba->wait_4_mlo_maint_flg = 0;
                        wake_up_interruptible(&phba->wait_4_mlo_m_q);
                }
        }

        if (la->fa) {
                if (la->mm)
                        lpfc_issue_clear_la(phba, vport);
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "1311 fa %d\n", la->fa);
        }

lpfc_mbx_cmpl_read_la_free_mbuf:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport  *vport = pmb->vport;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

        pmb->context1 = NULL;

        /* Good status, call state machine */
        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        /* decrement the node reference count held for this callback
         * function.
         */
        lpfc_nlp_put(ndlp);

        return;
}
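
/*
 * Ownership convention for mailbox completions in this file: the handler
 * frees the DMA buffer hung off context1, frees the mailbox back to
 * mbox_mem_pool, and drops the ndlp reference that was taken when the
 * REG_LOGIN was queued. Skipping any one of these leaks memory or keeps
 * the node alive indefinitely.
 */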
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        switch (mb->mbxStatus) {
        case 0x0011:    /* Unsupported feature */
        case 0x9700:    /* Cleared VPI on disabled port */
                /* VPI not supported, by software */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
                                 mb->mbxStatus);
                break;
        }
        spin_lock_irq(&phba->hbalock);
        vport->vpi_state &= ~LPFC_VPI_REGISTERED;
        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(&phba->hbalock);
        vport->unreg_vpi_cmpl = VPORT_OK;
        mempool_free(pmb, phba->mbox_mem_pool);
        /*
         * This shost reference might have been taken at the beginning of
         * lpfc_vport_delete()
         */
        if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
                scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return 1;

        lpfc_unreg_vpi(phba, vport->vpi, mbox);
        mbox->vport = vport;
        mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                 "1800 Could not issue unreg_vpi\n");
                mempool_free(mbox, phba->mbox_mem_pool);
                vport->unreg_vpi_cmpl = VPORT_ERROR;
                return rc;
        }
        return 0;
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        MAILBOX_t *mb = &pmb->u.mb;

        switch (mb->mbxStatus) {
        case 0x0011:    /* Unsupported feature */
        case 0x9601:    /* VPI list full */
        case 0x9602:    /* VPI is duplicate */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
                                 mb->mbxStatus);
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
                spin_unlock_irq(shost->host_lock);
                vport->fc_myDID = 0;
                goto out;
        }

        vport->vpi_state |= LPFC_VPI_REGISTERED;
        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
        vport->num_disc_nodes = 0;
        /* go thru NPR list and issue ELS PLOGIs */
        if (vport->fc_npr_cnt)
                lpfc_els_disc_plogi(vport);

        if (!vport->num_disc_nodes) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                spin_unlock_irq(shost->host_lock);
                lpfc_can_disctmo(vport);
        }
        vport->port_state = LPFC_VPORT_READY;

out:
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb = NULL;
        MAILBOX_t *mb;
        struct static_vport_info *vport_info;
        int rc = 0, i;
        struct fc_vport_identifiers vport_id;
        struct fc_vport *new_fc_vport;
        struct Scsi_Host *shost;
        struct lpfc_vport *vport;
        uint16_t offset = 0;
        uint8_t *vport_buff;
        struct lpfc_dmabuf *mp;
        uint32_t byte_count = 0;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0542 lpfc_create_static_vport failed to"
                        " allocate mailbox memory\n");
                return;
        }

        mb = &pmb->u.mb;

        vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
        if (!vport_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0543 lpfc_create_static_vport failed to"
                        " allocate vport_info\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return;
        }

        vport_buff = (uint8_t *) vport_info;
        do {
                if (lpfc_dump_static_vport(phba, pmb, offset))
                        goto out;

                pmb->vport = phba->pport;
                rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

                if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0544 lpfc_create_static_vport failed to"
                                " issue dump mailbox command ret 0x%x "
                                "status 0x%x\n",
                                rc, mb->mbxStatus);
                        goto out;
                }

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        byte_count = pmb->u.mqe.un.mb_words[5];
                        mp = (struct lpfc_dmabuf *) pmb->context2;
                        if (byte_count > sizeof(struct static_vport_info) -
                                        offset)
                                byte_count = sizeof(struct static_vport_info)
                                        - offset;
                        memcpy(vport_buff + offset, mp->virt, byte_count);
                        offset += byte_count;
                } else {
                        if (mb->un.varDmp.word_cnt >
                            sizeof(struct static_vport_info) - offset)
                                mb->un.varDmp.word_cnt =
                                        sizeof(struct static_vport_info)
                                                - offset;
                        byte_count = mb->un.varDmp.word_cnt;
                        lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                vport_buff + offset,
                                byte_count);

                        offset += byte_count;
                }

        } while (byte_count &&
                 offset < sizeof(struct static_vport_info));

        if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
            ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
                != VPORT_INFO_REV)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0545 lpfc_create_static_vport bad"
                        " information header 0x%x 0x%x\n",
                        le32_to_cpu(vport_info->signature),
                        le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

                goto out;
        }

        shost = lpfc_shost_from_vport(phba->pport);

        for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
                memset(&vport_id, 0, sizeof(vport_id));
                vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
                vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
                if (!vport_id.port_name || !vport_id.node_name)
                        continue;

                vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
                vport_id.vport_type = FC_PORTTYPE_NPIV;
                vport_id.disable = false;
                new_fc_vport = fc_vport_create(shost, 0, &vport_id);

                if (!new_fc_vport) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0546 lpfc_create_static_vport failed to"
                                " create vport\n");
                        continue;
                }

                vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
                vport->vport_flag |= STATIC_VPORT;
        }

out:
        kfree(vport_info);
        if (rc != MBX_TIMEOUT) {
                if (pmb->context2) {
                        mp = (struct lpfc_dmabuf *) pmb->context2;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return;
}
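
/*
 * Config region 22 is read with repeated DUMP mailbox commands, each
 * advancing 'offset' until the static_vport_info structure is filled or
 * the HBA returns no more data. On SLI-4 the data arrives in the DMA
 * buffer attached to context2; on SLI-3 it is embedded in the mailbox
 * itself at DMP_RSP_OFFSET, hence the two copy paths above.
 */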
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp;

        ndlp = (struct lpfc_nodelist *) pmb->context2;
        pmb->context1 = NULL;
        pmb->context2 = NULL;
        if (mb->mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0258 Register Fabric login error: 0x%x\n",
                                 mb->mbxStatus);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);

                if (phba->fc_topology == TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        /* Decrement the reference count to ndlp after the
                         * reference to the ndlp are done.
                         */
                        lpfc_nlp_put(ndlp);
                        return;
                }

                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                /* Decrement the reference count to ndlp after the reference
                 * to the ndlp are done.
                 */
                lpfc_nlp_put(ndlp);
                return;
        }

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_VALID;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                lpfc_start_fdiscs(phba);
                lpfc_do_scr_ns_plogi(phba, vport);
        }

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        /* Drop the reference count from the mbox at the end after
         * all the current reference to the ndlp have been done.
         */
        lpfc_nlp_put(ndlp);
        return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport *vport = pmb->vport;

        if (mb->mbxStatus) {
out:
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0260 Register NameServer error: 0x%x\n",
                                 mb->mbxStatus);
                /* decrement the node reference count held for this
                 * callback function.
                 */
                lpfc_nlp_put(ndlp);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);

                /* If no other thread is using the ndlp, free it */
                lpfc_nlp_not_used(ndlp);

                if (phba->fc_topology == TOPOLOGY_LOOP) {
                        /*
                         * RegLogin failed, use loop map to make discovery
                         * list
                         */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        return;
                }
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                return;
        }

        pmb->context1 = NULL;

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_VALID;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        if (vport->port_state < LPFC_VPORT_READY) {
                /* Link up discovery requires Fabric registration. */
                lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
                lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

                /* Issue SCR just before NameServer GID_FT Query */
                lpfc_issue_els_scr(vport, SCR_DID, 0);
        }

        vport->fc_ns_retry = 0;
        /* Good status, issue CT Request to NameServer */
        if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
                /* Cannot issue NameServer Query, so finish up discovery */
                goto out;
        }

        /* decrement the node reference count held for this
         * callback function.
         */
        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return;
}
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct fc_rport *rport;
        struct lpfc_rport_data *rdata;
        struct fc_rport_identifiers rport_ids;
        struct lpfc_hba *phba = vport->phba;

        /* Remote port has reappeared. Re-register w/ FC transport */
        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rport_ids.port_id = ndlp->nlp_DID;
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

        /*
         * We leave our node pointer in rport->dd_data when we unregister a
         * FCP target port. But fc_remote_port_add zeros the space to which
         * rport->dd_data points. So, if we're reusing a previously
         * registered port, drop the reference that we took the last time we
         * registered the port.
         */
        if (ndlp->rport && ndlp->rport->dd_data &&
            ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
                lpfc_nlp_put(ndlp);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport add: did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
        if (!rport || !get_device(&rport->dev)) {
                dev_printk(KERN_WARNING, &phba->pcidev->dev,
                           "Warning: fc_remote_port_add failed\n");
                return;
        }

        /* initialize static port data */
        rport->maxframe_size = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;
        rdata = rport->dd_data;
        rdata->pnode = lpfc_nlp_get(ndlp);

        if (ndlp->nlp_type & NLP_FCP_TARGET)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

        if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);

        if ((rport->scsi_target_id != -1) &&
            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
        }
        return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
        struct fc_rport *rport = ndlp->rport;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport delete: did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        fc_remote_port_delete(rport);

        return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        spin_lock_irq(shost->host_lock);
        switch (state) {
        case NLP_STE_UNUSED_NODE:
                vport->fc_unused_cnt += count;
                break;
        case NLP_STE_PLOGI_ISSUE:
                vport->fc_plogi_cnt += count;
                break;
        case NLP_STE_ADISC_ISSUE:
                vport->fc_adisc_cnt += count;
                break;
        case NLP_STE_REG_LOGIN_ISSUE:
                vport->fc_reglogin_cnt += count;
                break;
        case NLP_STE_PRLI_ISSUE:
                vport->fc_prli_cnt += count;
                break;
        case NLP_STE_UNMAPPED_NODE:
                vport->fc_unmap_cnt += count;
                break;
        case NLP_STE_MAPPED_NODE:
                vport->fc_map_cnt += count;
                break;
        case NLP_STE_NPR_NODE:
                vport->fc_npr_cnt += count;
                break;
        }
        spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (new_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
                ndlp->nlp_type |= NLP_FC_NODE;
        }
        if (new_state == NLP_STE_MAPPED_NODE)
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
        if (new_state == NLP_STE_NPR_NODE)
                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

        /* Transport interface */
        if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
                            old_state == NLP_STE_UNMAPPED_NODE)) {
                vport->phba->nport_event_cnt++;
                lpfc_unregister_remote_port(ndlp);
        }

        if (new_state == NLP_STE_MAPPED_NODE ||
            new_state == NLP_STE_UNMAPPED_NODE) {
                vport->phba->nport_event_cnt++;
                /*
                 * Tell the fc transport about the port, if we haven't
                 * already. If we have, and it's a scsi entity, be
                 * sure to unblock any attached scsi devices
                 */
                lpfc_register_remote_port(vport, ndlp);
        }
        if ((new_state == NLP_STE_MAPPED_NODE) &&
            (vport->stat_data_enabled)) {
                /*
                 * A new target is discovered, if there is no buffer for
                 * statistical data collection allocate buffer.
                 */
                ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
                                         sizeof(struct lpfc_scsicmd_bkt),
                                         GFP_KERNEL);

                if (!ndlp->lat_data)
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                "0286 lpfc_nlp_state_cleanup failed to "
                                "allocate statistical data buffer DID "
                                "0x%x\n", ndlp->nlp_DID);
        }
        /*
         * if we added to Mapped list, but the remote port
         * registration failed or assigned a target id outside
         * our presentable range - move the node to the
         * Unmapped List
         */
        if (new_state == NLP_STE_MAPPED_NODE &&
            (!ndlp->rport ||
             ndlp->rport->scsi_target_id == -1 ||
             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
                spin_unlock_irq(shost->host_lock);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        }
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
        static char *states[] = {
                [NLP_STE_UNUSED_NODE] = "UNUSED",
                [NLP_STE_PLOGI_ISSUE] = "PLOGI",
                [NLP_STE_ADISC_ISSUE] = "ADISC",
                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
                [NLP_STE_PRLI_ISSUE] = "PRLI",
                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
                [NLP_STE_MAPPED_NODE] = "MAPPED",
                [NLP_STE_NPR_NODE] = "NPR",
        };

        if (state < NLP_STE_MAX_STATE && states[state])
                strlcpy(buffer, states[state], size);
        else
                snprintf(buffer, size, "unknown (%d)", state);
        return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                   int state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        int old_state = ndlp->nlp_state;
        char name1[16], name2[16];

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0904 NPort state transition x%06x, %s -> %s\n",
                         ndlp->nlp_DID,
                         lpfc_nlp_state_name(name1, sizeof(name1), old_state),
                         lpfc_nlp_state_name(name2, sizeof(name2), state));

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node statechg did:x%x old:%d ste:%d",
                ndlp->nlp_DID, old_state, state);

        if (old_state == NLP_STE_NPR_NODE &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
                ndlp->nlp_type &= ~NLP_FC_NODE;
        }

        if (list_empty(&ndlp->nlp_listp)) {
                spin_lock_irq(shost->host_lock);
                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
                spin_unlock_irq(shost->host_lock);
        } else if (old_state)
                lpfc_nlp_counters(vport, old_state, -1);

        ndlp->nlp_state = state;
        lpfc_nlp_counters(vport, state, 1);
        lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
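
/*
 * lpfc_nlp_set_state keeps the per-state counters consistent: the old
 * state's counter is decremented (unless the node was never on the vport
 * list), the new state's counter is incremented, and lpfc_nlp_state_cleanup
 * then applies the FC transport side effects of the transition (rport
 * registration or unregistration).
 */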
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (list_empty(&ndlp->nlp_listp)) {
                spin_lock_irq(shost->host_lock);
                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
                spin_unlock_irq(shost->host_lock);
        }
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
        list_del_init(&ndlp->nlp_listp);
        spin_unlock_irq(shost->host_lock);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
                               NLP_STE_UNUSED_NODE);
}

static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
                               NLP_STE_UNUSED_NODE);
}
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of the @ndlp might go beyond the existence of @vport, as
 * the final release of ndlp is determined by its reference count, and
 * operations on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        uint32_t did)
{
        INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
        INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
        init_timer(&ndlp->nlp_delayfunc);
        ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
        ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
        ndlp->nlp_DID = did;
        ndlp->vport = vport;
        ndlp->phba = vport->phba;
        ndlp->nlp_sid = NLP_NO_SID;
        kref_init(&ndlp->kref);
        NLP_INT_NODE_ACT(ndlp);
        atomic_set(&ndlp->cmd_pending, 0);
        ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 int state)
{
        struct lpfc_hba *phba = vport->phba;
        uint32_t did;
        unsigned long flags;

        if (!ndlp)
                return NULL;

        spin_lock_irqsave(&phba->ndlp_lock, flags);
        /* The ndlp should not be in memory free mode */
        if (NLP_CHK_FREE_REQ(ndlp)) {
                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0277 lpfc_enable_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                atomic_read(&ndlp->kref.refcount));
                return NULL;
        }
        /* The ndlp should not already be in active mode */
        if (NLP_CHK_NODE_ACT(ndlp)) {
                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0278 lpfc_enable_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                atomic_read(&ndlp->kref.refcount));
                return NULL;
        }

        /* Keep the original DID */
        did = ndlp->nlp_DID;

        /* re-initialize ndlp except of ndlp linked list pointer */
        memset((((char *)ndlp) + sizeof (struct list_head)), 0,
                sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
        lpfc_initialize_node(vport, ndlp, did);

        spin_unlock_irqrestore(&phba->ndlp_lock, flags);

        if (state != NLP_STE_UNUSED_NODE)
                lpfc_nlp_set_state(vport, ndlp, state);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node enable: did:x%x",
                ndlp->nlp_DID, 0, 0);
        return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        /*
         * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
         * the ndlp from the vport. The ndlp stays marked as UNUSED on the
         * list until ALL other outstanding threads have completed. We check
         * that the ndlp is not already in the UNUSED state before we proceed.
         */
        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                return;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
        lpfc_nlp_put(ndlp);
        return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        uint32_t tmo;

        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
                /* For FAN, timeout should be greater than edtov */
                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
        } else {
                /* Normal discovery timeout should be > than ELS/CT timeout
                 * FC spec states we need 3 * ratov for CT requests
                 */
                tmo = ((phba->fc_ratov * 3) + 3);
        }

        if (!timer_pending(&vport->fc_disctmo)) {
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                        "set disc timer: tmo:x%x state:x%x flg:x%x",
                        tmo, vport->port_state, vport->fc_flag);
        }

        mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);

        /* Start Discovery Timer state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0247 Start Discovery Timer state x%x "
                         "Data: x%x x%lx x%x x%x\n",
                         vport->port_state, tmo,
                         (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
                         vport->fc_adisc_cnt);

        return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        unsigned long iflags;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "can disc timer: state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        /* Turn off discovery timer if it is running */
        if (vport->fc_flag & FC_DISC_TMO) {
                spin_lock_irqsave(shost->host_lock, iflags);
                vport->fc_flag &= ~FC_DISC_TMO;
                spin_unlock_irqrestore(shost->host_lock, iflags);
                del_timer_sync(&vport->fc_disctmo);
                spin_lock_irqsave(&vport->work_port_lock, iflags);
                vport->work_port_events &= ~WORKER_DISC_TMO;
                spin_unlock_irqrestore(&vport->work_port_lock, iflags);
        }

        /* Cancel Discovery Timer state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0248 Cancel Discovery Timer state x%x "
                         "Data: x%x x%x x%x\n",
                         vport->port_state, vport->fc_flag,
                         vport->fc_plogi_cnt, vport->fc_adisc_cnt);

        return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                    struct lpfc_sli_ring *pring,
                    struct lpfc_iocbq *iocb,
                    struct lpfc_nodelist *ndlp)
{
        struct lpfc_sli *psli = &phba->sli;
        IOCB_t *icmd = &iocb->iocb;
        struct lpfc_vport *vport = ndlp->vport;

        if (iocb->vport != vport)
                return 0;

        if (pring->ringno == LPFC_ELS_RING) {
                switch (icmd->ulpCommand) {
                case CMD_GEN_REQUEST64_CR:
                        if (iocb->context_un.ndlp == ndlp)
                                return 1;
                case CMD_ELS_REQUEST64_CR:
                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
                                return 1;
                case CMD_XMIT_ELS_RSP64_CX:
                        if (iocb->context1 == (uint8_t *) ndlp)
                                return 1;
                }
        } else if (pring->ringno == psli->extra_ring) {

        } else if (pring->ringno == psli->fcp_ring) {
                /* Skip match check if waiting to relogin to FCP target */
                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
                    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
                        return 0;
                }
                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
                        return 1;
                }
        } else if (pring->ringno == psli->next_ring) {

        }
        return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *iocb, *next_iocb;
        uint32_t i;

        lpfc_fabric_abort_nport(ndlp);

        /*
         * Everything that matches on txcmplq will be returned
         * by firmware with a no rpi error.
         */
        psli = &phba->sli;
        if (ndlp->nlp_flag & NLP_RPI_VALID) {
                /* Now process each ring */
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->ring[i];

                        spin_lock_irq(&phba->hbalock);
                        list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
                                                 list) {
                                /*
                                 * Check to see if iocb matches the nport we are
                                 * looking for
                                 */
                                if ((lpfc_check_sli_ndlp(phba, pring, iocb,
                                                         ndlp))) {
                                        /* It matches, so deque and call compl
                                           with an error */
                                        list_move_tail(&iocb->list,
                                                       &completions);
                                        pring->txq_cnt--;
                                }
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }

        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);

        return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        int rc;

        if (ndlp->nlp_flag & NLP_RPI_VALID) {
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
                        mbox->vport = vport;
                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED)
                                mempool_free(mbox, phba->mbox_mem_pool);
                }
                lpfc_no_rpi(phba, ndlp);
                ndlp->nlp_rpi = 0;
                ndlp->nlp_flag &= ~NLP_RPI_VALID;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                return 1;
        }
        return 0;
}
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (!vports)    /* work array allocation can fail */
                return;
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                shost = lpfc_shost_from_vport(vports[i]);
                spin_lock_irq(shost->host_lock);
                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
                        if (ndlp->nlp_flag & NLP_RPI_VALID)
                                lpfc_unreg_rpi(vports[i], ndlp);
                }
                spin_unlock_irq(shost->host_lock);
        }
        lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                if (rc != MBX_TIMEOUT)
                        mempool_free(mbox, phba->mbox_mem_pool);

                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                "1836 Could not issue "
                                "unreg_login(all_rpis) status %d\n", rc);
        }
}

void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                if (rc != MBX_TIMEOUT)
                        mempool_free(mbox, phba->mbox_mem_pool);

                if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                "1815 Could not issue "
                                "unreg_did (default rpis) status %d\n",
                                rc);
        }
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mb, *nextmb;
        struct lpfc_dmabuf *mp;

        /* Cleanup node for NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0900 Cleanup node for NPort x%x "
                         "Data: x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_flag,
                         ndlp->nlp_state, ndlp->nlp_rpi);
        if (NLP_CHK_FREE_REQ(ndlp)) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0280 lpfc_cleanup_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                atomic_read(&ndlp->kref.refcount));
                lpfc_dequeue_node(vport, ndlp);
        } else {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
                                "0281 lpfc_cleanup_node: ndlp:x%p "
                                "usgmap:x%x refcnt:%d\n",
                                (void *)ndlp, ndlp->nlp_usg_map,
                                atomic_read(&ndlp->kref.refcount));
                lpfc_disable_node(vport, ndlp);
        }

        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mb->context2 = NULL;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                }
        }

        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                        }
                        list_del(&mb->list);
                        mempool_free(mb, phba->mbox_mem_pool);
                        /* We shall not invoke the lpfc_nlp_put to decrement
                         * the ndlp reference count as we are in the process
                         * of lpfc_nlp_release.
                         */
                }
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_els_abort(phba, ndlp);

        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);

        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);

        list_del_init(&ndlp->els_retry_evt.evt_listp);
        list_del_init(&ndlp->dev_loss_evt.evt_listp);

        lpfc_unreg_rpi(vport, ndlp);

        return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata;
        LPFC_MBOXQ_t *mbox;
        int rc;

        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
            !(ndlp->nlp_flag & NLP_RPI_VALID)) {
                /* For this case we need to cleanup the default rpi
                 * allocated by the firmware.
                 */
                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
                        != NULL) {
                        rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
                                (uint8_t *) &vport->fc_sparam, mbox, 0);
                        if (rc) {
                                mempool_free(mbox, phba->mbox_mem_pool);
                        } else {
                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
                                mbox->vport = vport;
                                mbox->context2 = NULL;
                                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                                if (rc == MBX_NOT_FINISHED) {
                                        mempool_free(mbox, phba->mbox_mem_pool);
                                }
                        }
                }
        }
        lpfc_cleanup_node(vport, ndlp);

        /*
         * We can get here with a non-NULL ndlp->rport because when we
         * unregister a rport we don't break the rport/node linkage. So if we
         * do, make sure we don't leave any dangling pointers behind.
         */
        if (ndlp->rport) {
                rdata = ndlp->rport->dd_data;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
        }
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
{
        D_ID mydid, ndlpdid, matchdid;

        if (did == Bcast_DID)
                return 0;

        /* First check for Direct match */
        if (ndlp->nlp_DID == did)
                return 1;

        /* Next check for area/domain identically equals 0 match */
        mydid.un.word = vport->fc_myDID;
        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
                return 0;
        }

        matchdid.un.word = did;
        ndlpdid.un.word = ndlp->nlp_DID;
        if (matchdid.un.b.id == ndlpdid.un.b.id) {
                if ((mydid.un.b.domain == matchdid.un.b.domain) &&
                    (mydid.un.b.area == matchdid.un.b.area)) {
                        if ((ndlpdid.un.b.domain == 0) &&
                            (ndlpdid.un.b.area == 0)) {
                                if (ndlpdid.un.b.id)
                                        return 1;
                        }
                        return 0;
                }

                matchdid.un.word = ndlp->nlp_DID;
                if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
                    (mydid.un.b.area == ndlpdid.un.b.area)) {
                        if ((matchdid.un.b.domain == 0) &&
                            (matchdid.un.b.area == 0)) {
                                if (matchdid.un.b.id)
                                        return 1;
                        }
                }
        }
        return 0;
}
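
/*
 * Example of the wildcard match above: with our DID 0x010200 (domain 0x01,
 * area 0x02), a node stored with DID 0x0000EF (AL_PA only) matches a search
 * for 0x0102EF, and vice versa, since the low byte agrees and the zero
 * domain/area acts as a wildcard. This covers nodes learned while the port
 * was still privately (loop) addressed.
 */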
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
        struct lpfc_nodelist *ndlp;
        uint32_t data1;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (lpfc_matchdid(vport, ndlp, did)) {
                        data1 = (((uint32_t) ndlp->nlp_state << 24) |
                                 ((uint32_t) ndlp->nlp_xri << 16) |
                                 ((uint32_t) ndlp->nlp_type << 8) |
                                 ((uint32_t) ndlp->nlp_rpi & 0xff));
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                         "0929 FIND node DID "
                                         "Data: x%p x%x x%x x%x\n",
                                         ndlp, ndlp->nlp_DID,
                                         ndlp->nlp_flag, data1);
                        return ndlp;
                }
        }

        /* FIND node did <did> NOT FOUND */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0932 FIND node did x%x NOT FOUND.\n", did);
        return NULL;
}

struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        spin_lock_irq(shost->host_lock);
        ndlp = __lpfc_findnode_did(vport, did);
        spin_unlock_irq(shost->host_lock);
        return ndlp;
}
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        ndlp = lpfc_findnode_did(vport, did);
        if (!ndlp) {
                if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
                    lpfc_rscn_payload_check(vport, did) == 0)
                        return NULL;
                ndlp = (struct lpfc_nodelist *)
                        mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
                if (!ndlp)
                        return NULL;
                lpfc_nlp_init(vport, ndlp, did);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                return ndlp;
        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
                ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
                if (!ndlp)
                        return NULL;
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                return ndlp;
        }

        if ((vport->fc_flag & FC_RSCN_MODE) &&
            !(vport->fc_flag & FC_NDISC_ACTIVE)) {
                if (lpfc_rscn_payload_check(vport, did)) {
                        /* If we've already received a PLOGI from this NPort
                         * we don't need to try to discover it again.
                         */
                        if (ndlp->nlp_flag & NLP_RCV_PLOGI)
                                return NULL;

                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
                        lpfc_cancel_retry_delay_tmo(vport, ndlp);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                        spin_unlock_irq(shost->host_lock);
                } else
                        ndlp = NULL;
        } else {
                /* If we've already received a PLOGI from this NPort,
                 * or we are already in the process of discovery on it,
                 * we don't need to try to discover it again.
                 */
                if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                    ndlp->nlp_flag & NLP_RCV_PLOGI)
                        return NULL;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
        }
        return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        int j;
        uint32_t alpa, index;

        if (!lpfc_is_link_up(phba))
                return;

        if (phba->fc_topology != TOPOLOGY_LOOP)
                return;

        /* Check for loop map present or not */
        if (phba->alpa_map[0]) {
                for (j = 1; j <= phba->alpa_map[0]; j++) {
                        alpa = phba->alpa_map[j];
                        if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
                                continue;
                        lpfc_setup_disc_node(vport, alpa);
                }
        } else {
                /* No alpamap, so try all alpa's */
                for (j = 0; j < FC_MAXLOOP; j++) {
                        /* If cfg_scan_down is set, start from highest
                         * ALPA (0xef) to lowest (0x1).
                         */
                        if (vport->cfg_scan_down)
                                index = j;
                        else
                                index = FC_MAXLOOP - j - 1;
                        alpa = lpfcAlpaArray[index];
                        if ((vport->fc_myDID & 0xff) == alpa)
                                continue;
                        lpfc_setup_disc_node(vport, alpa);
                }
        }
        return;
}
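
/*
 * Loop map format, as consumed above: alpa_map[0] holds the number of
 * valid entries and alpa_map[1..N] hold the AL_PAs reported by READ_LA.
 * When the map is empty the code falls back to probing every AL_PA in
 * lpfcAlpaArray, optionally scanning from highest (0xEF) to lowest (0x01),
 * skipping our own AL_PA in either case.
 */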
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *mbox;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
        struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
        struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
        int rc;

        /*
         * if it's not a physical port or if we already sent
         * clear_la then don't send it.
         */
        if ((phba->link_state >= LPFC_CLEAR_LA) ||
            (vport->port_type != LPFC_PHYSICAL_PORT) ||
            (phba->sli_rev == LPFC_SLI_REV4))
                return;

        /* Link up discovery */
        if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
                phba->link_state = LPFC_CLEAR_LA;
                lpfc_clear_la(phba, mbox);
                mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
                mbox->vport = vport;
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                        lpfc_disc_flush_list(vport);
                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        phba->link_state = LPFC_HBA_ERROR;
                }
        }
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *regvpimbox;

        regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (regvpimbox) {
                lpfc_reg_vpi(vport, regvpimbox);
                regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
                regvpimbox->vport = vport;
                if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
                                        == MBX_NOT_FINISHED) {
                        mempool_free(regvpimbox, phba->mbox_mem_pool);
                }
        }
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        uint32_t num_sent;
        uint32_t clear_la_pending;
        int did_changed;

        if (!lpfc_is_link_up(phba))
                return;

        if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
        else
                clear_la_pending = 0;

        if (vport->port_state < LPFC_VPORT_READY)
                vport->port_state = LPFC_DISC_AUTH;

        lpfc_set_disctmo(vport);

        if (vport->fc_prevDID == vport->fc_myDID)
                did_changed = 0;
        else
                did_changed = 1;

        vport->fc_prevDID = vport->fc_myDID;
        vport->num_disc_nodes = 0;

        /* Start Discovery state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0202 Start Discovery hba state x%x "
                         "Data: x%x x%x x%x\n",
                         vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
                         vport->fc_adisc_cnt);

        /* First do ADISCs - if any */
        num_sent = lpfc_els_disc_adisc(vport);

        if (num_sent)
                return;

        /*
         * For SLI3, cmpl_reg_vpi will set port_state to READY, and
         * continue discovery.
         */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }

        /*
         * For SLI2, we need to set port_state to READY and continue
         * discovery.
         */
        if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
                /* If we get here, there is nothing to ADISC */
                if (vport->port_type == LPFC_PHYSICAL_PORT)
                        lpfc_issue_clear_la(phba, vport);

                if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
                        vport->num_disc_nodes = 0;
                        /* go thru NPR nodes and issue ELS PLOGIs */
                        if (vport->fc_npr_cnt)
                                lpfc_els_disc_plogi(vport);

                        if (!vport->num_disc_nodes) {
                                spin_lock_irq(shost->host_lock);
                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                                spin_unlock_irq(shost->host_lock);
                                lpfc_can_disctmo(vport);
                        }
                }
                vport->port_state = LPFC_VPORT_READY;
        } else {
                /* Next do PLOGIs - if any */
                num_sent = lpfc_els_disc_plogi(vport);

                if (num_sent)
                        return;

                if (vport->fc_flag & FC_RSCN_MODE) {
                        /* Check to see if more RSCNs came in while we
                         * were processing this one.
                         */
                        if ((vport->fc_rscn_id_cnt == 0) &&
                            (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
                                spin_lock_irq(shost->host_lock);
                                vport->fc_flag &= ~FC_RSCN_MODE;
                                spin_unlock_irq(shost->host_lock);
                                lpfc_can_disctmo(vport);
                        } else
                                lpfc_els_handle_rscn(vport);
                }
        }
        return;
}
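
/*
 * Discovery ordering in lpfc_disc_start: ADISC first, so nodes that still
 * hold a valid login can be revalidated cheaply, then PLOGI for everyone
 * else. If a wave sends anything (num_sent != 0) the routine returns and
 * is re-entered as those ELS commands complete; only once a wave drains
 * does the final REG_VPI or CLEAR_LA step get issued.
 */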
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
        IOCB_t *icmd;
        struct lpfc_iocbq *iocb, *next_iocb;
        struct lpfc_sli_ring *pring;

        psli = &phba->sli;
        pring = &psli->ring[LPFC_ELS_RING];

        /* Error matching iocb on txq or txcmplq
         * First check the txq.
         */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                if (iocb->context1 != ndlp) {
                        continue;
                }
                icmd = &iocb->iocb;
                if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
                    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

                        list_move_tail(&iocb->list, &completions);
                        pring->txq_cnt--;
                }
        }

        /* Next check the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                if (iocb->context1 != ndlp) {
                        continue;
                }
                icmd = &iocb->iocb;
                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
                    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
                }
        }
        spin_unlock_irq(&phba->hbalock);

        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp, *next_ndlp;
        struct lpfc_hba *phba = vport->phba;

        if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                                         nlp_listp) {
                        if (!NLP_CHK_NODE_ACT(ndlp))
                                continue;
                        if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                            ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
                                lpfc_free_tx(phba, ndlp);
                        }
                }
        }
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
        lpfc_els_flush_rscn(vport);
        lpfc_els_flush_cmd(vport);
        lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
        struct lpfc_hba *phba = vport->phba;
        uint32_t tmo_posted;
        unsigned long flags = 0;

        if (unlikely(!phba))
                return;

        spin_lock_irqsave(&vport->work_port_lock, flags);
        tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
        if (!tmo_posted)
                vport->work_port_events |= WORKER_DISC_TMO;
        spin_unlock_irqrestore(&vport->work_port_lock, flags);

        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting
	 * for FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
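
/* Node-list match helpers used with __lpfc_find_node(); each returns
 * nonzero when the node matches the key passed through the param
 * pointer.
 */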
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
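
/* Note: __lpfc_find_node() does no locking of its own; by the driver's
 * double-underscore convention the caller is expected to hold the host
 * lock, as lpfc_findnode_wwpn() below does.
 */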
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node element list pointer, else it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
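
/* Note: lpfc_nlp_init() gives a freshly allocated nodelist entry its
 * initial state (zeroed fields, DID, empty list linkage) before callers
 * place it on a vport's fc_nodes list.
 */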
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}

/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage is to prevent incrementing the
		 * ndlp reference count while it is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}

/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: kref_put() returns 1 when it decrements a reference
	 * count that was 1 and invokes the release callback function;
	 * otherwise, it decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
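
/* A sketch (not taken verbatim from any one caller) of how the get/put
 * pair above is typically used: take a reference before handing the
 * node to an asynchronous context, and drop it when that context is
 * done with the node.
 *
 *	if (lpfc_nlp_get(ndlp)) {
 *		... hand ndlp to a completion handler ...
 *		lpfc_nlp_put(ndlp);	(in the completion path)
 *	}
 */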

/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed; a return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)	/* defensive: work array allocation can fail */
		return ret;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_VALID)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
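
/* Note: a nonzero return from lpfc_fcf_inuse() means some node still
 * owns an fc_rport with the FCP target role, or still holds a valid
 * RPI, so the FCF cannot be unregistered yet.
 */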

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * the VFI.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_mbx_unreg_vpi(vports[i]);
			spin_lock_irq(&phba->hbalock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(&phba->hbalock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	spin_lock_irq(&phba->hbalock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
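
/* Note: lpfc_unregister_fcf_prep() above quiesces RPIs, VPIs, and the
 * VFI, in that order; the FCFI itself is torn down separately by
 * lpfc_sli4_unregister_fcf() below, and the two are paired by the
 * unregister entry points further down.
 */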

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues an unregister FCF mailbox command to the HBA to
 * unregister the currently registered FCF record. The driver does not
 * reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		/* free the mbox here to avoid leaking it on failure */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EINVAL;
	}
	return 0;
}

/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there is any connected remote port for the FCF and,
 * if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode or if HBA does not support
	 * FCoE or if FCF is not registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record, else it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
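
/* Layout assumed by lpfc_get_rec_conf23() for one TLV record in
 * region 23: byte 0 of the header word holds the record type, byte 1
 * holds rec_length in words, and rec_length data words follow; records
 * are walked until LPFC_REGION23_LAST_REC.
 */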

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}