[SCSI] lpfc 8.3.8: BugFixes: Discovery relates changes
drivers/scsi/lpfc/lpfc_els.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for          *
 * Fibre Channel Host Bus Adapters.                                 *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                         *
 * www.emulex.com                                                   *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig               *
 *                                                                  *
 * This program is free software; you can redistribute it and/or    *
 * modify it under the terms of version 2 of the GNU General        *
 * Public License as published by the Free Software Foundation.     *
 * This program is distributed in the hope that it will be useful.  *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING   *
 * included with this package.                                      *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If any host link
 * attention event occurred during this @vport's discovery process, the
 * @vport shall be marked as FC_ABORT_DISCOVERY, a host link attention clear
 * shall be issued if the link state is not already in host link cleared
 * state, and a return code shall indicate whether the host link attention
 * event had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention events will be ignored and a return code shall indicate that
 * no host link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	ha_copy = readl(phba->HAregaddr);

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields will be set up later by
 * the individual discovery machine routines after calling this routine
 * to allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
		(phba->hba_flag & HBA_FIP_SUPPORT) &&
		((elscmd == ELS_CMD_FLOGI) ||
		 (elscmd == ELS_CMD_FDISC) ||
		 (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
		elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
		break;
		case ELS_CMD_FDISC:
		elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
		break;
		case ELS_CMD_LOGO:
		elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
		break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.elsreq64.remoteID = did;	/* DID */
	if (expectRsp) {
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		icmd->un.elsreq64.myID = vport->fc_myDID;

		/* For ELS_REQUEST64_CR, use the VPI by default */
		icmd->ulpContext = vport->vpi + phba->vpi_base;
		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

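	/* The first BPL entry describes the command payload; a second
	 * entry is added below for the response buffer when a response
	 * is expected.
	 */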
	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
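	/* Driver-side timeout: twice R_A_TOV plus the fixed LPFC_DRVR_TIMEOUT pad */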
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

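	/* Second mailbox: register the fabric RPI (REG_LOGIN) for Fabric_DID */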
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for FCoE only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
static int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		rc = -ENODEV;
		goto fail;
	}

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
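	/* Supply the fabric service parameters to firmware through the DMA
	 * buffer attached to the REG_VFI mailbox.
	 */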
	vport->port_state = LPFC_FABRIC_CFG_LINK;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	return 0;

fail_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
	kfree(dmabuf);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

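	/* Fabric timeouts: keep E_D_TOV in milliseconds and R_A_TOV in
	 * seconds for the driver timers.
	 */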
	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	} else {
		/*
		 * If we are a N-port connected to a Fabric, fixup sparam's so
		 * logins to devices on remote loops work.
		 */
		vport->fc_sparam.cmn.altBbCredit = 1;
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
		}
	}

	if ((vport->fc_prevDID != vport->fc_myDID) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					&vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_reg_vfi(vport);
	}
	return 0;
}
/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(shost->host_lock);

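	/* Point-to-point link: fall back to the default E_D_TOV/R_A_TOV */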
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));
	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set ours to LocalID; the other
		 * side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(vport);
	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly having reached the
 * maximum number of retries), one additional decrement of ndlp reference
 * shall be invoked before going out after releasing the command IOCB. This
 * will actually release the remote node (Note, lpfc_els_free_iocb() will
 * also invoke one decrement of ndlp reference count). If no error is
 * reported in the IOCB status, the command Port ID field is used to
 * determine whether this is a point-to-point topology or a fabric topology:
 * if the Port ID field is assigned, it is a fabric topology; otherwise, it
 * is a point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0) {
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		}

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0100 FLOGI failure Data: x%x x%x "
				 "x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully "
			 "Data: x%x x%x x%x x%x\n",
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);

		if (!rc)
			goto out;
	}

flogifail:
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
		/* FLOGI needs to be 3 for WQE FCFI */
		/* Set the fcfi to the fcfi we registered with */
		elsiocb->iocb.ulpContext = phba->fcf.fcfi;
	} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		sp->cmn.request_multiple_Nport = 1;
		/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
		icmd->ulpCt_h = 1;
		icmd->ulpCt_l = 0;
	}

	if (phba->fc_topology != TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

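	/* Temporarily use the FLOGI discovery timeout to arm the discovery
	 * timer, then restore the saved R_A_TOV value.
	 */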
	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
		    icmd->un.elsreq64.bdl.ulpIoTag32) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

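	/* Enter FLOGI state and (re)start the discovery timer */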
	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0))
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);

	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	int sentplogi;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		sentplogi = lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport    *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct serv_parm *sp;
	uint8_t  name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
		return ndlp;

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
		if (!new_ndlp)
			return ndlp;
		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = lpfc_enable_node(vport, new_ndlp,
						NLP_STE_UNUSED_NODE);
		if (!new_ndlp)
			return ndlp;
		keepDID = new_ndlp->nlp_DID;
	} else
		keepDID = new_ndlp->nlp_DID;

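	/* Unregister the RPI on the matched node, then take over the DID
	 * and discovery state from the PLOGI ndlp.
	 */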
	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/* Set state will put new_ndlp on to node list if not already done */
	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

	/* Move this back to NPR state */
	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
		/* The new_ndlp is replacing ndlp totally, so we need
		 * to put ndlp on UNUSED list and try to free it.
		 */

		/* Fix up the rport accordingly */
		rport =  ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			if (rdata->pnode == ndlp) {
				lpfc_nlp_put(ndlp);
				ndlp->rport = NULL;
				rdata->pnode = lpfc_nlp_get(new_ndlp);
				new_ndlp->rport = rport;
			}
			new_ndlp->nlp_type = ndlp->nlp_type;
		}
		/* We shall actually free the ndlp with both nlp_DID and
		 * nlp_portname fields equals 0 to avoid any ndlp on the
		 * nodelist never to be used.
		 */
		if (ndlp->nlp_DID == 0) {
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/* Two ndlps cannot have the same did on the nodelist */
		ndlp->nlp_DID = keepDID;
		lpfc_drop_node(vport, ndlp);
	}
	else {
		lpfc_unreg_rpi(vport, ndlp);
		/* Two ndlps cannot have the same did */
		ndlp->nlp_DID = keepDID;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	}
	return new_ndlp;
}

/**
 * lpfc_end_rscn - Check and handle more rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether more Registration State Change
 * Notifications (RSCNs) came in while the discovery state machine was in
 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
 * handling the RSCNs.
 **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->fc_flag & FC_RSCN_MODE) {
		/*
		 * Check to see if more RSCNs came in while we were
		 * processing this one.
		 */
		if (vport->fc_rscn_id_cnt ||
		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
			lpfc_els_handle_rscn(vport);
		else {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	}
}

/**
 * lpfc_cmpl_els_plogi - Completion callback function for plogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for issuing the Port
 * Login (PLOGI) command. For PLOGI completion, there must be an active
 * ndlp on the vport node list that matches the remote node ID from the
 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
 * ignored and command IOCB released. The PLOGI response IOCB status is
 * checked for error conditions. If there is error status reported, PLOGI
 * retry shall be attempted by invoking the lpfc_els_retry() routine.
 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
 * (DSM) is set for this PLOGI completion. Finally, it checks whether
 * there are additional N_Port nodes with the vport that need to perform
 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
 * PLOGIs.
 **/
static void
lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *prsp;
	int disc, rc, did, type;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"PLOGI cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		irsp->un.elsreq64.remoteID);

	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0136 PLOGI completes to NPort x%x "
				 "with no ndlp. Data: x%x x%x x%x\n",
				 irsp->un.elsreq64.remoteID,
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpIoTag);
		goto out;
	}

	/* Since ndlp can be freed in the disc state machine, note if this node
	 * is being used during discovery.
	 */
	spin_lock_irq(shost->host_lock);
	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
	spin_unlock_irq(shost->host_lock);
	rc = 0;

	/* PLOGI completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0102 PLOGI completes to NPort x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		goto out;
	}

	/* ndlp could be freed in DSM, save these values now */
	type = ndlp->nlp_type;
	did = ndlp->nlp_DID;

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* ELS command is being retried */
			if (disc) {
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
				spin_unlock_irq(shost->host_lock);
			}
			goto out;
		}
		/* PLOGI failed */
		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
		if (lpfc_error_lost_link(irsp))
			rc = NLP_STE_FREED_NODE;
		else
			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
						     NLP_EVT_CMPL_PLOGI);
	} else {
		/* Good status, call state machine */
		prsp = list_entry(((struct lpfc_dmabuf *)
				   cmdiocb->context2)->list.next,
				  struct lpfc_dmabuf, list);
		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					     NLP_EVT_CMPL_PLOGI);
	}

	if (disc && vport->num_disc_nodes) {
		/* Check to see if there are more PLOGIs to be sent */
		lpfc_more_plogi(vport);

		if (vport->num_disc_nodes == 0) {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_NDISC_ACTIVE;
			spin_unlock_irq(shost->host_lock);

			lpfc_can_disctmo(vport);
			lpfc_end_rscn(vport);
		}
	}

out:
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}

/**
 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @did: destination port identifier.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Port Login (PLOGI) command to a remote N_Port
 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 * This routine constructs the proper fields of the PLOGI IOCB and invokes
 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the PLOGI ELS command.
 *
 * Return code
 *   0 - Successfully issued a plogi for @vport
 *   1 - failed to issue a plogi for @vport
 **/
dea3101e 1459int
2e0fef85 1460lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
dea3101e 1461{
2e0fef85 1462 struct lpfc_hba *phba = vport->phba;
dea3101e 1463 struct serv_parm *sp;
1464 IOCB_t *icmd;
98c9ea5c 1465 struct lpfc_nodelist *ndlp;
dea3101e 1466 struct lpfc_iocbq *elsiocb;
dea3101e 1467 struct lpfc_sli *psli;
1468 uint8_t *pcmd;
1469 uint16_t cmdsize;
92d7f7b0 1470 int ret;
dea3101e 1471
1472 psli = &phba->sli;
dea3101e 1473
98c9ea5c 1474 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
1475 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1476 ndlp = NULL;
98c9ea5c 1477
e47c9093 1478 /* If ndlp is not NULL, we will bump the reference count on it */
92d7f7b0 1479 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
98c9ea5c 1480 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2e0fef85 1481 ELS_CMD_PLOGI);
c9f8735b
JW
1482 if (!elsiocb)
1483 return 1;
dea3101e 1484
1485 icmd = &elsiocb->iocb;
1486 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1487
1488 /* For PLOGI request, remainder of payload is service parameters */
1489 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
92d7f7b0
JS
1490 pcmd += sizeof(uint32_t);
1491 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
dea3101e 1492 sp = (struct serv_parm *) pcmd;
1493
1494 if (sp->cmn.fcphLow < FC_PH_4_3)
1495 sp->cmn.fcphLow = FC_PH_4_3;
1496
1497 if (sp->cmn.fcphHigh < FC_PH3)
1498 sp->cmn.fcphHigh = FC_PH3;
1499
858c9f6c
JS
1500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1501 "Issue PLOGI: did:x%x",
1502 did, 0, 0);
1503
dea3101e 1504 phba->fc_stat.elsXmitPLOGI++;
1505 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
3772a991 1506 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
1507
1508 if (ret == IOCB_ERROR) {
dea3101e 1509 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1510 return 1;
dea3101e 1511 }
c9f8735b 1512 return 0;
dea3101e 1513}
1514
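/*
 * Illustrative sketch only -- not part of the driver.  It shows how a
 * discovery path might kick off a PLOGI to a remote DID with the routine
 * above and then track the node state, mirroring what the delayed-retry
 * handler later in this file does for ELS_CMD_PLOGI.  The helper name
 * lpfc_example_start_plogi is hypothetical.
 */
static int
lpfc_example_start_plogi(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;

	/* Issue the PLOGI with no retries consumed yet */
	if (lpfc_issue_els_plogi(vport, did, 0))
		return 1;	/* could not issue the PLOGI IOCB */

	/* If an active ndlp exists for this DID, record the PLOGI state */
	ndlp = lpfc_findnode_did(vport, did);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
	}
	return 0;
}
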
e59058c4 1515/**
3621a710 1516 * lpfc_cmpl_els_prli - Completion callback function for prli
e59058c4
JS
1517 * @phba: pointer to lpfc hba data structure.
1518 * @cmdiocb: pointer to lpfc command iocb data structure.
1519 * @rspiocb: pointer to lpfc response iocb data structure.
1520 *
1521 * This routine is the completion callback function for a Process Login
1522 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
1523 * status. If there is error status reported, PRLI retry shall be attempted
 1524 * by invoking the lpfc_els_retry() routine. Otherwise, the
 1525 * NLP_EVT_CMPL_PRLI event is sent to the Discover State Machine (DSM) for this
1526 * ndlp to mark the PRLI completion.
1527 **/
dea3101e 1528static void
2e0fef85
JS
1529lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1530 struct lpfc_iocbq *rspiocb)
dea3101e 1531{
2e0fef85
JS
1532 struct lpfc_vport *vport = cmdiocb->vport;
1533 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1534 IOCB_t *irsp;
1535 struct lpfc_sli *psli;
1536 struct lpfc_nodelist *ndlp;
1537
1538 psli = &phba->sli;
1539 /* we pass cmdiocb to state machine which needs rspiocb as well */
1540 cmdiocb->context_un.rsp_iocb = rspiocb;
1541
1542 irsp = &(rspiocb->iocb);
1543 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2e0fef85 1544 spin_lock_irq(shost->host_lock);
dea3101e 1545 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 1546 spin_unlock_irq(shost->host_lock);
dea3101e 1547
858c9f6c
JS
1548 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1549 "PRLI cmpl: status:x%x/x%x did:x%x",
1550 irsp->ulpStatus, irsp->un.ulpWord[4],
1551 ndlp->nlp_DID);
dea3101e 1552 /* PRLI completes to NPort <nlp_DID> */
e8b62011
JS
1553 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1554 "0103 PRLI completes to NPort x%x "
1555 "Data: x%x x%x x%x x%x\n",
1556 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1557 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 1558
2e0fef85 1559 vport->fc_prli_sent--;
dea3101e 1560 /* Check to see if link went down during discovery */
2e0fef85 1561 if (lpfc_els_chk_latt(vport))
dea3101e 1562 goto out;
1563
1564 if (irsp->ulpStatus) {
1565 /* Check for retry */
1566 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1567 /* ELS command is being retried */
1568 goto out;
1569 }
1570 /* PRLI failed */
1571 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1572 if (lpfc_error_lost_link(irsp))
dea3101e 1573 goto out;
e47c9093 1574 else
2e0fef85 1575 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1576 NLP_EVT_CMPL_PRLI);
e47c9093 1577 } else
dea3101e 1578 /* Good status, call state machine */
2e0fef85 1579 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 1580 NLP_EVT_CMPL_PRLI);
dea3101e 1581out:
1582 lpfc_els_free_iocb(phba, cmdiocb);
1583 return;
1584}
1585
e59058c4 1586/**
3621a710 1587 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
e59058c4
JS
1588 * @vport: pointer to a host virtual N_Port data structure.
1589 * @ndlp: pointer to a node-list data structure.
1590 * @retry: number of retries to the command IOCB.
1591 *
1592 * This routine issues a Process Login (PRLI) ELS command for the
1593 * @vport. The PRLI service parameters are set up in the payload of the
1594 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
 1595 * is put into the IOCB completion callback function field before invoking
 1596 * the lpfc_sli_issue_iocb() routine to send out the PRLI command.
1597 *
1598 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1599 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1600 * will be stored into the context1 field of the IOCB for the completion
1601 * callback function to the PRLI ELS command.
1602 *
1603 * Return code
1604 * 0 - successfully issued prli iocb command for @vport
1605 * 1 - failed to issue prli iocb command for @vport
1606 **/
dea3101e 1607int
2e0fef85 1608lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e 1609 uint8_t retry)
1610{
2e0fef85
JS
1611 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1612 struct lpfc_hba *phba = vport->phba;
dea3101e 1613 PRLI *npr;
1614 IOCB_t *icmd;
1615 struct lpfc_iocbq *elsiocb;
dea3101e 1616 uint8_t *pcmd;
1617 uint16_t cmdsize;
1618
92d7f7b0 1619 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2e0fef85
JS
1620 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1621 ndlp->nlp_DID, ELS_CMD_PRLI);
488d1469 1622 if (!elsiocb)
c9f8735b 1623 return 1;
dea3101e 1624
1625 icmd = &elsiocb->iocb;
1626 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1627
1628 /* For PRLI request, remainder of payload is service parameters */
92d7f7b0 1629 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
dea3101e 1630 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
92d7f7b0 1631 pcmd += sizeof(uint32_t);
dea3101e 1632
1633 /* For PRLI, remainder of payload is PRLI parameter page */
1634 npr = (PRLI *) pcmd;
1635 /*
1636 * If our firmware version is 3.20 or later,
1637 * set the following bits for FC-TAPE support.
1638 */
1639 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1640 npr->ConfmComplAllowed = 1;
1641 npr->Retry = 1;
1642 npr->TaskRetryIdReq = 1;
1643 }
1644 npr->estabImagePair = 1;
1645 npr->readXferRdyDis = 1;
1646
1647 /* For FCP support */
1648 npr->prliType = PRLI_FCP_TYPE;
1649 npr->initiatorFunc = 1;
1650
858c9f6c
JS
1651 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1652 "Issue PRLI: did:x%x",
1653 ndlp->nlp_DID, 0, 0);
1654
dea3101e 1655 phba->fc_stat.elsXmitPRLI++;
1656 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2e0fef85 1657 spin_lock_irq(shost->host_lock);
dea3101e 1658 ndlp->nlp_flag |= NLP_PRLI_SND;
2e0fef85 1659 spin_unlock_irq(shost->host_lock);
3772a991
JS
1660 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1661 IOCB_ERROR) {
2e0fef85 1662 spin_lock_irq(shost->host_lock);
dea3101e 1663 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2e0fef85 1664 spin_unlock_irq(shost->host_lock);
dea3101e 1665 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1666 return 1;
dea3101e 1667 }
2e0fef85 1668 vport->fc_prli_sent++;
c9f8735b 1669 return 0;
dea3101e 1670}
1671
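/*
 * Illustrative sketch only -- not part of the driver.  A minimal example
 * of driving a PRLI after a successful PLOGI, following the same pattern
 * the delayed-retry handler below uses for ELS_CMD_PRLI.  The helper name
 * lpfc_example_start_prli is hypothetical.
 */
static int
lpfc_example_start_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/* Issue the PRLI with no retries consumed yet */
	if (lpfc_issue_els_prli(vport, ndlp, 0))
		return 1;	/* could not issue the PRLI IOCB */

	/* Track the PRLI_ISSUE state for the discovery state machine */
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
	return 0;
}
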
90160e01 1672/**
3621a710 1673 * lpfc_rscn_disc - Perform rscn discovery for a vport
90160e01
JS
1674 * @vport: pointer to a host virtual N_Port data structure.
1675 *
1676 * This routine performs Registration State Change Notification (RSCN)
1677 * discovery for a @vport. If the @vport's node port recovery count is not
1678 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
1679 * the nodes that need recovery. If none of the PLOGI were needed through
1680 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
1681 * invoked to check and handle possible more RSCN came in during the period
1682 * of processing the current ones.
1683 **/
1684static void
1685lpfc_rscn_disc(struct lpfc_vport *vport)
1686{
1687 lpfc_can_disctmo(vport);
1688
1689 /* RSCN discovery */
1690 /* go thru NPR nodes and issue ELS PLOGIs */
1691 if (vport->fc_npr_cnt)
1692 if (lpfc_els_disc_plogi(vport))
1693 return;
1694
1695 lpfc_end_rscn(vport);
1696}
1697
1698/**
3621a710 1699 * lpfc_adisc_done - Complete the adisc phase of discovery
90160e01
JS
1700 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
1701 *
1702 * This function is called when the final ADISC is completed during discovery.
1703 * This function handles clearing link attention or issuing reg_vpi depending
1704 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
1705 * discovery.
1706 * This function is called with no locks held.
1707 **/
1708static void
1709lpfc_adisc_done(struct lpfc_vport *vport)
1710{
1711 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1712 struct lpfc_hba *phba = vport->phba;
1713
1714 /*
1715 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1716 * and continue discovery.
1717 */
1718 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6fb120a7
JS
1719 !(vport->fc_flag & FC_RSCN_MODE) &&
1720 (phba->sli_rev < LPFC_SLI_REV4)) {
90160e01
JS
1721 lpfc_issue_reg_vpi(phba, vport);
1722 return;
1723 }
1724 /*
1725 * For SLI2, we need to set port_state to READY
1726 * and continue discovery.
1727 */
1728 if (vport->port_state < LPFC_VPORT_READY) {
1729 /* If we get here, there is nothing to ADISC */
1730 if (vport->port_type == LPFC_PHYSICAL_PORT)
1731 lpfc_issue_clear_la(phba, vport);
1732 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1733 vport->num_disc_nodes = 0;
1734 /* go thru NPR list, issue ELS PLOGIs */
1735 if (vport->fc_npr_cnt)
1736 lpfc_els_disc_plogi(vport);
1737 if (!vport->num_disc_nodes) {
1738 spin_lock_irq(shost->host_lock);
1739 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1740 spin_unlock_irq(shost->host_lock);
1741 lpfc_can_disctmo(vport);
1742 lpfc_end_rscn(vport);
1743 }
1744 }
1745 vport->port_state = LPFC_VPORT_READY;
1746 } else
1747 lpfc_rscn_disc(vport);
1748}
1749
e59058c4 1750/**
3621a710 1751 * lpfc_more_adisc - Issue more adisc as needed
e59058c4
JS
1752 * @vport: pointer to a host virtual N_Port data structure.
1753 *
1754 * This routine determines whether there are more ndlps on a @vport
 1755 * node list that need to have Address Discover (ADISC) issued. If so, it will
1756 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
1757 * remaining nodes which need to have ADISC sent.
1758 **/
0ff10d46 1759void
2e0fef85 1760lpfc_more_adisc(struct lpfc_vport *vport)
dea3101e 1761{
1762 int sentadisc;
1763
2e0fef85
JS
1764 if (vport->num_disc_nodes)
1765 vport->num_disc_nodes--;
dea3101e 1766 /* Continue discovery with <num_disc_nodes> ADISCs to go */
e8b62011
JS
1767 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1768 "0210 Continue discovery with %d ADISCs to go "
1769 "Data: x%x x%x x%x\n",
1770 vport->num_disc_nodes, vport->fc_adisc_cnt,
1771 vport->fc_flag, vport->port_state);
dea3101e 1772 /* Check to see if there are more ADISCs to be sent */
2e0fef85
JS
1773 if (vport->fc_flag & FC_NLP_MORE) {
1774 lpfc_set_disctmo(vport);
1775 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1776 sentadisc = lpfc_els_disc_adisc(vport);
dea3101e 1777 }
90160e01
JS
1778 if (!vport->num_disc_nodes)
1779 lpfc_adisc_done(vport);
dea3101e 1780 return;
1781}
1782
e59058c4 1783/**
3621a710 1784 * lpfc_cmpl_els_adisc - Completion callback function for adisc
e59058c4
JS
1785 * @phba: pointer to lpfc hba data structure.
1786 * @cmdiocb: pointer to lpfc command iocb data structure.
1787 * @rspiocb: pointer to lpfc response iocb data structure.
1788 *
1789 * This routine is the completion function for issuing the Address Discover
1790 * (ADISC) command. It first checks to see whether link went down during
1791 * the discovery process. If so, the node will be marked as node port
1792 * recovery for issuing discover IOCB by the link attention handler and
1793 * exit. Otherwise, the response status is checked. If error was reported
1794 * in the response status, the ADISC command shall be retried by invoking
1795 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
1796 * the response status, the state machine is invoked to set transition
1797 * with respect to NLP_EVT_CMPL_ADISC event.
1798 **/
dea3101e 1799static void
2e0fef85
JS
1800lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1801 struct lpfc_iocbq *rspiocb)
dea3101e 1802{
2e0fef85
JS
1803 struct lpfc_vport *vport = cmdiocb->vport;
1804 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1805 IOCB_t *irsp;
dea3101e 1806 struct lpfc_nodelist *ndlp;
2e0fef85 1807 int disc;
dea3101e 1808
1809 /* we pass cmdiocb to state machine which needs rspiocb as well */
1810 cmdiocb->context_un.rsp_iocb = rspiocb;
1811
1812 irsp = &(rspiocb->iocb);
1813 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
dea3101e 1814
858c9f6c
JS
1815 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1816 "ADISC cmpl: status:x%x/x%x did:x%x",
1817 irsp->ulpStatus, irsp->un.ulpWord[4],
1818 ndlp->nlp_DID);
1819
dea3101e 1820 /* Since ndlp can be freed in the disc state machine, note if this node
1821 * is being used during discovery.
1822 */
2e0fef85 1823 spin_lock_irq(shost->host_lock);
dea3101e 1824 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
c9f8735b 1825 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2e0fef85 1826 spin_unlock_irq(shost->host_lock);
dea3101e 1827 /* ADISC completes to NPort <nlp_DID> */
e8b62011
JS
1828 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1829 "0104 ADISC completes to NPort x%x "
1830 "Data: x%x x%x x%x x%x x%x\n",
1831 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1832 irsp->ulpTimeout, disc, vport->num_disc_nodes);
dea3101e 1833 /* Check to see if link went down during discovery */
2e0fef85
JS
1834 if (lpfc_els_chk_latt(vport)) {
1835 spin_lock_irq(shost->host_lock);
dea3101e 1836 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 1837 spin_unlock_irq(shost->host_lock);
dea3101e 1838 goto out;
1839 }
1840
1841 if (irsp->ulpStatus) {
1842 /* Check for retry */
1843 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1844 /* ELS command is being retried */
1845 if (disc) {
2e0fef85 1846 spin_lock_irq(shost->host_lock);
dea3101e 1847 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85
JS
1848 spin_unlock_irq(shost->host_lock);
1849 lpfc_set_disctmo(vport);
dea3101e 1850 }
1851 goto out;
1852 }
1853 /* ADISC failed */
1854 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
e47c9093 1855 if (!lpfc_error_lost_link(irsp))
2e0fef85 1856 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
858c9f6c 1857 NLP_EVT_CMPL_ADISC);
e47c9093 1858 } else
dea3101e 1859 /* Good status, call state machine */
2e0fef85 1860 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
dea3101e 1861 NLP_EVT_CMPL_ADISC);
dea3101e 1862
90160e01
JS
1863 /* Check to see if there are more ADISCs to be sent */
1864 if (disc && vport->num_disc_nodes)
2e0fef85 1865 lpfc_more_adisc(vport);
dea3101e 1866out:
1867 lpfc_els_free_iocb(phba, cmdiocb);
1868 return;
1869}
1870
e59058c4 1871/**
3621a710 1872 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
e59058c4
JS
1873 * @vport: pointer to a virtual N_Port data structure.
1874 * @ndlp: pointer to a node-list data structure.
1875 * @retry: number of retries to the command IOCB.
1876 *
1877 * This routine issues an Address Discover (ADISC) for an @ndlp on a
 1878 * @vport. It prepares the payload of the ADISC ELS command, updates the
 1879 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
1880 * to issue the ADISC ELS command.
1881 *
1882 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1883 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1884 * will be stored into the context1 field of the IOCB for the completion
1885 * callback function to the ADISC ELS command.
1886 *
1887 * Return code
1888 * 0 - successfully issued adisc
1889 * 1 - failed to issue adisc
1890 **/
dea3101e 1891int
2e0fef85 1892lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e 1893 uint8_t retry)
1894{
2e0fef85
JS
1895 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1896 struct lpfc_hba *phba = vport->phba;
dea3101e 1897 ADISC *ap;
1898 IOCB_t *icmd;
1899 struct lpfc_iocbq *elsiocb;
dea3101e 1900 uint8_t *pcmd;
1901 uint16_t cmdsize;
1902
92d7f7b0 1903 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2e0fef85
JS
1904 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1905 ndlp->nlp_DID, ELS_CMD_ADISC);
488d1469 1906 if (!elsiocb)
c9f8735b 1907 return 1;
dea3101e 1908
1909 icmd = &elsiocb->iocb;
1910 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1911
1912 /* For ADISC request, remainder of payload is service parameters */
1913 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
92d7f7b0 1914 pcmd += sizeof(uint32_t);
dea3101e 1915
1916 /* Fill in ADISC payload */
1917 ap = (ADISC *) pcmd;
1918 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
1919 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1920 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 1921 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 1922
858c9f6c
JS
1923 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1924 "Issue ADISC: did:x%x",
1925 ndlp->nlp_DID, 0, 0);
1926
dea3101e 1927 phba->fc_stat.elsXmitADISC++;
1928 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2e0fef85 1929 spin_lock_irq(shost->host_lock);
dea3101e 1930 ndlp->nlp_flag |= NLP_ADISC_SND;
2e0fef85 1931 spin_unlock_irq(shost->host_lock);
3772a991
JS
1932 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1933 IOCB_ERROR) {
2e0fef85 1934 spin_lock_irq(shost->host_lock);
dea3101e 1935 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2e0fef85 1936 spin_unlock_irq(shost->host_lock);
dea3101e 1937 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 1938 return 1;
dea3101e 1939 }
c9f8735b 1940 return 0;
dea3101e 1941}
1942
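/*
 * Illustrative sketch only -- not part of the driver.  It shows how the
 * address-discovery path could re-validate a known node with ADISC and
 * record the ADISC_ISSUE state, as the delayed-retry handler below does
 * for ELS_CMD_ADISC.  The helper name lpfc_example_start_adisc is
 * hypothetical.
 */
static int
lpfc_example_start_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	if (lpfc_issue_els_adisc(vport, ndlp, 0))
		return 1;	/* could not issue the ADISC IOCB */

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
	return 0;
}
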
e59058c4 1943/**
3621a710 1944 * lpfc_cmpl_els_logo - Completion callback function for logo
e59058c4
JS
1945 * @phba: pointer to lpfc hba data structure.
1946 * @cmdiocb: pointer to lpfc command iocb data structure.
1947 * @rspiocb: pointer to lpfc response iocb data structure.
1948 *
1949 * This routine is the completion function for issuing the ELS Logout (LOGO)
1950 * command. If no error status was reported from the LOGO response, the
1951 * state machine of the associated ndlp shall be invoked for transition with
1952 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
1953 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
1954 **/
dea3101e 1955static void
2e0fef85
JS
1956lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1957 struct lpfc_iocbq *rspiocb)
dea3101e 1958{
2e0fef85
JS
1959 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1960 struct lpfc_vport *vport = ndlp->vport;
1961 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1962 IOCB_t *irsp;
1963 struct lpfc_sli *psli;
dea3101e 1964
1965 psli = &phba->sli;
1966 /* we pass cmdiocb to state machine which needs rspiocb as well */
1967 cmdiocb->context_un.rsp_iocb = rspiocb;
1968
1969 irsp = &(rspiocb->iocb);
2e0fef85 1970 spin_lock_irq(shost->host_lock);
dea3101e 1971 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 1972 spin_unlock_irq(shost->host_lock);
dea3101e 1973
858c9f6c
JS
1974 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1975 "LOGO cmpl: status:x%x/x%x did:x%x",
1976 irsp->ulpStatus, irsp->un.ulpWord[4],
1977 ndlp->nlp_DID);
dea3101e 1978 /* LOGO completes to NPort <nlp_DID> */
e8b62011
JS
1979 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1980 "0105 LOGO completes to NPort x%x "
1981 "Data: x%x x%x x%x x%x\n",
1982 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1983 irsp->ulpTimeout, vport->num_disc_nodes);
dea3101e 1984 /* Check to see if link went down during discovery */
2e0fef85 1985 if (lpfc_els_chk_latt(vport))
dea3101e 1986 goto out;
1987
92d7f7b0
JS
1988 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
1989 /* NLP_EVT_DEVICE_RM should unregister the RPI
1990 * which should abort all outstanding IOs.
1991 */
1992 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1993 NLP_EVT_DEVICE_RM);
1994 goto out;
1995 }
1996
dea3101e 1997 if (irsp->ulpStatus) {
1998 /* Check for retry */
2e0fef85 1999 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
dea3101e 2000 /* ELS command is being retried */
2001 goto out;
dea3101e 2002 /* LOGO failed */
2003 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
858c9f6c 2004 if (lpfc_error_lost_link(irsp))
dea3101e 2005 goto out;
858c9f6c 2006 else
2e0fef85 2007 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2008 NLP_EVT_CMPL_LOGO);
e47c9093 2009 } else
5024ab17
JW
2010 /* Good status, call state machine.
2011 * This will unregister the rpi if needed.
2012 */
2e0fef85 2013 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
92d7f7b0 2014 NLP_EVT_CMPL_LOGO);
dea3101e 2015out:
2016 lpfc_els_free_iocb(phba, cmdiocb);
2017 return;
2018}
2019
e59058c4 2020/**
3621a710 2021 * lpfc_issue_els_logo - Issue a logo to a node on a vport
e59058c4
JS
2022 * @vport: pointer to a virtual N_Port data structure.
2023 * @ndlp: pointer to a node-list data structure.
2024 * @retry: number of retries to the command IOCB.
2025 *
2026 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2027 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2028 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2029 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2030 *
2031 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2032 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2033 * will be stored into the context1 field of the IOCB for the completion
2034 * callback function to the LOGO ELS command.
2035 *
2036 * Return code
2037 * 0 - successfully issued logo
2038 * 1 - failed to issue logo
2039 **/
dea3101e 2040int
2e0fef85 2041lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
dea3101e 2042 uint8_t retry)
2043{
2e0fef85
JS
2044 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2045 struct lpfc_hba *phba = vport->phba;
dea3101e 2046 IOCB_t *icmd;
2047 struct lpfc_iocbq *elsiocb;
dea3101e 2048 uint8_t *pcmd;
2049 uint16_t cmdsize;
92d7f7b0 2050 int rc;
dea3101e 2051
98c9ea5c
JS
2052 spin_lock_irq(shost->host_lock);
2053 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2054 spin_unlock_irq(shost->host_lock);
2055 return 0;
2056 }
2057 spin_unlock_irq(shost->host_lock);
2058
92d7f7b0 2059 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2e0fef85
JS
2060 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2061 ndlp->nlp_DID, ELS_CMD_LOGO);
488d1469 2062 if (!elsiocb)
c9f8735b 2063 return 1;
dea3101e 2064
2065 icmd = &elsiocb->iocb;
2066 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2067 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
92d7f7b0 2068 pcmd += sizeof(uint32_t);
dea3101e 2069
2070 /* Fill in LOGO payload */
2e0fef85 2071 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
92d7f7b0
JS
2072 pcmd += sizeof(uint32_t);
2073 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e 2074
858c9f6c
JS
2075 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2076 "Issue LOGO: did:x%x",
2077 ndlp->nlp_DID, 0, 0);
2078
dea3101e 2079 phba->fc_stat.elsXmitLOGO++;
2080 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2e0fef85 2081 spin_lock_irq(shost->host_lock);
dea3101e 2082 ndlp->nlp_flag |= NLP_LOGO_SND;
2e0fef85 2083 spin_unlock_irq(shost->host_lock);
3772a991 2084 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
92d7f7b0
JS
2085
2086 if (rc == IOCB_ERROR) {
2e0fef85 2087 spin_lock_irq(shost->host_lock);
dea3101e 2088 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2e0fef85 2089 spin_unlock_irq(shost->host_lock);
dea3101e 2090 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2091 return 1;
dea3101e 2092 }
c9f8735b 2093 return 0;
dea3101e 2094}
2095
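/*
 * Illustrative sketch only -- not part of the driver.  A minimal example
 * of logging a node out and dropping it to NPR state, mirroring the
 * ELS_CMD_LOGO case in the delayed-retry handler below.  Note that
 * lpfc_issue_els_logo() returns 0 without sending anything if a LOGO is
 * already pending (NLP_LOGO_SND).  The helper name lpfc_example_start_logo
 * is hypothetical.
 */
static int
lpfc_example_start_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	if (lpfc_issue_els_logo(vport, ndlp, 0))
		return 1;	/* could not issue the LOGO IOCB */

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}
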
e59058c4 2096/**
3621a710 2097 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
e59058c4
JS
2098 * @phba: pointer to lpfc hba data structure.
2099 * @cmdiocb: pointer to lpfc command iocb data structure.
2100 * @rspiocb: pointer to lpfc response iocb data structure.
2101 *
2102 * This routine is a generic completion callback function for ELS commands.
2103 * Specifically, it is the callback function which does not need to perform
2104 * any command specific operations. It is currently used by the ELS command
2105 * issuing routines for the ELS State Change Request (SCR),
2106 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2107 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2108 * certain debug loggings, this callback function simply invokes the
2109 * lpfc_els_chk_latt() routine to check whether link went down during the
2110 * discovery process.
2111 **/
dea3101e 2112static void
2e0fef85
JS
2113lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2114 struct lpfc_iocbq *rspiocb)
dea3101e 2115{
2e0fef85 2116 struct lpfc_vport *vport = cmdiocb->vport;
dea3101e 2117 IOCB_t *irsp;
2118
2119 irsp = &rspiocb->iocb;
2120
858c9f6c
JS
2121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2122 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2123 irsp->ulpStatus, irsp->un.ulpWord[4],
2124 irsp->un.elsreq64.remoteID);
dea3101e 2125 /* ELS cmd tag <ulpIoTag> completes */
e8b62011
JS
2126 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2127 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2128 irsp->ulpIoTag, irsp->ulpStatus,
2129 irsp->un.ulpWord[4], irsp->ulpTimeout);
dea3101e 2130 /* Check to see if link went down during discovery */
2e0fef85 2131 lpfc_els_chk_latt(vport);
dea3101e 2132 lpfc_els_free_iocb(phba, cmdiocb);
2133 return;
2134}
2135
e59058c4 2136/**
3621a710 2137 * lpfc_issue_els_scr - Issue a scr to a node on a vport
e59058c4
JS
2138 * @vport: pointer to a host virtual N_Port data structure.
2139 * @nportid: N_Port identifier to the remote node.
2140 * @retry: number of retries to the command IOCB.
2141 *
2142 * This routine issues a State Change Request (SCR) to a fabric node
2143 * on a @vport. The remote node @nportid is passed into the function. It
 2145 * first searches the @vport node list to find the matching ndlp. If no such
2145 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2146 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2147 * routine is invoked to send the SCR IOCB.
2148 *
2149 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2150 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2151 * will be stored into the context1 field of the IOCB for the completion
2152 * callback function to the SCR ELS command.
2153 *
2154 * Return code
2155 * 0 - Successfully issued scr command
2156 * 1 - Failed to issue scr command
2157 **/
dea3101e 2158int
2e0fef85 2159lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2160{
2e0fef85 2161 struct lpfc_hba *phba = vport->phba;
dea3101e 2162 IOCB_t *icmd;
2163 struct lpfc_iocbq *elsiocb;
dea3101e 2164 struct lpfc_sli *psli;
2165 uint8_t *pcmd;
2166 uint16_t cmdsize;
2167 struct lpfc_nodelist *ndlp;
2168
2169 psli = &phba->sli;
92d7f7b0 2170 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
dea3101e 2171
e47c9093
JS
2172 ndlp = lpfc_findnode_did(vport, nportid);
2173 if (!ndlp) {
2174 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2175 if (!ndlp)
2176 return 1;
2177 lpfc_nlp_init(vport, ndlp, nportid);
2178 lpfc_enqueue_node(vport, ndlp);
2179 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2180 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2181 if (!ndlp)
2182 return 1;
2183 }
2e0fef85
JS
2184
2185 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2186 ndlp->nlp_DID, ELS_CMD_SCR);
dea3101e 2187
488d1469 2188 if (!elsiocb) {
fa4066b6
JS
2189 /* This will trigger the release of the node just
2190 * allocated
2191 */
329f9bc7 2192 lpfc_nlp_put(ndlp);
c9f8735b 2193 return 1;
dea3101e 2194 }
2195
2196 icmd = &elsiocb->iocb;
2197 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2198
2199 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
92d7f7b0 2200 pcmd += sizeof(uint32_t);
dea3101e 2201
2202 /* For SCR, remainder of payload is SCR parameter page */
92d7f7b0 2203 memset(pcmd, 0, sizeof(SCR));
dea3101e 2204 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2205
858c9f6c
JS
2206 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2207 "Issue SCR: did:x%x",
2208 ndlp->nlp_DID, 0, 0);
2209
dea3101e 2210 phba->fc_stat.elsXmitSCR++;
2211 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2212 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2213 IOCB_ERROR) {
fa4066b6
JS
2214 /* The additional lpfc_nlp_put will cause the following
 2215 * lpfc_els_free_iocb routine to trigger the release of
2216 * the node.
2217 */
329f9bc7 2218 lpfc_nlp_put(ndlp);
dea3101e 2219 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2220 return 1;
dea3101e 2221 }
fa4066b6
JS
2222 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2223 * trigger the release of node.
2224 */
329f9bc7 2225 lpfc_nlp_put(ndlp);
c9f8735b 2226 return 0;
dea3101e 2227}
2228
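/*
 * Illustrative sketch only -- not part of the driver.  It shows how a
 * port could register for state change notifications by sending an SCR
 * to the Fabric Controller well-known address 0xFFFFFD, using the routine
 * above.  The helper name lpfc_example_register_scr is hypothetical.
 */
static int
lpfc_example_register_scr(struct lpfc_vport *vport)
{
	/* 0xFFFFFD is the Fabric Controller well-known N_Port ID */
	return lpfc_issue_els_scr(vport, 0xFFFFFD, 0);
}
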
e59058c4 2229/**
3621a710 2230 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
e59058c4
JS
2231 * @vport: pointer to a host virtual N_Port data structure.
2232 * @nportid: N_Port identifier to the remote node.
2233 * @retry: number of retries to the command IOCB.
2234 *
2235 * This routine issues a Fibre Channel Address Resolution Response
2236 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 2237 * is passed into the function. It first searches the @vport node list to find
2238 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2239 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2240 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2241 *
2242 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2243 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2244 * will be stored into the context1 field of the IOCB for the completion
 2245 * callback function to the FARPR ELS command.
2246 *
2247 * Return code
2248 * 0 - Successfully issued farpr command
2249 * 1 - Failed to issue farpr command
2250 **/
dea3101e 2251static int
2e0fef85 2252lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
dea3101e 2253{
2e0fef85 2254 struct lpfc_hba *phba = vport->phba;
dea3101e 2255 IOCB_t *icmd;
2256 struct lpfc_iocbq *elsiocb;
dea3101e 2257 struct lpfc_sli *psli;
2258 FARP *fp;
2259 uint8_t *pcmd;
2260 uint32_t *lp;
2261 uint16_t cmdsize;
2262 struct lpfc_nodelist *ondlp;
2263 struct lpfc_nodelist *ndlp;
2264
2265 psli = &phba->sli;
92d7f7b0 2266 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
dea3101e 2267
e47c9093
JS
2268 ndlp = lpfc_findnode_did(vport, nportid);
2269 if (!ndlp) {
2270 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2271 if (!ndlp)
2272 return 1;
2273 lpfc_nlp_init(vport, ndlp, nportid);
2274 lpfc_enqueue_node(vport, ndlp);
2275 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2276 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2277 if (!ndlp)
2278 return 1;
2279 }
2e0fef85
JS
2280
2281 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2282 ndlp->nlp_DID, ELS_CMD_RNID);
488d1469 2283 if (!elsiocb) {
fa4066b6
JS
2284 /* This will trigger the release of the node just
2285 * allocated
2286 */
329f9bc7 2287 lpfc_nlp_put(ndlp);
c9f8735b 2288 return 1;
dea3101e 2289 }
2290
2291 icmd = &elsiocb->iocb;
2292 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2293
2294 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
92d7f7b0 2295 pcmd += sizeof(uint32_t);
dea3101e 2296
2297 /* Fill in FARPR payload */
2298 fp = (FARP *) (pcmd);
92d7f7b0 2299 memset(fp, 0, sizeof(FARP));
dea3101e 2300 lp = (uint32_t *) pcmd;
2301 *lp++ = be32_to_cpu(nportid);
2e0fef85 2302 *lp++ = be32_to_cpu(vport->fc_myDID);
dea3101e 2303 fp->Rflags = 0;
2304 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2305
92d7f7b0
JS
2306 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2307 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 2308 ondlp = lpfc_findnode_did(vport, nportid);
e47c9093 2309 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
dea3101e 2310 memcpy(&fp->OportName, &ondlp->nlp_portname,
92d7f7b0 2311 sizeof(struct lpfc_name));
dea3101e 2312 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
92d7f7b0 2313 sizeof(struct lpfc_name));
dea3101e 2314 }
2315
858c9f6c
JS
2316 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2317 "Issue FARPR: did:x%x",
2318 ndlp->nlp_DID, 0, 0);
2319
dea3101e 2320 phba->fc_stat.elsXmitFARPR++;
2321 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3772a991
JS
2322 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2323 IOCB_ERROR) {
fa4066b6
JS
2324 /* The additional lpfc_nlp_put will cause the following
2325 * lpfc_els_free_iocb routine to trigger the release of
2326 * the node.
2327 */
329f9bc7 2328 lpfc_nlp_put(ndlp);
dea3101e 2329 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 2330 return 1;
dea3101e 2331 }
fa4066b6
JS
2332 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2333 * trigger the release of the node.
2334 */
329f9bc7 2335 lpfc_nlp_put(ndlp);
c9f8735b 2336 return 0;
dea3101e 2337}
2338
e59058c4 2339/**
3621a710 2340 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
e59058c4
JS
2341 * @vport: pointer to a host virtual N_Port data structure.
2342 * @nlp: pointer to a node-list data structure.
2343 *
2344 * This routine cancels the timer with a delayed IOCB-command retry for
 2345 * a @vport's @ndlp. It stops the timer for the delayed function retry and
 2346 * removes the ELS retry event if one is present. In addition, if the
2347 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2348 * commands are sent for the @vport's nodes that require issuing discovery
2349 * ADISC.
2350 **/
fdcebe28 2351void
2e0fef85 2352lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
fdcebe28 2353{
2e0fef85 2354 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
e47c9093 2355 struct lpfc_work_evt *evtp;
2e0fef85 2356
0d2b6b83
JS
2357 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2358 return;
2e0fef85 2359 spin_lock_irq(shost->host_lock);
fdcebe28 2360 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2361 spin_unlock_irq(shost->host_lock);
fdcebe28
JS
2362 del_timer_sync(&nlp->nlp_delayfunc);
2363 nlp->nlp_last_elscmd = 0;
e47c9093 2364 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
fdcebe28 2365 list_del_init(&nlp->els_retry_evt.evt_listp);
e47c9093
JS
2366 /* Decrement nlp reference count held for the delayed retry */
2367 evtp = &nlp->els_retry_evt;
2368 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2369 }
fdcebe28 2370 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2e0fef85 2371 spin_lock_irq(shost->host_lock);
fdcebe28 2372 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2e0fef85
JS
2373 spin_unlock_irq(shost->host_lock);
2374 if (vport->num_disc_nodes) {
0d2b6b83
JS
2375 if (vport->port_state < LPFC_VPORT_READY) {
2376 /* Check if there are more ADISCs to be sent */
2377 lpfc_more_adisc(vport);
0d2b6b83
JS
2378 } else {
2379 /* Check if there are more PLOGIs to be sent */
2380 lpfc_more_plogi(vport);
90160e01
JS
2381 if (vport->num_disc_nodes == 0) {
2382 spin_lock_irq(shost->host_lock);
2383 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2384 spin_unlock_irq(shost->host_lock);
2385 lpfc_can_disctmo(vport);
2386 lpfc_end_rscn(vport);
2387 }
fdcebe28
JS
2388 }
2389 }
2390 }
2391 return;
2392}
2393
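/*
 * Illustrative sketch only -- not part of the driver.  Before re-driving
 * discovery to a node, a caller could cancel any pending delayed ELS
 * retry with the routine above so that a stale retry does not fire later.
 * The helper name lpfc_example_restart_plogi is hypothetical.
 */
static int
lpfc_example_restart_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/* Stop a pending delayed retry (no-op if NLP_DELAY_TMO is not set) */
	lpfc_cancel_retry_delay_tmo(vport, ndlp);

	/* Start a fresh PLOGI to the same DID */
	return lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
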
e59058c4 2394/**
3621a710 2395 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
e59058c4
JS
2396 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2397 *
2398 * This routine is invoked by the ndlp delayed-function timer to check
2399 * whether there is any pending ELS retry event(s) with the node. If not, it
2400 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2401 * adds the delayed events to the HBA work list and invokes the
 2402 * lpfc_worker_wake_up() routine to wake up the worker thread to process the
2403 * event. Note that lpfc_nlp_get() is called before posting the event to
2404 * the work list to hold reference count of ndlp so that it guarantees the
2405 * reference to ndlp will still be available when the worker thread gets
2406 * to the event associated with the ndlp.
2407 **/
dea3101e 2408void
2409lpfc_els_retry_delay(unsigned long ptr)
2410{
2e0fef85
JS
2411 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2412 struct lpfc_vport *vport = ndlp->vport;
2e0fef85 2413 struct lpfc_hba *phba = vport->phba;
92d7f7b0 2414 unsigned long flags;
2e0fef85 2415 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
dea3101e 2416
92d7f7b0 2417 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2418 if (!list_empty(&evtp->evt_listp)) {
92d7f7b0 2419 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 2420 return;
2421 }
2422
fa4066b6
JS
2423 /* We need to hold the node by incrementing the reference
2424 * count until the queued work is done
2425 */
2426 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
5e9d9b82
JS
2427 if (evtp->evt_arg1) {
2428 evtp->evt = LPFC_EVT_ELS_RETRY;
2429 list_add_tail(&evtp->evt_listp, &phba->work_list);
92d7f7b0 2430 lpfc_worker_wake_up(phba);
5e9d9b82 2431 }
92d7f7b0 2432 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 2433 return;
2434}
2435
e59058c4 2436/**
3621a710 2437 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
e59058c4
JS
2438 * @ndlp: pointer to a node-list data structure.
2439 *
2440 * This routine is the worker-thread handler for processing the @ndlp delayed
2441 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2442 * the last ELS command from the associated ndlp and invokes the proper ELS
2443 * function according to the delayed ELS command to retry the command.
2444 **/
dea3101e 2445void
2446lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2447{
2e0fef85
JS
2448 struct lpfc_vport *vport = ndlp->vport;
2449 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2450 uint32_t cmd, did, retry;
dea3101e 2451
2e0fef85 2452 spin_lock_irq(shost->host_lock);
5024ab17
JW
2453 did = ndlp->nlp_DID;
2454 cmd = ndlp->nlp_last_elscmd;
2455 ndlp->nlp_last_elscmd = 0;
dea3101e 2456
2457 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2e0fef85 2458 spin_unlock_irq(shost->host_lock);
dea3101e 2459 return;
2460 }
2461
2462 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 2463 spin_unlock_irq(shost->host_lock);
1a169689
JS
2464 /*
2465 * If a discovery event readded nlp_delayfunc after timer
2466 * firing and before processing the timer, cancel the
2467 * nlp_delayfunc.
2468 */
2469 del_timer_sync(&ndlp->nlp_delayfunc);
dea3101e 2470 retry = ndlp->nlp_retry;
4d9ab994 2471 ndlp->nlp_retry = 0;
dea3101e 2472
2473 switch (cmd) {
2474 case ELS_CMD_FLOGI:
2e0fef85 2475 lpfc_issue_els_flogi(vport, ndlp, retry);
dea3101e 2476 break;
2477 case ELS_CMD_PLOGI:
2e0fef85 2478 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
5024ab17 2479 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2480 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6ad42535 2481 }
dea3101e 2482 break;
2483 case ELS_CMD_ADISC:
2e0fef85 2484 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
5024ab17 2485 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2486 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6ad42535 2487 }
dea3101e 2488 break;
2489 case ELS_CMD_PRLI:
2e0fef85 2490 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
5024ab17 2491 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2492 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
6ad42535 2493 }
dea3101e 2494 break;
2495 case ELS_CMD_LOGO:
2e0fef85 2496 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
5024ab17 2497 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2498 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6ad42535 2499 }
dea3101e 2500 break;
92d7f7b0
JS
2501 case ELS_CMD_FDISC:
2502 lpfc_issue_els_fdisc(vport, ndlp, retry);
2503 break;
dea3101e 2504 }
2505 return;
2506}
2507
e59058c4 2508/**
3621a710 2509 * lpfc_els_retry - Make retry decision on an els command iocb
e59058c4
JS
2510 * @phba: pointer to lpfc hba data structure.
2511 * @cmdiocb: pointer to lpfc command iocb data structure.
2512 * @rspiocb: pointer to lpfc response iocb data structure.
2513 *
2514 * This routine makes a retry decision on an ELS command IOCB, which has
2515 * failed. The following ELS IOCBs use this function for retrying the command
 2516 * when the previously issued command responded with error status: FLOGI, PLOGI,
2517 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2518 * returned error status, it makes the decision whether a retry shall be
2519 * issued for the command, and whether a retry shall be made immediately or
2520 * delayed. In the former case, the corresponding ELS command issuing-function
 2521 * is called to retry the command. In the latter case, the ELS command shall
 2522 * be posted to the ndlp delayed event and the delayed function timer set on
 2523 * the ndlp for the delayed command issuing.
2524 *
2525 * Return code
2526 * 0 - No retry of els command is made
2527 * 1 - Immediate or delayed retry of els command is made
2528 **/
dea3101e 2529static int
2e0fef85
JS
2530lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2531 struct lpfc_iocbq *rspiocb)
dea3101e 2532{
2e0fef85
JS
2533 struct lpfc_vport *vport = cmdiocb->vport;
2534 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2535 IOCB_t *irsp = &rspiocb->iocb;
2536 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2537 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
dea3101e 2538 uint32_t *elscmd;
2539 struct ls_rjt stat;
2e0fef85 2540 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
98c9ea5c 2541 int logerr = 0;
2e0fef85 2542 uint32_t cmd = 0;
488d1469 2543 uint32_t did;
dea3101e 2544
488d1469 2545
dea3101e 2546 /* Note: context2 may be 0 for internal driver abort
 2547 * of delayed ELS commands.
2548 */
2549
2550 if (pcmd && pcmd->virt) {
2551 elscmd = (uint32_t *) (pcmd->virt);
2552 cmd = *elscmd++;
2553 }
2554
e47c9093 2555 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
488d1469
JS
2556 did = ndlp->nlp_DID;
2557 else {
2558 /* We should only hit this case for retrying PLOGI */
2559 did = irsp->un.elsreq64.remoteID;
2e0fef85 2560 ndlp = lpfc_findnode_did(vport, did);
e47c9093
JS
2561 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2562 && (cmd != ELS_CMD_PLOGI))
488d1469
JS
2563 return 1;
2564 }
2565
858c9f6c
JS
2566 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2567 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
2568 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2569
dea3101e 2570 switch (irsp->ulpStatus) {
2571 case IOSTAT_FCP_RSP_ERROR:
2572 case IOSTAT_REMOTE_STOP:
2573 break;
2574
2575 case IOSTAT_LOCAL_REJECT:
2576 switch ((irsp->un.ulpWord[4] & 0xff)) {
2577 case IOERR_LOOP_OPEN_FAILURE:
eaf15d5b
JS
2578 if (cmd == ELS_CMD_FLOGI) {
2579 if (PCI_DEVICE_ID_HORNET ==
2580 phba->pcidev->device) {
2581 phba->fc_topology = TOPOLOGY_LOOP;
2582 phba->pport->fc_myDID = 0;
2583 phba->alpa_map[0] = 0;
2584 phba->alpa_map[1] = 0;
2585 }
2586 }
2e0fef85 2587 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
92d7f7b0 2588 delay = 1000;
dea3101e 2589 retry = 1;
2590 break;
2591
92d7f7b0 2592 case IOERR_ILLEGAL_COMMAND:
7f5f3d0d
JS
2593 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2594 "0124 Retry illegal cmd x%x "
2595 "retry:x%x delay:x%x\n",
2596 cmd, cmdiocb->retry, delay);
2597 retry = 1;
2598 /* All command's retry policy */
2599 maxretry = 8;
2600 if (cmdiocb->retry > 2)
2601 delay = 1000;
92d7f7b0
JS
2602 break;
2603
dea3101e 2604 case IOERR_NO_RESOURCES:
98c9ea5c 2605 logerr = 1; /* HBA out of resources */
858c9f6c
JS
2606 retry = 1;
2607 if (cmdiocb->retry > 100)
2608 delay = 100;
2609 maxretry = 250;
2610 break;
2611
2612 case IOERR_ILLEGAL_FRAME:
92d7f7b0 2613 delay = 100;
dea3101e 2614 retry = 1;
2615 break;
2616
858c9f6c 2617 case IOERR_SEQUENCE_TIMEOUT:
dea3101e 2618 case IOERR_INVALID_RPI:
2619 retry = 1;
2620 break;
2621 }
2622 break;
2623
2624 case IOSTAT_NPORT_RJT:
2625 case IOSTAT_FABRIC_RJT:
2626 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2627 retry = 1;
2628 break;
2629 }
2630 break;
2631
2632 case IOSTAT_NPORT_BSY:
2633 case IOSTAT_FABRIC_BSY:
98c9ea5c 2634 logerr = 1; /* Fabric / Remote NPort out of resources */
dea3101e 2635 retry = 1;
2636 break;
2637
2638 case IOSTAT_LS_RJT:
2639 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
 2640 /* Added for Vendor specific support
2641 * Just keep retrying for these Rsn / Exp codes
2642 */
2643 switch (stat.un.b.lsRjtRsnCode) {
2644 case LSRJT_UNABLE_TPC:
2645 if (stat.un.b.lsRjtRsnCodeExp ==
2646 LSEXP_CMD_IN_PROGRESS) {
2647 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 2648 delay = 1000;
dea3101e 2649 maxretry = 48;
2650 }
2651 retry = 1;
2652 break;
2653 }
2654 if (cmd == ELS_CMD_PLOGI) {
92d7f7b0 2655 delay = 1000;
dea3101e 2656 maxretry = lpfc_max_els_tries + 1;
2657 retry = 1;
2658 break;
2659 }
92d7f7b0
JS
2660 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2661 (cmd == ELS_CMD_FDISC) &&
2662 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
e8b62011
JS
2663 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2664 "0125 FDISC Failed (x%x). "
2665 "Fabric out of resources\n",
2666 stat.un.lsRjtError);
92d7f7b0
JS
2667 lpfc_vport_set_state(vport,
2668 FC_VPORT_NO_FABRIC_RSCS);
2669 }
dea3101e 2670 break;
2671
2672 case LSRJT_LOGICAL_BSY:
858c9f6c
JS
2673 if ((cmd == ELS_CMD_PLOGI) ||
2674 (cmd == ELS_CMD_PRLI)) {
92d7f7b0 2675 delay = 1000;
dea3101e 2676 maxretry = 48;
92d7f7b0 2677 } else if (cmd == ELS_CMD_FDISC) {
51ef4c26
JS
2678 /* FDISC retry policy */
2679 maxretry = 48;
2680 if (cmdiocb->retry >= 32)
2681 delay = 1000;
dea3101e 2682 }
2683 retry = 1;
2684 break;
92d7f7b0
JS
2685
2686 case LSRJT_LOGICAL_ERR:
7f5f3d0d
JS
2687 /* There are some cases where switches return this
2688 * error when they are not ready and should be returning
2689 * Logical Busy. We should delay every time.
2690 */
2691 if (cmd == ELS_CMD_FDISC &&
2692 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2693 maxretry = 3;
2694 delay = 1000;
2695 retry = 1;
2696 break;
2697 }
92d7f7b0
JS
2698 case LSRJT_PROTOCOL_ERR:
2699 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2700 (cmd == ELS_CMD_FDISC) &&
2701 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2702 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2703 ) {
e8b62011 2704 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 2705 "0122 FDISC Failed (x%x). "
e8b62011
JS
2706 "Fabric Detected Bad WWN\n",
2707 stat.un.lsRjtError);
92d7f7b0
JS
2708 lpfc_vport_set_state(vport,
2709 FC_VPORT_FABRIC_REJ_WWN);
2710 }
2711 break;
dea3101e 2712 }
2713 break;
2714
2715 case IOSTAT_INTERMED_RSP:
2716 case IOSTAT_BA_RJT:
2717 break;
2718
2719 default:
2720 break;
2721 }
2722
488d1469 2723 if (did == FDMI_DID)
dea3101e 2724 retry = 1;
dea3101e 2725
695a814e 2726 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
1b32f6aa
JS
2727 (phba->fc_topology != TOPOLOGY_LOOP) &&
2728 !lpfc_error_lost_link(irsp)) {
98c9ea5c
JS
2729 /* FLOGI retry policy */
2730 retry = 1;
6669f9bb
JS
2731 /* retry forever */
2732 maxretry = 0;
2733 if (cmdiocb->retry >= 100)
2734 delay = 5000;
2735 else if (cmdiocb->retry >= 32)
98c9ea5c
JS
2736 delay = 1000;
2737 }
2738
6669f9bb
JS
2739 cmdiocb->retry++;
2740 if (maxretry && (cmdiocb->retry >= maxretry)) {
dea3101e 2741 phba->fc_stat.elsRetryExceeded++;
2742 retry = 0;
2743 }
2744
ed957684
JS
2745 if ((vport->load_flag & FC_UNLOADING) != 0)
2746 retry = 0;
2747
dea3101e 2748 if (retry) {
2749
2750 /* Retry ELS command <elsCmd> to remote NPORT <did> */
e8b62011
JS
2751 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2752 "0107 Retry ELS command x%x to remote "
2753 "NPORT x%x Data: x%x x%x\n",
2754 cmd, did, cmdiocb->retry, delay);
dea3101e 2755
858c9f6c
JS
2756 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2757 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2758 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2759 /* Don't reset timer for no resources */
2760
dea3101e 2761 /* If discovery / RSCN timer is running, reset it */
2e0fef85 2762 if (timer_pending(&vport->fc_disctmo) ||
92d7f7b0 2763 (vport->fc_flag & FC_RSCN_MODE))
2e0fef85 2764 lpfc_set_disctmo(vport);
dea3101e 2765 }
2766
2767 phba->fc_stat.elsXmitRetry++;
58da1ffb 2768 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
dea3101e 2769 phba->fc_stat.elsDelayRetry++;
2770 ndlp->nlp_retry = cmdiocb->retry;
2771
92d7f7b0
JS
2772 /* delay is specified in milliseconds */
2773 mod_timer(&ndlp->nlp_delayfunc,
2774 jiffies + msecs_to_jiffies(delay));
2e0fef85 2775 spin_lock_irq(shost->host_lock);
dea3101e 2776 ndlp->nlp_flag |= NLP_DELAY_TMO;
2e0fef85 2777 spin_unlock_irq(shost->host_lock);
dea3101e 2778
5024ab17 2779 ndlp->nlp_prev_state = ndlp->nlp_state;
858c9f6c
JS
2780 if (cmd == ELS_CMD_PRLI)
2781 lpfc_nlp_set_state(vport, ndlp,
2782 NLP_STE_REG_LOGIN_ISSUE);
2783 else
2784 lpfc_nlp_set_state(vport, ndlp,
2785 NLP_STE_NPR_NODE);
dea3101e 2786 ndlp->nlp_last_elscmd = cmd;
2787
c9f8735b 2788 return 1;
dea3101e 2789 }
2790 switch (cmd) {
2791 case ELS_CMD_FLOGI:
2e0fef85 2792 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
c9f8735b 2793 return 1;
92d7f7b0
JS
2794 case ELS_CMD_FDISC:
2795 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2796 return 1;
dea3101e 2797 case ELS_CMD_PLOGI:
58da1ffb 2798 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
488d1469 2799 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 2800 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 2801 NLP_STE_PLOGI_ISSUE);
488d1469 2802 }
2e0fef85 2803 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
c9f8735b 2804 return 1;
dea3101e 2805 case ELS_CMD_ADISC:
5024ab17 2806 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
2807 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2808 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
c9f8735b 2809 return 1;
dea3101e 2810 case ELS_CMD_PRLI:
5024ab17 2811 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
2812 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2813 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
c9f8735b 2814 return 1;
dea3101e 2815 case ELS_CMD_LOGO:
5024ab17 2816 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
2817 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2818 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
c9f8735b 2819 return 1;
dea3101e 2820 }
2821 }
dea3101e 2822 /* No retry ELS command <elsCmd> to remote NPORT <did> */
98c9ea5c
JS
2823 if (logerr) {
2824 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2825 "0137 No retry ELS command x%x to remote "
2826 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2827 cmd, did, irsp->ulpStatus,
2828 irsp->un.ulpWord[4]);
2829 }
2830 else {
2831 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
a58cbd52
JS
2832 "0108 No retry ELS command x%x to remote "
2833 "NPORT x%x Retried:%d Error:x%x/%x\n",
2834 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2835 irsp->un.ulpWord[4]);
98c9ea5c 2836 }
c9f8735b 2837 return 0;
dea3101e 2838}
2839
e59058c4 2840/**
3621a710 2841 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
e59058c4
JS
2842 * @phba: pointer to lpfc hba data structure.
2843 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
2844 *
2845 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
2846 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
2847 * checks to see whether there is a lpfc DMA buffer associated with the
2848 * response of the command IOCB. If so, it will be released before releasing
2849 * the lpfc DMA buffer associated with the IOCB itself.
2850 *
2851 * Return code
2852 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2853 **/
09372820 2854static int
87af33fe
JS
2855lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2856{
2857 struct lpfc_dmabuf *buf_ptr;
2858
e59058c4 2859 /* Free the response before processing the command. */
87af33fe
JS
2860 if (!list_empty(&buf_ptr1->list)) {
2861 list_remove_head(&buf_ptr1->list, buf_ptr,
2862 struct lpfc_dmabuf,
2863 list);
2864 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2865 kfree(buf_ptr);
2866 }
2867 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2868 kfree(buf_ptr1);
2869 return 0;
2870}
2871
e59058c4 2872/**
3621a710 2873 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
e59058c4
JS
2874 * @phba: pointer to lpfc hba data structure.
2875 * @buf_ptr: pointer to the lpfc dma buffer data structure.
2876 *
2877 * This routine releases the lpfc Direct Memory Access (DMA) buffer
2878 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
2879 * pool.
2880 *
2881 * Return code
2882 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2883 **/
09372820 2884static int
87af33fe
JS
2885lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2886{
2887 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2888 kfree(buf_ptr);
2889 return 0;
2890}
2891
e59058c4 2892/**
3621a710 2893 * lpfc_els_free_iocb - Free a command iocb and its associated resources
e59058c4
JS
2894 * @phba: pointer to lpfc hba data structure.
2895 * @elsiocb: pointer to lpfc els command iocb data structure.
2896 *
2897 * This routine frees a command IOCB and its associated resources. The
2898 * command IOCB data structure contains the reference to various associated
2899 * resources, these fields must be set to NULL if the associated reference
2900 * not present:
2901 * context1 - reference to ndlp
2902 * context2 - reference to cmd
2903 * context2->next - reference to rsp
2904 * context3 - reference to bpl
2905 *
2906 * It first properly decrements the reference count held on ndlp for the
2907 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
2908 * set, it invokes the lpfc_els_free_data() routine to release the Direct
2909 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 2910 * adds the DMA buffer to the @phba data structure for the delayed release.
2911 * If reference to the Buffer Pointer List (BPL) is present, the
2912 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
2913 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
2914 * invoked to release the IOCB data structure back to @phba IOCBQ list.
2915 *
2916 * Return code
2917 * 0 - Success (currently, always return 0)
2918 **/
dea3101e 2919int
329f9bc7 2920lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
dea3101e 2921{
2922 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
a8adb832
JS
2923 struct lpfc_nodelist *ndlp;
2924
2925 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
2926 if (ndlp) {
2927 if (ndlp->nlp_flag & NLP_DEFER_RM) {
2928 lpfc_nlp_put(ndlp);
dea3101e 2929
a8adb832
JS
2930 /* If the ndlp is not being used by another discovery
2931 * thread, free it.
2932 */
2933 if (!lpfc_nlp_not_used(ndlp)) {
2934 /* If ndlp is being used by another discovery
2935 * thread, just clear NLP_DEFER_RM
2936 */
2937 ndlp->nlp_flag &= ~NLP_DEFER_RM;
2938 }
2939 }
2940 else
2941 lpfc_nlp_put(ndlp);
329f9bc7
JS
2942 elsiocb->context1 = NULL;
2943 }
dea3101e 2944 /* context2 = cmd, context2->next = rsp, context3 = bpl */
2945 if (elsiocb->context2) {
0ff10d46
JS
2946 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
2947 /* Firmware could still be in progress of DMAing
2948 * payload, so don't free data buffer till after
2949 * a hbeat.
2950 */
2951 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
2952 buf_ptr = elsiocb->context2;
2953 elsiocb->context2 = NULL;
2954 if (buf_ptr) {
2955 buf_ptr1 = NULL;
2956 spin_lock_irq(&phba->hbalock);
2957 if (!list_empty(&buf_ptr->list)) {
2958 list_remove_head(&buf_ptr->list,
2959 buf_ptr1, struct lpfc_dmabuf,
2960 list);
2961 INIT_LIST_HEAD(&buf_ptr1->list);
2962 list_add_tail(&buf_ptr1->list,
2963 &phba->elsbuf);
2964 phba->elsbuf_cnt++;
2965 }
2966 INIT_LIST_HEAD(&buf_ptr->list);
2967 list_add_tail(&buf_ptr->list, &phba->elsbuf);
2968 phba->elsbuf_cnt++;
2969 spin_unlock_irq(&phba->hbalock);
2970 }
2971 } else {
2972 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
2973 lpfc_els_free_data(phba, buf_ptr1);
2974 }
dea3101e 2975 }
2976
2977 if (elsiocb->context3) {
2978 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
87af33fe 2979 lpfc_els_free_bpl(phba, buf_ptr);
dea3101e 2980 }
604a3e30 2981 lpfc_sli_release_iocbq(phba, elsiocb);
dea3101e 2982 return 0;
2983}
2984
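/*
 * Editor's note: a minimal caller sketch, not part of the driver source.
 * It only illustrates the context1 convention documented above for
 * lpfc_els_free_iocb(); the helper name and its third parameter are
 * hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void example_els_cmpl_teardown(struct lpfc_hba *phba,
				      struct lpfc_iocbq *cmdiocb,
				      int ndlp_already_released)
{
	if (ndlp_already_released)
		/* Tell lpfc_els_free_iocb() not to drop the ndlp reference */
		cmdiocb->context1 = NULL;

	/* Releases context2 (cmd/rsp DMA buffers), context3 (BPL), iocbq */
	lpfc_els_free_iocb(phba, cmdiocb);
}
#endif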
e59058c4 2985/**
3621a710 2986 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
e59058c4
JS
2987 * @phba: pointer to lpfc hba data structure.
2988 * @cmdiocb: pointer to lpfc command iocb data structure.
2989 * @rspiocb: pointer to lpfc response iocb data structure.
2990 *
2991 * This routine is the completion callback function to the Logout (LOGO)
2992 * Accept (ACC) Response ELS command. This routine is invoked to indicate
2993 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to
2994 * release the ndlp if it holds the last remaining reference (reference
2995 * count is 1). If that succeeds (meaning the ndlp was released), it sets
2996 * the IOCB context1 field to NULL to inform the subsequent
2997 * lpfc_els_free_iocb() routine that no ndlp reference count needs to be
2998 * decremented. Otherwise, the ndlp reference count will be decremented by
2999 * the lpfc_els_free_iocb() routine. Finally, lpfc_els_free_iocb() is
3000 * invoked to release the IOCB data structure.
3001 **/
dea3101e 3002static void
2e0fef85
JS
3003lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3004 struct lpfc_iocbq *rspiocb)
dea3101e 3005{
2e0fef85
JS
3006 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3007 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c
JS
3008 IOCB_t *irsp;
3009
3010 irsp = &rspiocb->iocb;
3011 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3012 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3013 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
dea3101e 3014 /* ACC to LOGO completes to NPort <nlp_DID> */
e8b62011
JS
3015 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3016 "0109 ACC to LOGO completes to NPort x%x "
3017 "Data: x%x x%x x%x\n",
3018 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3019 ndlp->nlp_rpi);
87af33fe
JS
3020
3021 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3022 /* NPort Recovery mode or node is just allocated */
3023 if (!lpfc_nlp_not_used(ndlp)) {
3024 /* If the ndlp is being used by another discovery
3025 * thread, just unregister the RPI.
3026 */
3027 lpfc_unreg_rpi(vport, ndlp);
fa4066b6
JS
3028 } else {
3029			/* Indicate the node has already been released; do
3030			 * not reference it from within lpfc_els_free_iocb.
3031 */
3032 cmdiocb->context1 = NULL;
87af33fe 3033 }
dea3101e 3034 }
3035 lpfc_els_free_iocb(phba, cmdiocb);
3036 return;
3037}
3038
e59058c4 3039/**
3621a710 3040 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
e59058c4
JS
3041 * @phba: pointer to lpfc hba data structure.
3042 * @pmb: pointer to the driver internal queue element for mailbox command.
3043 *
3044 * This routine is the completion callback function for unregister default
3045 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3046 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3047 * decrements the ndlp reference count held for this completion callback
3048 * function. After that, it invokes the lpfc_nlp_not_used() to check
3049 * whether there is only one reference left on the ndlp. If so, it will
3050 * perform one more decrement and trigger the release of the ndlp.
3051 **/
858c9f6c
JS
3052void
3053lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3054{
3055 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3056 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3057
6fb120a7
JS
3058 /*
3059 * This routine is used to register and unregister in previous SLI
3060 * modes.
3061 */
3062 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3063 (phba->sli_rev == LPFC_SLI_REV4))
3064 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3065
858c9f6c
JS
3066 pmb->context1 = NULL;
3067 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3068 kfree(mp);
3069 mempool_free(pmb, phba->mbox_mem_pool);
58da1ffb 3070 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
a8adb832 3071 lpfc_nlp_put(ndlp);
a8adb832
JS
3072 /* This is the end of the default RPI cleanup logic for this
3073		 * ndlp. If no other discovery threads are using this ndlp,
3074		 * we should free all resources associated with it.
3075 */
3076 lpfc_nlp_not_used(ndlp);
3077 }
3772a991 3078
858c9f6c
JS
3079 return;
3080}
3081
e59058c4 3082/**
3621a710 3083 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
e59058c4
JS
3084 * @phba: pointer to lpfc hba data structure.
3085 * @cmdiocb: pointer to lpfc command iocb data structure.
3086 * @rspiocb: pointer to lpfc response iocb data structure.
3087 *
3088 * This routine is the completion callback function for ELS Response IOCB
3089 * command. In the normal case, this callback function properly sets the
3090 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
3091 * field in the command IOCB is not NULL, the referenced mailbox command is
3092 * sent out, and then the lpfc_els_free_iocb() routine is invoked to release
3093 * the IOCB. Under error conditions, such as when an LS_RJT is returned or a
3094 * link down event occurred during discovery, the lpfc_nlp_not_used()
3095 * routine shall be invoked to try to release the ndlp if no other threads
3096 * are currently referring to it.
3097 **/
dea3101e 3098static void
858c9f6c 3099lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
329f9bc7 3100 struct lpfc_iocbq *rspiocb)
dea3101e 3101{
2e0fef85
JS
3102 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3103 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3104 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
87af33fe
JS
3105 IOCB_t *irsp;
3106 uint8_t *pcmd;
dea3101e 3107 LPFC_MBOXQ_t *mbox = NULL;
2e0fef85 3108 struct lpfc_dmabuf *mp = NULL;
87af33fe 3109 uint32_t ls_rjt = 0;
dea3101e 3110
33ccf8d1
JS
3111 irsp = &rspiocb->iocb;
3112
dea3101e 3113 if (cmdiocb->context_un.mbox)
3114 mbox = cmdiocb->context_un.mbox;
3115
fa4066b6
JS
3116	/* First determine if this is an LS_RJT cmpl. Note, this callback
3117	 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3118 */
87af33fe 3119 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
58da1ffb
JS
3120 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3121 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
fa4066b6
JS
3122		/* An LS_RJT associated with Default RPI cleanup has its own
3123		 * separate code path.
87af33fe
JS
3124 */
3125 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3126 ls_rjt = 1;
3127 }
3128
dea3101e 3129 /* Check to see if link went down during discovery */
58da1ffb 3130 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
dea3101e 3131 if (mbox) {
14691150
JS
3132 mp = (struct lpfc_dmabuf *) mbox->context1;
3133 if (mp) {
3134 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3135 kfree(mp);
3136 }
329f9bc7 3137 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 3138 }
58da1ffb
JS
3139 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3140 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
fa4066b6 3141 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3142 ndlp = NULL;
fa4066b6
JS
3143				/* Indicate the node has already been released;
3144				 * do not reference it from within
3145 * the routine lpfc_els_free_iocb.
3146 */
3147 cmdiocb->context1 = NULL;
3148 }
dea3101e 3149 goto out;
3150 }
3151
858c9f6c 3152 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
51ef4c26 3153 "ELS rsp cmpl: status:x%x/x%x did:x%x",
858c9f6c 3154 irsp->ulpStatus, irsp->un.ulpWord[4],
51ef4c26 3155 cmdiocb->iocb.un.elsreq64.remoteID);
dea3101e 3156 /* ELS response tag <ulpIoTag> completes */
e8b62011
JS
3157 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3158 "0110 ELS response tag x%x completes "
3159 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3160 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3161 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3162 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3163 ndlp->nlp_rpi);
dea3101e 3164 if (mbox) {
3165 if ((rspiocb->iocb.ulpStatus == 0)
3166 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2e0fef85 3167 lpfc_unreg_rpi(vport, ndlp);
e47c9093
JS
3168			/* Increment the ndlp reference count to hold a
3169			 * reference for the callback function.
3170 */
329f9bc7 3171 mbox->context2 = lpfc_nlp_get(ndlp);
2e0fef85 3172 mbox->vport = vport;
858c9f6c
JS
3173 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3174 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3175 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3176 }
3177 else {
3178 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3179 ndlp->nlp_prev_state = ndlp->nlp_state;
3180 lpfc_nlp_set_state(vport, ndlp,
2e0fef85 3181 NLP_STE_REG_LOGIN_ISSUE);
858c9f6c 3182 }
0b727fea 3183 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
e47c9093 3184 != MBX_NOT_FINISHED)
dea3101e 3185 goto out;
e47c9093
JS
3186 else
3187 /* Decrement the ndlp reference count we
3188 * set for this failed mailbox command.
3189 */
3190 lpfc_nlp_put(ndlp);
98c9ea5c
JS
3191
3192 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3193 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3194 "0138 ELS rsp: Cannot issue reg_login for x%x "
3195 "Data: x%x x%x x%x\n",
3196 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3197 ndlp->nlp_rpi);
3198
fa4066b6 3199 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3200 ndlp = NULL;
fa4066b6
JS
3201			/* Indicate the node has already been released;
3202			 * do not reference it from within
3203 * the routine lpfc_els_free_iocb.
3204 */
3205 cmdiocb->context1 = NULL;
3206 }
dea3101e 3207 } else {
858c9f6c
JS
3208 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3209 if (!lpfc_error_lost_link(irsp) &&
3210 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
fa4066b6 3211 if (lpfc_nlp_not_used(ndlp)) {
98c9ea5c 3212 ndlp = NULL;
fa4066b6
JS
3213 /* Indicate node has already been
3214					 * released; do not reference
3215					 * it from within the routine
3216 * lpfc_els_free_iocb.
3217 */
3218 cmdiocb->context1 = NULL;
3219 }
dea3101e 3220 }
3221 }
14691150
JS
3222 mp = (struct lpfc_dmabuf *) mbox->context1;
3223 if (mp) {
3224 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3225 kfree(mp);
3226 }
3227 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 3228 }
3229out:
58da1ffb 3230 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2e0fef85 3231 spin_lock_irq(shost->host_lock);
858c9f6c 3232 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
2e0fef85 3233 spin_unlock_irq(shost->host_lock);
87af33fe
JS
3234
3235 /* If the node is not being used by another discovery thread,
3236 * and we are sending a reject, we are done with it.
3237 * Release driver reference count here and free associated
3238 * resources.
3239 */
3240 if (ls_rjt)
fa4066b6
JS
3241 if (lpfc_nlp_not_used(ndlp))
3242			/* Indicate the node has already been released;
3243			 * do not reference it from within
3244 * the routine lpfc_els_free_iocb.
3245 */
3246 cmdiocb->context1 = NULL;
dea3101e 3247 }
87af33fe 3248
dea3101e 3249 lpfc_els_free_iocb(phba, cmdiocb);
3250 return;
3251}
3252
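/*
 * Editor's note: a minimal sketch, not part of the driver source, of the
 * reference-count handshake used above when chaining a mailbox command to
 * an ELS response completion.  It assumes the mailbox and its context1 DMA
 * buffer were already prepared by the caller; the helper name is
 * hypothetical.  The reference taken for the callback must be returned if
 * the mailbox cannot be issued, since lpfc_mbx_cmpl_dflt_rpi() will then
 * never run to drop it.
 */
#if 0	/* illustrative only, not compiled */
static int example_issue_mbox_with_ndlp_ref(struct lpfc_hba *phba,
					    struct lpfc_vport *vport,
					    struct lpfc_nodelist *ndlp,
					    LPFC_MBOXQ_t *mbox)
{
	mbox->context2 = lpfc_nlp_get(ndlp);	/* held for the callback */
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		lpfc_nlp_put(ndlp);	/* callback will never run */
		return 1;
	}
	return 0;
}
#endif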
e59058c4 3253/**
3621a710 3254 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
e59058c4
JS
3255 * @vport: pointer to a host virtual N_Port data structure.
3256 * @flag: the els command code to be accepted.
3257 * @oldiocb: pointer to the original lpfc command iocb data structure.
3258 * @ndlp: pointer to a node-list data structure.
3259 * @mbox: pointer to the driver internal queue element for mailbox command.
3260 *
3261 * This routine prepares and issues an Accept (ACC) response IOCB
3262 * command. It uses the @flag to properly set up the IOCB field for the
3263 * specific ACC response command to be issued and invokes the
3264 * lpfc_sli_issue_iocb() routine to send out the ACC response IOCB. If a
3265 * @mbox pointer is passed in, it will be put into the context_un.mbox
3266 * field of the IOCB for the completion callback function to issue the
3267 * mailbox command to the HBA later when the callback is invoked.
3268 *
3269 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3270 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3271 * will be stored into the context1 field of the IOCB for the completion
3272 * callback function to the corresponding response ELS IOCB command.
3273 *
3274 * Return code
3275 * 0 - Successfully issued acc response
3276 * 1 - Failed to issue acc response
3277 **/
dea3101e 3278int
2e0fef85
JS
3279lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3280 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
51ef4c26 3281 LPFC_MBOXQ_t *mbox)
dea3101e 3282{
2e0fef85
JS
3283 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3284 struct lpfc_hba *phba = vport->phba;
dea3101e 3285 IOCB_t *icmd;
3286 IOCB_t *oldcmd;
3287 struct lpfc_iocbq *elsiocb;
dea3101e 3288 struct lpfc_sli *psli;
3289 uint8_t *pcmd;
3290 uint16_t cmdsize;
3291 int rc;
82d9a2a2 3292 ELS_PKT *els_pkt_ptr;
dea3101e 3293
3294 psli = &phba->sli;
dea3101e 3295 oldcmd = &oldiocb->iocb;
3296
3297 switch (flag) {
3298 case ELS_CMD_ACC:
92d7f7b0 3299 cmdsize = sizeof(uint32_t);
2e0fef85
JS
3300 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3301 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3302 if (!elsiocb) {
2e0fef85 3303 spin_lock_irq(shost->host_lock);
5024ab17 3304 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3305 spin_unlock_irq(shost->host_lock);
c9f8735b 3306 return 1;
dea3101e 3307 }
2e0fef85 3308
dea3101e 3309 icmd = &elsiocb->iocb;
3310 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3311 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3312 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3313 pcmd += sizeof(uint32_t);
858c9f6c
JS
3314
3315 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3316 "Issue ACC: did:x%x flg:x%x",
3317 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3318 break;
3319 case ELS_CMD_PLOGI:
92d7f7b0 3320 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
2e0fef85
JS
3321 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3322 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3323 if (!elsiocb)
c9f8735b 3324 return 1;
488d1469 3325
dea3101e 3326 icmd = &elsiocb->iocb;
3327 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3328 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3329
3330 if (mbox)
3331 elsiocb->context_un.mbox = mbox;
3332
3333 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0
JS
3334 pcmd += sizeof(uint32_t);
3335 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
858c9f6c
JS
3336
3337 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3338 "Issue ACC PLOGI: did:x%x flg:x%x",
3339 ndlp->nlp_DID, ndlp->nlp_flag, 0);
dea3101e 3340 break;
82d9a2a2 3341 case ELS_CMD_PRLO:
92d7f7b0 3342 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
2e0fef85 3343 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
82d9a2a2
JS
3344 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3345 if (!elsiocb)
3346 return 1;
3347
3348 icmd = &elsiocb->iocb;
3349 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3350 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3351
3352 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
92d7f7b0 3353 sizeof(uint32_t) + sizeof(PRLO));
82d9a2a2
JS
3354 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3355 els_pkt_ptr = (ELS_PKT *) pcmd;
3356 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
858c9f6c
JS
3357
3358 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3359 "Issue ACC PRLO: did:x%x flg:x%x",
3360 ndlp->nlp_DID, ndlp->nlp_flag, 0);
82d9a2a2 3361 break;
dea3101e 3362 default:
c9f8735b 3363 return 1;
dea3101e 3364 }
dea3101e 3365 /* Xmit ELS ACC response tag <ulpIoTag> */
e8b62011
JS
3366 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3367 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3368 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3369 elsiocb->iotag, elsiocb->iocb.ulpContext,
3370 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3371 ndlp->nlp_rpi);
dea3101e 3372 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2e0fef85 3373 spin_lock_irq(shost->host_lock);
c9f8735b 3374 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2e0fef85 3375 spin_unlock_irq(shost->host_lock);
dea3101e 3376 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3377 } else {
858c9f6c 3378 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 3379 }
3380
3381 phba->fc_stat.elsXmitACC++;
3772a991 3382 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e 3383 if (rc == IOCB_ERROR) {
3384 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3385 return 1;
dea3101e 3386 }
c9f8735b 3387 return 0;
dea3101e 3388}
3389
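/*
 * Editor's note: a minimal sketch, not part of the driver source, showing
 * how the unsolicited ELS handlers later in this file call
 * lpfc_els_rsp_acc().  A NULL @mbox means no mailbox command is chained to
 * the response completion; the helper name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void example_accept_unsolicited_els(struct lpfc_vport *vport,
					   struct lpfc_iocbq *cmdiocb,
					   struct lpfc_nodelist *ndlp)
{
	/* Plain ACC payload, no service parameters, no chained mailbox */
	if (lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "(example) failed to issue ACC\n");
}
#endif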
e59058c4 3390/**
3621a710 3391 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
e59058c4
JS
3392 * @vport: pointer to a virtual N_Port data structure.
3393 * @rejectError: reject status code to be placed in the LS_RJT payload.
3394 * @oldiocb: pointer to the original lpfc command iocb data structure.
3395 * @ndlp: pointer to a node-list data structure.
3396 * @mbox: pointer to the driver internal queue element for mailbox command.
3397 *
3398 * This routine prepares and issues a Reject (RJT) response IOCB
3399 * command. If a @mbox pointer is passed in, it will be put into the
3400 * context_un.mbox field of the IOCB for the completion callback function
3401 * to issue the mailbox command to the HBA later.
3402 *
3403 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3404 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3405 * will be stored into the context1 field of the IOCB for the completion
3406 * callback function to the reject response ELS IOCB command.
3407 *
3408 * Return code
3409 * 0 - Successfully issued reject response
3410 * 1 - Failed to issue reject response
3411 **/
dea3101e 3412int
2e0fef85 3413lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
858c9f6c
JS
3414 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3415 LPFC_MBOXQ_t *mbox)
dea3101e 3416{
2e0fef85 3417 struct lpfc_hba *phba = vport->phba;
dea3101e 3418 IOCB_t *icmd;
3419 IOCB_t *oldcmd;
3420 struct lpfc_iocbq *elsiocb;
dea3101e 3421 struct lpfc_sli *psli;
3422 uint8_t *pcmd;
3423 uint16_t cmdsize;
3424 int rc;
3425
3426 psli = &phba->sli;
92d7f7b0 3427 cmdsize = 2 * sizeof(uint32_t);
2e0fef85
JS
3428 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3429 ndlp->nlp_DID, ELS_CMD_LS_RJT);
488d1469 3430 if (!elsiocb)
c9f8735b 3431 return 1;
dea3101e 3432
3433 icmd = &elsiocb->iocb;
3434 oldcmd = &oldiocb->iocb;
3435 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3436 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3437
3438 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
92d7f7b0 3439 pcmd += sizeof(uint32_t);
dea3101e 3440 *((uint32_t *) (pcmd)) = rejectError;
3441
51ef4c26 3442 if (mbox)
858c9f6c 3443 elsiocb->context_un.mbox = mbox;
858c9f6c 3444
dea3101e 3445 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
e8b62011
JS
3446 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3447 "0129 Xmit ELS RJT x%x response tag x%x "
3448 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3449 "rpi x%x\n",
3450 rejectError, elsiocb->iotag,
3451 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3452 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
858c9f6c
JS
3453 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3454 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
3455 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3456
dea3101e 3457 phba->fc_stat.elsXmitLSRJT++;
858c9f6c 3458 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 3459 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
51ef4c26 3460
dea3101e 3461 if (rc == IOCB_ERROR) {
3462 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3463 return 1;
dea3101e 3464 }
c9f8735b 3465 return 0;
dea3101e 3466}
3467
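/*
 * Editor's note: a minimal sketch, not part of the driver source, of the
 * LS_RJT construction pattern used by lpfc_els_rcv_flogi() and other
 * unsolicited handlers further down in this file; the helper name is
 * hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void example_reject_unsolicited_els(struct lpfc_vport *vport,
					   struct lpfc_iocbq *cmdiocb,
					   struct lpfc_nodelist *ndlp)
{
	struct ls_rjt stat;

	memset(&stat, 0, sizeof(struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
}
#endif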
e59058c4 3468/**
3621a710 3469 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
e59058c4
JS
3470 * @vport: pointer to a virtual N_Port data structure.
3471 * @oldiocb: pointer to the original lpfc command iocb data structure.
3472 * @ndlp: pointer to a node-list data structure.
3473 *
3474 * This routine prepares and issues an Accept (ACC) response to Address
3475 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3476 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3477 *
3478 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3479 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3480 * will be stored into the context1 field of the IOCB for the completion
3481 * callback function to the ADISC Accept response ELS IOCB command.
3482 *
3483 * Return code
3484 * 0 - Successfully issued acc adisc response
3485 * 1 - Failed to issue adisc acc response
3486 **/
dea3101e 3487int
2e0fef85
JS
3488lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3489 struct lpfc_nodelist *ndlp)
dea3101e 3490{
2e0fef85 3491 struct lpfc_hba *phba = vport->phba;
dea3101e 3492 ADISC *ap;
2e0fef85 3493 IOCB_t *icmd, *oldcmd;
dea3101e 3494 struct lpfc_iocbq *elsiocb;
dea3101e 3495 uint8_t *pcmd;
3496 uint16_t cmdsize;
3497 int rc;
3498
92d7f7b0 3499 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2e0fef85
JS
3500 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3501 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3502 if (!elsiocb)
c9f8735b 3503 return 1;
dea3101e 3504
5b8bd0c9
JS
3505 icmd = &elsiocb->iocb;
3506 oldcmd = &oldiocb->iocb;
3507 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3508
dea3101e 3509 /* Xmit ADISC ACC response tag <ulpIoTag> */
e8b62011
JS
3510 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3511 "0130 Xmit ADISC ACC response iotag x%x xri: "
3512 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3513 elsiocb->iotag, elsiocb->iocb.ulpContext,
3514 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3515 ndlp->nlp_rpi);
dea3101e 3516 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3517
3518 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3519 pcmd += sizeof(uint32_t);
dea3101e 3520
3521 ap = (ADISC *) (pcmd);
3522 ap->hardAL_PA = phba->fc_pref_ALPA;
92d7f7b0
JS
3523 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3524 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2e0fef85 3525 ap->DID = be32_to_cpu(vport->fc_myDID);
dea3101e 3526
858c9f6c
JS
3527 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3528 "Issue ACC ADISC: did:x%x flg:x%x",
3529 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3530
dea3101e 3531 phba->fc_stat.elsXmitACC++;
858c9f6c 3532 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3772a991 3533 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e 3534 if (rc == IOCB_ERROR) {
3535 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3536 return 1;
dea3101e 3537 }
c9f8735b 3538 return 0;
dea3101e 3539}
3540
e59058c4 3541/**
3621a710 3542 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
e59058c4
JS
3543 * @vport: pointer to a virtual N_Port data structure.
3544 * @oldiocb: pointer to the original lpfc command iocb data structure.
3545 * @ndlp: pointer to a node-list data structure.
3546 *
3547 * This routine prepares and issues an Accept (ACC) response to Process
3548 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3549 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3550 *
3551 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3552 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3553 * will be stored into the context1 field of the IOCB for the completion
3554 * callback function to the PRLI Accept response ELS IOCB command.
3555 *
3556 * Return code
3557 * 0 - Successfully issued acc prli response
3558 * 1 - Failed to issue acc prli response
3559 **/
dea3101e 3560int
2e0fef85 3561lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5b8bd0c9 3562 struct lpfc_nodelist *ndlp)
dea3101e 3563{
2e0fef85 3564 struct lpfc_hba *phba = vport->phba;
dea3101e 3565 PRLI *npr;
3566 lpfc_vpd_t *vpd;
3567 IOCB_t *icmd;
3568 IOCB_t *oldcmd;
3569 struct lpfc_iocbq *elsiocb;
dea3101e 3570 struct lpfc_sli *psli;
3571 uint8_t *pcmd;
3572 uint16_t cmdsize;
3573 int rc;
3574
3575 psli = &phba->sli;
dea3101e 3576
92d7f7b0 3577 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2e0fef85 3578 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
92d7f7b0 3579 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
c9f8735b
JW
3580 if (!elsiocb)
3581 return 1;
dea3101e 3582
5b8bd0c9
JS
3583 icmd = &elsiocb->iocb;
3584 oldcmd = &oldiocb->iocb;
3585 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
dea3101e 3586 /* Xmit PRLI ACC response tag <ulpIoTag> */
e8b62011
JS
3587 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3588 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3589 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3590 elsiocb->iotag, elsiocb->iocb.ulpContext,
3591 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3592 ndlp->nlp_rpi);
dea3101e 3593 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3594
3595 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
92d7f7b0 3596 pcmd += sizeof(uint32_t);
dea3101e 3597
3598 /* For PRLI, remainder of payload is PRLI parameter page */
92d7f7b0 3599 memset(pcmd, 0, sizeof(PRLI));
dea3101e 3600
3601 npr = (PRLI *) pcmd;
3602 vpd = &phba->vpd;
3603 /*
0d2b6b83
JS
3604 * If the remote port is a target and our firmware version is 3.20 or
3605 * later, set the following bits for FC-TAPE support.
dea3101e 3606 */
0d2b6b83
JS
3607 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3608 (vpd->rev.feaLevelHigh >= 0x02)) {
dea3101e 3609 npr->ConfmComplAllowed = 1;
3610 npr->Retry = 1;
3611 npr->TaskRetryIdReq = 1;
3612 }
3613
3614 npr->acceptRspCode = PRLI_REQ_EXECUTED;
3615 npr->estabImagePair = 1;
3616 npr->readXferRdyDis = 1;
3617 npr->ConfmComplAllowed = 1;
3618
3619 npr->prliType = PRLI_FCP_TYPE;
3620 npr->initiatorFunc = 1;
3621
858c9f6c
JS
3622 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3623 "Issue ACC PRLI: did:x%x flg:x%x",
3624 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3625
dea3101e 3626 phba->fc_stat.elsXmitACC++;
858c9f6c 3627 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
dea3101e 3628
3772a991 3629 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e 3630 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3632 return 1;
dea3101e 3633 }
c9f8735b 3634 return 0;
dea3101e 3635}
3636
e59058c4 3637/**
3621a710 3638 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
e59058c4
JS
3639 * @vport: pointer to a virtual N_Port data structure.
3640 * @format: rnid command format.
3641 * @oldiocb: pointer to the original lpfc command iocb data structure.
3642 * @ndlp: pointer to a node-list data structure.
3643 *
3644 * This routine issues a Request Node Identification Data (RNID) Accept
3645 * (ACC) response. It constructs the RNID ACC response command according to
3646 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
3647 * issue the response. Note that this command does not need to hold the ndlp
3648 * reference count for the callback. So, the ndlp reference count taken by
3649 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
3650 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
3651 * there is no ndlp reference available.
3652 *
3653 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3654 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3655 * will be stored into the context1 field of the IOCB for the completion
3656 * callback function. However, for the RNID Accept Response ELS command,
3657 * this is undone later by this routine after the IOCB is allocated.
3658 *
3659 * Return code
3660 * 0 - Successfully issued acc rnid response
3661 * 1 - Failed to issue acc rnid response
3662 **/
dea3101e 3663static int
2e0fef85 3664lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
329f9bc7 3665 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
dea3101e 3666{
2e0fef85 3667 struct lpfc_hba *phba = vport->phba;
dea3101e 3668 RNID *rn;
2e0fef85 3669 IOCB_t *icmd, *oldcmd;
dea3101e 3670 struct lpfc_iocbq *elsiocb;
dea3101e 3671 struct lpfc_sli *psli;
3672 uint8_t *pcmd;
3673 uint16_t cmdsize;
3674 int rc;
3675
3676 psli = &phba->sli;
92d7f7b0
JS
3677 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3678 + (2 * sizeof(struct lpfc_name));
dea3101e 3679 if (format)
92d7f7b0 3680 cmdsize += sizeof(RNID_TOP_DISC);
dea3101e 3681
2e0fef85
JS
3682 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3683 ndlp->nlp_DID, ELS_CMD_ACC);
488d1469 3684 if (!elsiocb)
c9f8735b 3685 return 1;
dea3101e 3686
5b8bd0c9
JS
3687 icmd = &elsiocb->iocb;
3688 oldcmd = &oldiocb->iocb;
3689 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
dea3101e 3690 /* Xmit RNID ACC response tag <ulpIoTag> */
e8b62011
JS
3691 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3692 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
3693 elsiocb->iotag, elsiocb->iocb.ulpContext);
dea3101e 3694 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
dea3101e 3695 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 3696 pcmd += sizeof(uint32_t);
dea3101e 3697
92d7f7b0 3698 memset(pcmd, 0, sizeof(RNID));
dea3101e 3699 rn = (RNID *) (pcmd);
3700 rn->Format = format;
92d7f7b0
JS
3701 rn->CommonLen = (2 * sizeof(struct lpfc_name));
3702 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3703 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
dea3101e 3704 switch (format) {
3705 case 0:
3706 rn->SpecificLen = 0;
3707 break;
3708 case RNID_TOPOLOGY_DISC:
92d7f7b0 3709 rn->SpecificLen = sizeof(RNID_TOP_DISC);
dea3101e 3710 memcpy(&rn->un.topologyDisc.portName,
92d7f7b0 3711 &vport->fc_portname, sizeof(struct lpfc_name));
dea3101e 3712 rn->un.topologyDisc.unitType = RNID_HBA;
3713 rn->un.topologyDisc.physPort = 0;
3714 rn->un.topologyDisc.attachedNodes = 0;
3715 break;
3716 default:
3717 rn->CommonLen = 0;
3718 rn->SpecificLen = 0;
3719 break;
3720 }
3721
858c9f6c
JS
3722 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3723 "Issue ACC RNID: did:x%x flg:x%x",
3724 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3725
dea3101e 3726 phba->fc_stat.elsXmitACC++;
858c9f6c 3727 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
329f9bc7 3728 lpfc_nlp_put(ndlp);
dea3101e 3729 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3730 * it could be freed */
3731
3772a991 3732 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
dea3101e 3733 if (rc == IOCB_ERROR) {
3734 lpfc_els_free_iocb(phba, elsiocb);
c9f8735b 3735 return 1;
dea3101e 3736 }
c9f8735b 3737 return 0;
dea3101e 3738}
3739
e59058c4 3740/**
3621a710 3741 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
e59058c4
JS
3742 * @vport: pointer to a host virtual N_Port data structure.
3743 *
3744 * This routine issues Address Discover (ADISC) ELS commands to those
3745 * N_Ports which are in node port recovery state and for which ADISC has
3746 * not yet been issued on the @vport. Each time an ELS ADISC IOCB is issued
3747 * by invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
3748 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
3749 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag is
3750 * marked with the FC_NLP_MORE bit and issuing of the remaining ADISC IOCBs
3751 * is deferred for a later pass. On the other hand, if no ADISC IOCB is
3752 * issued after walking through all the ndlps on the @vport, the
3753 * FC_NLP_MORE bit is cleared from the @vport fc_flag, indicating no more
3754 * ADISCs need to be sent.
3755 *
3756 * Return code
3757 * The number of N_Ports with adisc issued.
3758 **/
dea3101e 3759int
2e0fef85 3760lpfc_els_disc_adisc(struct lpfc_vport *vport)
dea3101e 3761{
2e0fef85 3762 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 3763 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 3764 int sentadisc = 0;
dea3101e 3765
685f0bf7 3766 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2e0fef85 3767 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
3768 if (!NLP_CHK_NODE_ACT(ndlp))
3769 continue;
685f0bf7
JS
3770 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3771 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3772 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2e0fef85 3773 spin_lock_irq(shost->host_lock);
685f0bf7 3774 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2e0fef85 3775 spin_unlock_irq(shost->host_lock);
685f0bf7 3776 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3777 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3778 lpfc_issue_els_adisc(vport, ndlp, 0);
685f0bf7 3779 sentadisc++;
2e0fef85
JS
3780 vport->num_disc_nodes++;
3781 if (vport->num_disc_nodes >=
3de2a653 3782 vport->cfg_discovery_threads) {
2e0fef85
JS
3783 spin_lock_irq(shost->host_lock);
3784 vport->fc_flag |= FC_NLP_MORE;
3785 spin_unlock_irq(shost->host_lock);
685f0bf7 3786 break;
dea3101e 3787 }
3788 }
3789 }
3790 if (sentadisc == 0) {
2e0fef85
JS
3791 spin_lock_irq(shost->host_lock);
3792 vport->fc_flag &= ~FC_NLP_MORE;
3793 spin_unlock_irq(shost->host_lock);
dea3101e 3794 }
2fe165b6 3795 return sentadisc;
dea3101e 3796}
3797
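/*
 * Editor's note: a hypothetical caller sketch, not part of the driver
 * source.  It only illustrates how the return value and the FC_NLP_MORE
 * flag described above can be interpreted by discovery code.
 */
#if 0	/* illustrative only, not compiled */
static void example_check_adisc_phase(struct lpfc_vport *vport)
{
	if (lpfc_els_disc_adisc(vport) == 0 &&
	    !(vport->fc_flag & FC_NLP_MORE))
		/* Nothing sent and nothing deferred: ADISC phase is done */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "(example) ADISC phase complete\n");
}
#endif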
e59058c4 3798/**
3621a710 3799 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
e59058c4
JS
3800 * @vport: pointer to a host virtual N_Port data structure.
3801 *
3802 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
3803 * which are in node port recovery state on a @vport. Each time an ELS
3804 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
3805 * the per-@vport discovery count (num_disc_nodes) is incremented. If
3806 * num_disc_nodes reaches the pre-configured threshold
3807 * (cfg_discovery_threads), the @vport fc_flag is marked with the
3808 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs is deferred
3809 * for a later pass. On the other hand, if no PLOGI IOCB is issued after
3810 * walking through all the ndlps on the @vport, the FC_NLP_MORE bit is
3811 * cleared from the @vport fc_flag, indicating there are no more
3812 * PLOGIs to be sent.
3813 *
3814 * Return code
3815 * The number of N_Ports with plogi issued.
3816 **/
dea3101e 3817int
2e0fef85 3818lpfc_els_disc_plogi(struct lpfc_vport *vport)
dea3101e 3819{
2e0fef85 3820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 3821 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 3822 int sentplogi = 0;
dea3101e 3823
2e0fef85
JS
3824 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
3825 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
3826 if (!NLP_CHK_NODE_ACT(ndlp))
3827 continue;
685f0bf7
JS
3828 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3829 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3830 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
3831 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
3832 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85
JS
3833 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3834 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
685f0bf7 3835 sentplogi++;
2e0fef85
JS
3836 vport->num_disc_nodes++;
3837 if (vport->num_disc_nodes >=
3de2a653 3838 vport->cfg_discovery_threads) {
2e0fef85
JS
3839 spin_lock_irq(shost->host_lock);
3840 vport->fc_flag |= FC_NLP_MORE;
3841 spin_unlock_irq(shost->host_lock);
685f0bf7 3842 break;
dea3101e 3843 }
3844 }
3845 }
87af33fe
JS
3846 if (sentplogi) {
3847 lpfc_set_disctmo(vport);
3848 }
3849 else {
2e0fef85
JS
3850 spin_lock_irq(shost->host_lock);
3851 vport->fc_flag &= ~FC_NLP_MORE;
3852 spin_unlock_irq(shost->host_lock);
dea3101e 3853 }
2fe165b6 3854 return sentplogi;
dea3101e 3855}
3856
e59058c4 3857/**
3621a710 3858 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
e59058c4
JS
3859 * @vport: pointer to a host virtual N_Port data structure.
3860 *
3861 * This routine cleans up any Registration State Change Notification
3862 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
3863 * @vport together with the host_lock is used to prevent multiple threads
3864 * from accessing the RSCN array on the same @vport at the same time.
3865 **/
92d7f7b0 3866void
2e0fef85 3867lpfc_els_flush_rscn(struct lpfc_vport *vport)
dea3101e 3868{
2e0fef85
JS
3869 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3870 struct lpfc_hba *phba = vport->phba;
dea3101e 3871 int i;
3872
7f5f3d0d
JS
3873 spin_lock_irq(shost->host_lock);
3874 if (vport->fc_rscn_flush) {
3875 /* Another thread is walking fc_rscn_id_list on this vport */
3876 spin_unlock_irq(shost->host_lock);
3877 return;
3878 }
3879 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
3880 vport->fc_rscn_flush = 1;
3881 spin_unlock_irq(shost->host_lock);
3882
2e0fef85 3883 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0 3884 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2e0fef85 3885 vport->fc_rscn_id_list[i] = NULL;
dea3101e 3886 }
2e0fef85
JS
3887 spin_lock_irq(shost->host_lock);
3888 vport->fc_rscn_id_cnt = 0;
3889 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
3890 spin_unlock_irq(shost->host_lock);
3891 lpfc_can_disctmo(vport);
7f5f3d0d
JS
3892 /* Indicate we are done walking this fc_rscn_id_list */
3893 vport->fc_rscn_flush = 0;
dea3101e 3894}
3895
e59058c4 3896/**
3621a710 3897 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
e59058c4
JS
3898 * @vport: pointer to a host virtual N_Port data structure.
3899 * @did: remote destination port identifier.
3900 *
3901 * This routine checks whether there is any pending Registration State
3902 * Change Notification (RSCN) to a @did on @vport.
3903 *
3904 * Return code
3905 * Non-zero - The @did matched with a pending rscn
3906 * 0 - not able to match @did with a pending rscn
3907 **/
dea3101e 3908int
2e0fef85 3909lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
dea3101e 3910{
3911 D_ID ns_did;
3912 D_ID rscn_did;
dea3101e 3913 uint32_t *lp;
92d7f7b0 3914 uint32_t payload_len, i;
7f5f3d0d 3915 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 3916
3917 ns_did.un.word = did;
dea3101e 3918
3919 /* Never match fabric nodes for RSCNs */
3920 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2e0fef85 3921 return 0;
dea3101e 3922
3923 /* If we are doing a FULL RSCN rediscovery, match everything */
2e0fef85 3924 if (vport->fc_flag & FC_RSCN_DISCOVERY)
c9f8735b 3925 return did;
dea3101e 3926
7f5f3d0d
JS
3927 spin_lock_irq(shost->host_lock);
3928 if (vport->fc_rscn_flush) {
3929 /* Another thread is walking fc_rscn_id_list on this vport */
3930 spin_unlock_irq(shost->host_lock);
3931 return 0;
3932 }
3933 /* Indicate we are walking fc_rscn_id_list on this vport */
3934 vport->fc_rscn_flush = 1;
3935 spin_unlock_irq(shost->host_lock);
2e0fef85 3936 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
92d7f7b0
JS
3937 lp = vport->fc_rscn_id_list[i]->virt;
3938 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
3939 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 3940 while (payload_len) {
92d7f7b0
JS
3941 rscn_did.un.word = be32_to_cpu(*lp++);
3942 payload_len -= sizeof(uint32_t);
eaf15d5b
JS
3943 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3944 case RSCN_ADDRESS_FORMAT_PORT:
6fb120a7
JS
3945 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3946 && (ns_did.un.b.area == rscn_did.un.b.area)
3947 && (ns_did.un.b.id == rscn_did.un.b.id))
7f5f3d0d 3948 goto return_did_out;
dea3101e 3949 break;
eaf15d5b 3950 case RSCN_ADDRESS_FORMAT_AREA:
dea3101e 3951 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3952 && (ns_did.un.b.area == rscn_did.un.b.area))
7f5f3d0d 3953 goto return_did_out;
dea3101e 3954 break;
eaf15d5b 3955 case RSCN_ADDRESS_FORMAT_DOMAIN:
dea3101e 3956 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7f5f3d0d 3957 goto return_did_out;
dea3101e 3958 break;
eaf15d5b 3959 case RSCN_ADDRESS_FORMAT_FABRIC:
7f5f3d0d 3960 goto return_did_out;
dea3101e 3961 }
3962 }
92d7f7b0 3963 }
7f5f3d0d
JS
3964 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3965 vport->fc_rscn_flush = 0;
92d7f7b0 3966 return 0;
7f5f3d0d
JS
3967return_did_out:
3968 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3969 vport->fc_rscn_flush = 0;
3970 return did;
dea3101e 3971}
3972
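/*
 * Editor's note: a minimal sketch, not part of the driver source, of the
 * fc_rscn_flush "token" pattern used by lpfc_els_flush_rscn(),
 * lpfc_rscn_payload_check() and lpfc_els_rcv_rscn() to keep more than one
 * thread from walking fc_rscn_id_list at once; the helper name is
 * hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void example_walk_rscn_id_list(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int i;

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread owns the list; back off */
		spin_unlock_irq(shost->host_lock);
		return;
	}
	vport->fc_rscn_flush = 1;		/* take the token */
	spin_unlock_irq(shost->host_lock);

	for (i = 0; i < vport->fc_rscn_id_cnt; i++)
		;	/* inspect vport->fc_rscn_id_list[i] here */

	vport->fc_rscn_flush = 0;		/* give the token back */
}
#endif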
e59058c4 3973/**
3621a710 3974 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
e59058c4
JS
3975 * @vport: pointer to a host virtual N_Port data structure.
3976 *
3977 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
3978 * state machine for a @vport's nodes that have a pending RSCN (Registration
3979 * State Change Notification).
3980 *
3981 * Return code
3982 * 0 - Successful (currently always returns 0)
3983 **/
dea3101e 3984static int
2e0fef85 3985lpfc_rscn_recovery_check(struct lpfc_vport *vport)
dea3101e 3986{
685f0bf7 3987 struct lpfc_nodelist *ndlp = NULL;
dea3101e 3988
0d2b6b83 3989	/* Move all nodes affected by pending RSCNs to NPR state. */
2e0fef85 3990 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093 3991 if (!NLP_CHK_NODE_ACT(ndlp) ||
0d2b6b83
JS
3992 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
3993 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
685f0bf7 3994 continue;
2e0fef85 3995 lpfc_disc_state_machine(vport, ndlp, NULL,
0d2b6b83
JS
3996 NLP_EVT_DEVICE_RECOVERY);
3997 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e 3998 }
c9f8735b 3999 return 0;
dea3101e 4000}
4001
ddcc50f0 4002/**
3621a710 4003 * lpfc_send_rscn_event - Send an RSCN event to management application
ddcc50f0
JS
4004 * @vport: pointer to a host virtual N_Port data structure.
4005 * @cmdiocb: pointer to lpfc command iocb data structure.
4006 *
4007 * lpfc_send_rscn_event sends an RSCN netlink event to management
4008 * applications.
4009 */
4010static void
4011lpfc_send_rscn_event(struct lpfc_vport *vport,
4012 struct lpfc_iocbq *cmdiocb)
4013{
4014 struct lpfc_dmabuf *pcmd;
4015 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4016 uint32_t *payload_ptr;
4017 uint32_t payload_len;
4018 struct lpfc_rscn_event_header *rscn_event_data;
4019
4020 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4021 payload_ptr = (uint32_t *) pcmd->virt;
4022 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4023
4024 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4025 payload_len, GFP_KERNEL);
4026 if (!rscn_event_data) {
4027 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4028 "0147 Failed to allocate memory for RSCN event\n");
4029 return;
4030 }
4031 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4032 rscn_event_data->payload_length = payload_len;
4033 memcpy(rscn_event_data->rscn_payload, payload_ptr,
4034 payload_len);
4035
4036 fc_host_post_vendor_event(shost,
4037 fc_get_event_number(),
4038 sizeof(struct lpfc_els_event_header) + payload_len,
4039 (char *)rscn_event_data,
4040 LPFC_NL_VENDOR_ID);
4041
4042 kfree(rscn_event_data);
4043}
4044
e59058c4 4045/**
3621a710 4046 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
e59058c4
JS
4047 * @vport: pointer to a host virtual N_Port data structure.
4048 * @cmdiocb: pointer to lpfc command iocb data structure.
4049 * @ndlp: pointer to a node-list data structure.
4050 *
4051 * This routine processes an unsolicited RSCN (Registration State Change
4052 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4053 * and the fc_host_post_event() routine is invoked to notify the FC
4054 * transport layer. If the discovery state machine is about to begin
4055 * discovery, it just accepts the RSCN and the discovery process will
4056 * satisfy it. If this RSCN only contains N_Port IDs for other vports on
4057 * this HBA, it just accepts the RSCN and ignores it. If the state machine
4058 * is in the recovery state, the fc_rscn_id_list of this @vport is walked
4059 * and the lpfc_rscn_recovery_check() routine is invoked to send a recovery
4060 * event for all nodes that match the RSCN payload. Otherwise, the
4061 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
4062 *
4063 * Return code
4064 * 0 - Just sent the acc response
4065 * 1 - Sent the acc response and waited for name server completion
4066 **/
dea3101e 4067static int
2e0fef85 4068lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 4069 struct lpfc_nodelist *ndlp)
dea3101e 4070{
2e0fef85
JS
4071 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4072 struct lpfc_hba *phba = vport->phba;
dea3101e 4073 struct lpfc_dmabuf *pcmd;
92d7f7b0 4074 uint32_t *lp, *datap;
dea3101e 4075 IOCB_t *icmd;
92d7f7b0 4076 uint32_t payload_len, length, nportid, *cmd;
7f5f3d0d 4077 int rscn_cnt;
92d7f7b0 4078 int rscn_id = 0, hba_id = 0;
d2873e4c 4079 int i;
dea3101e 4080
4081 icmd = &cmdiocb->iocb;
4082 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4083 lp = (uint32_t *) pcmd->virt;
4084
92d7f7b0
JS
4085 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4086 payload_len -= sizeof(uint32_t); /* take off word 0 */
dea3101e 4087 /* RSCN received */
e8b62011
JS
4088 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4089 "0214 RSCN received Data: x%x x%x x%x x%x\n",
7f5f3d0d
JS
4090 vport->fc_flag, payload_len, *lp,
4091 vport->fc_rscn_id_cnt);
ddcc50f0
JS
4092
4093 /* Send an RSCN event to the management application */
4094 lpfc_send_rscn_event(vport, cmdiocb);
4095
d2873e4c 4096 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2e0fef85 4097 fc_host_post_event(shost, fc_get_event_number(),
d2873e4c
JS
4098 FCH_EVT_RSCN, lp[i]);
4099
dea3101e 4100 /* If we are about to begin discovery, just ACC the RSCN.
4101 * Discovery processing will satisfy it.
4102 */
2e0fef85 4103 if (vport->port_state <= LPFC_NS_QRY) {
858c9f6c
JS
4104 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4105 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4106 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4107
51ef4c26 4108 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
c9f8735b 4109 return 0;
dea3101e 4110 }
4111
92d7f7b0
JS
4112 /* If this RSCN just contains NPortIDs for other vports on this HBA,
4113 * just ACC and ignore it.
4114 */
4115 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3de2a653 4116 !(vport->cfg_peer_port_login)) {
92d7f7b0
JS
4117 i = payload_len;
4118 datap = lp;
4119 while (i > 0) {
4120 nportid = *datap++;
4121 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4122 i -= sizeof(uint32_t);
4123 rscn_id++;
549e55cd
JS
4124 if (lpfc_find_vport_by_did(phba, nportid))
4125 hba_id++;
92d7f7b0
JS
4126 }
4127 if (rscn_id == hba_id) {
4128 /* ALL NPortIDs in RSCN are on HBA */
e8b62011 4129 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
d7c255b2 4130 "0219 Ignore RSCN "
e8b62011
JS
4131 "Data: x%x x%x x%x x%x\n",
4132 vport->fc_flag, payload_len,
7f5f3d0d 4133 *lp, vport->fc_rscn_id_cnt);
858c9f6c
JS
4134 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4135 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4136 ndlp->nlp_DID, vport->port_state,
4137 ndlp->nlp_flag);
4138
92d7f7b0 4139 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
51ef4c26 4140 ndlp, NULL);
92d7f7b0
JS
4141 return 0;
4142 }
4143 }
4144
7f5f3d0d
JS
4145 spin_lock_irq(shost->host_lock);
4146 if (vport->fc_rscn_flush) {
4147 /* Another thread is walking fc_rscn_id_list on this vport */
7f5f3d0d 4148 vport->fc_flag |= FC_RSCN_DISCOVERY;
97957244 4149 spin_unlock_irq(shost->host_lock);
58da1ffb
JS
4150 /* Send back ACC */
4151 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7f5f3d0d
JS
4152 return 0;
4153 }
4154 /* Indicate we are walking fc_rscn_id_list on this vport */
4155 vport->fc_rscn_flush = 1;
4156 spin_unlock_irq(shost->host_lock);
af901ca1 4157	/* Get the array count once we successfully hold the token */
7f5f3d0d 4158 rscn_cnt = vport->fc_rscn_id_cnt;
dea3101e 4159 /* If we are already processing an RSCN, save the received
4160	 * RSCN payload buffer (cmdiocb->context2) to process later.
4161 */
2e0fef85 4162 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
858c9f6c
JS
4163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4164 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4165 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4166
09372820 4167 spin_lock_irq(shost->host_lock);
92d7f7b0
JS
4168 vport->fc_flag |= FC_RSCN_DEFERRED;
4169 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2e0fef85 4170 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2e0fef85
JS
4171 vport->fc_flag |= FC_RSCN_MODE;
4172 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
4173 if (rscn_cnt) {
4174 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4175 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4176 }
4177 if ((rscn_cnt) &&
4178 (payload_len + length <= LPFC_BPL_SIZE)) {
4179 *cmd &= ELS_CMD_MASK;
7f5f3d0d 4180 *cmd |= cpu_to_be32(payload_len + length);
92d7f7b0
JS
4181 memcpy(((uint8_t *)cmd) + length, lp,
4182 payload_len);
4183 } else {
4184 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4185 vport->fc_rscn_id_cnt++;
4186			/* If we zero cmdiocb->context2, the calling
4187 * routine will not try to free it.
4188 */
4189 cmdiocb->context2 = NULL;
4190 }
dea3101e 4191 /* Deferred RSCN */
e8b62011
JS
4192 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4193 "0235 Deferred RSCN "
4194 "Data: x%x x%x x%x\n",
4195 vport->fc_rscn_id_cnt, vport->fc_flag,
4196 vport->port_state);
dea3101e 4197 } else {
2e0fef85
JS
4198 vport->fc_flag |= FC_RSCN_DISCOVERY;
4199 spin_unlock_irq(shost->host_lock);
dea3101e 4200 /* ReDiscovery RSCN */
e8b62011
JS
4201 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4202 "0234 ReDiscovery RSCN "
4203 "Data: x%x x%x x%x\n",
4204 vport->fc_rscn_id_cnt, vport->fc_flag,
4205 vport->port_state);
dea3101e 4206 }
7f5f3d0d
JS
4207 /* Indicate we are done walking fc_rscn_id_list on this vport */
4208 vport->fc_rscn_flush = 0;
dea3101e 4209 /* Send back ACC */
51ef4c26 4210 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4211 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 4212 lpfc_rscn_recovery_check(vport);
09372820 4213 spin_lock_irq(shost->host_lock);
92d7f7b0 4214 vport->fc_flag &= ~FC_RSCN_DEFERRED;
09372820 4215 spin_unlock_irq(shost->host_lock);
c9f8735b 4216 return 0;
dea3101e 4217 }
858c9f6c
JS
4218 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4219 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4220 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4221
2e0fef85
JS
4222 spin_lock_irq(shost->host_lock);
4223 vport->fc_flag |= FC_RSCN_MODE;
4224 spin_unlock_irq(shost->host_lock);
4225 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7f5f3d0d
JS
4226 /* Indicate we are done walking fc_rscn_id_list on this vport */
4227 vport->fc_rscn_flush = 0;
dea3101e 4228 /*
4229	 * If we zero cmdiocb->context2, the calling routine will
4230 * not try to free it.
4231 */
4232 cmdiocb->context2 = NULL;
2e0fef85 4233 lpfc_set_disctmo(vport);
dea3101e 4234 /* Send back ACC */
51ef4c26 4235 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4236 /* send RECOVERY event for ALL nodes that match RSCN payload */
2e0fef85 4237 lpfc_rscn_recovery_check(vport);
2e0fef85 4238 return lpfc_els_handle_rscn(vport);
dea3101e 4239}
4240
e59058c4 4241/**
3621a710 4242 * lpfc_els_handle_rscn - Handle rscn for a vport
e59058c4
JS
4243 * @vport: pointer to a host virtual N_Port data structure.
4244 *
4245 * This routine handles the Registration State Change Notification
4246 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4247 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4248 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4249 * NameServer shall be issued. If CT command to the NameServer fails to be
4250 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4251 * RSCN activities with the @vport.
4252 *
4253 * Return code
4254 * 0 - Cleaned up rscn on the @vport
4255 * 1 - Wait for plogi to name server before proceed
4256 **/
dea3101e 4257int
2e0fef85 4258lpfc_els_handle_rscn(struct lpfc_vport *vport)
dea3101e 4259{
4260 struct lpfc_nodelist *ndlp;
2e0fef85 4261 struct lpfc_hba *phba = vport->phba;
dea3101e 4262
92d7f7b0
JS
4263 /* Ignore RSCN if the port is being torn down. */
4264 if (vport->load_flag & FC_UNLOADING) {
4265 lpfc_els_flush_rscn(vport);
4266 return 0;
4267 }
4268
dea3101e 4269 /* Start timer for RSCN processing */
2e0fef85 4270 lpfc_set_disctmo(vport);
dea3101e 4271
4272 /* RSCN processed */
e8b62011
JS
4273 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4274 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4275 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4276 vport->port_state);
dea3101e 4277
4278 /* To process RSCN, first compare RSCN data with NameServer */
2e0fef85 4279 vport->fc_ns_retry = 0;
0ff10d46
JS
4280 vport->num_disc_nodes = 0;
4281
2e0fef85 4282 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093
JS
4283 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4284 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
dea3101e 4285 /* Good ndlp, issue CT Request to NameServer */
92d7f7b0 4286 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
dea3101e 4287 /* Wait for NameServer query cmpl before we can
4288 continue */
c9f8735b 4289 return 1;
dea3101e 4290 } else {
4291 /* If login to NameServer does not exist, issue one */
4292 /* Good status, issue PLOGI to NameServer */
2e0fef85 4293 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093 4294 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
dea3101e 4295 /* Wait for NameServer login cmpl before we can
4296 continue */
c9f8735b 4297 return 1;
2e0fef85 4298
e47c9093
JS
4299 if (ndlp) {
4300 ndlp = lpfc_enable_node(vport, ndlp,
4301 NLP_STE_PLOGI_ISSUE);
4302 if (!ndlp) {
4303 lpfc_els_flush_rscn(vport);
4304 return 0;
4305 }
4306 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
dea3101e 4307 } else {
e47c9093
JS
4308 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4309 if (!ndlp) {
4310 lpfc_els_flush_rscn(vport);
4311 return 0;
4312 }
2e0fef85 4313 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5024ab17 4314 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 4315 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
dea3101e 4316 }
e47c9093
JS
4317 ndlp->nlp_type |= NLP_FABRIC;
4318 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4319 /* Wait for NameServer login cmpl before we can
4320 * continue
4321 */
4322 return 1;
dea3101e 4323 }
4324
2e0fef85 4325 lpfc_els_flush_rscn(vport);
c9f8735b 4326 return 0;
dea3101e 4327}
4328
e59058c4 4329/**
3621a710 4330 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
e59058c4
JS
4331 * @vport: pointer to a host virtual N_Port data structure.
4332 * @cmdiocb: pointer to lpfc command iocb data structure.
4333 * @ndlp: pointer to a node-list data structure.
4334 *
4335 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4336 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4337 * point topology. As an unsolicited FLOGI should not be received in a loop
4338 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4339 * lpfc_check_sparm() routine is invoked to check the parameters in the
4340 * unsolicited FLOGI. If parameter validation fails, the routine
4341 * lpfc_els_rsp_reject() shall be called with reject reason code set to
4342 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4343 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4344 * will initiate PLOGI. The party with the higher lexicographical value has
4345 * the higher priority (as the winning port) and will initiate PLOGI and
4346 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4347 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4348 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4349 *
4350 * Return code
4351 * 0 - Successfully processed the unsolicited flogi
4352 * 1 - Failed to process the unsolicited flogi
4353 **/
dea3101e 4354static int
2e0fef85 4355lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
51ef4c26 4356 struct lpfc_nodelist *ndlp)
dea3101e 4357{
2e0fef85
JS
4358 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4359 struct lpfc_hba *phba = vport->phba;
dea3101e 4360 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4361 uint32_t *lp = (uint32_t *) pcmd->virt;
4362 IOCB_t *icmd = &cmdiocb->iocb;
4363 struct serv_parm *sp;
4364 LPFC_MBOXQ_t *mbox;
4365 struct ls_rjt stat;
4366 uint32_t cmd, did;
4367 int rc;
4368
4369 cmd = *lp++;
4370 sp = (struct serv_parm *) lp;
4371
4372 /* FLOGI received */
4373
2e0fef85 4374 lpfc_set_disctmo(vport);
dea3101e 4375
4376 if (phba->fc_topology == TOPOLOGY_LOOP) {
4377 /* We should never receive a FLOGI in loop mode, ignore it */
4378 did = icmd->un.elsreq64.remoteID;
4379
4380 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
4381 Loop Mode */
e8b62011
JS
4382 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4383 "0113 An FLOGI ELS command x%x was "
4384 "received from DID x%x in Loop Mode\n",
4385 cmd, did);
c9f8735b 4386 return 1;
dea3101e 4387 }
4388
4389 did = Fabric_DID;
4390
341af102 4391 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
dea3101e 4392 /* For a FLOGI we accept: if our portname is greater
 4393 * than the remote portname, we initiate Nport login.
4394 */
4395
2e0fef85 4396 rc = memcmp(&vport->fc_portname, &sp->portName,
92d7f7b0 4397 sizeof(struct lpfc_name));
dea3101e 4398
4399 if (!rc) {
2e0fef85
JS
4400 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4401 if (!mbox)
c9f8735b 4402 return 1;
2e0fef85 4403
dea3101e 4404 lpfc_linkdown(phba);
4405 lpfc_init_link(phba, mbox,
4406 phba->cfg_topology,
4407 phba->cfg_link_speed);
04c68496 4408 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
dea3101e 4409 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
ed957684 4410 mbox->vport = vport;
0b727fea 4411 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5b8bd0c9 4412 lpfc_set_loopback_flag(phba);
dea3101e 4413 if (rc == MBX_NOT_FINISHED) {
329f9bc7 4414 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 4415 }
c9f8735b 4416 return 1;
2fe165b6 4417 } else if (rc > 0) { /* greater than */
2e0fef85
JS
4418 spin_lock_irq(shost->host_lock);
4419 vport->fc_flag |= FC_PT2PT_PLOGI;
4420 spin_unlock_irq(shost->host_lock);
dea3101e 4421 }
2e0fef85
JS
4422 spin_lock_irq(shost->host_lock);
4423 vport->fc_flag |= FC_PT2PT;
4424 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4425 spin_unlock_irq(shost->host_lock);
dea3101e 4426 } else {
 4427 /* Reject this request because of invalid parameters */
4428 stat.un.b.lsRjtRsvd0 = 0;
4429 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4430 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4431 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4432 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4433 NULL);
c9f8735b 4434 return 1;
dea3101e 4435 }
4436
4437 /* Send back ACC */
51ef4c26 4438 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
dea3101e 4439
c9f8735b 4440 return 0;
dea3101e 4441}
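/*
 * Illustrative sketch (not part of the driver): how the point-to-point
 * "winner" above is decided.  struct lpfc_name carries the 8-byte WWPN in
 * wire order, so a plain memcmp() yields the lexicographic comparison the
 * comment block refers to.  The helper name below is hypothetical.
 *
 *	static int lpfc_pt2pt_initiates_plogi(struct lpfc_vport *vport,
 *					      struct serv_parm *sp)
 *	{
 *		// > 0 means our Port WWN is higher, so we send the PLOGI
 *		return memcmp(&vport->fc_portname, &sp->portName,
 *			      sizeof(struct lpfc_name)) > 0;
 *	}
 */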
4442
e59058c4 4443/**
3621a710 4444 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
e59058c4
JS
4445 * @vport: pointer to a host virtual N_Port data structure.
4446 * @cmdiocb: pointer to lpfc command iocb data structure.
4447 * @ndlp: pointer to a node-list data structure.
4448 *
4449 * This routine processes Request Node Identification Data (RNID) IOCB
4450 * received as an ELS unsolicited event. Only when the RNID specified format
4451 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
4452 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
4453 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4454 * rejected by invoking the lpfc_els_rsp_reject() routine.
4455 *
4456 * Return code
4457 * 0 - Successfully processed rnid iocb (currently always return 0)
4458 **/
dea3101e 4459static int
2e0fef85
JS
4460lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4461 struct lpfc_nodelist *ndlp)
dea3101e 4462{
4463 struct lpfc_dmabuf *pcmd;
4464 uint32_t *lp;
4465 IOCB_t *icmd;
4466 RNID *rn;
4467 struct ls_rjt stat;
4468 uint32_t cmd, did;
4469
4470 icmd = &cmdiocb->iocb;
4471 did = icmd->un.elsreq64.remoteID;
4472 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4473 lp = (uint32_t *) pcmd->virt;
4474
4475 cmd = *lp++;
4476 rn = (RNID *) lp;
4477
4478 /* RNID received */
4479
4480 switch (rn->Format) {
4481 case 0:
4482 case RNID_TOPOLOGY_DISC:
4483 /* Send back ACC */
2e0fef85 4484 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
dea3101e 4485 break;
4486 default:
4487 /* Reject this request because format not supported */
4488 stat.un.b.lsRjtRsvd0 = 0;
4489 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4490 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4491 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4492 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4493 NULL);
dea3101e 4494 }
c9f8735b 4495 return 0;
dea3101e 4496}
4497
e59058c4 4498/**
3621a710 4499 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
e59058c4
JS
4500 * @vport: pointer to a host virtual N_Port data structure.
4501 * @cmdiocb: pointer to lpfc command iocb data structure.
4502 * @ndlp: pointer to a node-list data structure.
4503 *
4504 * This routine processes a Link Incident Report Registration(LIRR) IOCB
4505 * received as an ELS unsolicited event. Currently, this function just invokes
4506 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4507 *
4508 * Return code
4509 * 0 - Successfully processed lirr iocb (currently always return 0)
4510 **/
dea3101e 4511static int
2e0fef85
JS
4512lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4513 struct lpfc_nodelist *ndlp)
7bb3b137
JW
4514{
4515 struct ls_rjt stat;
4516
4517 /* For now, unconditionally reject this command */
4518 stat.un.b.lsRjtRsvd0 = 0;
4519 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4520 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4521 stat.un.b.vendorUnique = 0;
858c9f6c 4522 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
4523 return 0;
4524}
4525
5ffc266e
JS
4526/**
4527 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4528 * @vport: pointer to a host virtual N_Port data structure.
4529 * @cmdiocb: pointer to lpfc command iocb data structure.
4530 * @ndlp: pointer to a node-list data structure.
4531 *
4532 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4533 * received as an ELS unsolicited event. A request to RRQ shall only
4534 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4535 * Nx_Port N_Port_ID of the target Exchange is the same as the
4536 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4537 * not accepted, an LS_RJT with reason code "Unable to perform
4538 * command request" and reason code explanation "Invalid Originator
4539 * S_ID" shall be returned. For now, we just unconditionally accept
4540 * RRQ from the target.
4541 **/
4542static void
4543lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4544 struct lpfc_nodelist *ndlp)
4545{
4546 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4547}
4548
e59058c4 4549/**
3621a710 4550 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
e59058c4
JS
4551 * @phba: pointer to lpfc hba data structure.
4552 * @pmb: pointer to the driver internal queue element for mailbox command.
4553 *
4554 * This routine is the completion callback function for the MBX_READ_LNK_STAT
4555 * mailbox command. This callback function is to actually send the Accept
4556 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
4557 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
4558 * mailbox command, constructs the RPS response with the link statistics
4559 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
4560 * response to the RPS.
4561 *
4562 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4563 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4564 * will be stored into the context1 field of the IOCB for the completion
4565 * callback function to the RPS Accept Response ELS IOCB command.
4566 *
4567 **/
082c0266 4568static void
329f9bc7 4569lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7bb3b137 4570{
7bb3b137
JW
4571 MAILBOX_t *mb;
4572 IOCB_t *icmd;
4573 RPS_RSP *rps_rsp;
4574 uint8_t *pcmd;
4575 struct lpfc_iocbq *elsiocb;
4576 struct lpfc_nodelist *ndlp;
4577 uint16_t xri, status;
4578 uint32_t cmdsize;
4579
04c68496 4580 mb = &pmb->u.mb;
7bb3b137
JW
4581
4582 ndlp = (struct lpfc_nodelist *) pmb->context2;
4583 xri = (uint16_t) ((unsigned long)(pmb->context1));
041976fb
RD
4584 pmb->context1 = NULL;
4585 pmb->context2 = NULL;
7bb3b137
JW
4586
4587 if (mb->mbxStatus) {
329f9bc7 4588 mempool_free(pmb, phba->mbox_mem_pool);
7bb3b137
JW
4589 return;
4590 }
4591
4592 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
329f9bc7 4593 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
4594 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
4595 lpfc_max_els_tries, ndlp,
4596 ndlp->nlp_DID, ELS_CMD_ACC);
fa4066b6
JS
4597
4598 /* Decrement the ndlp reference count from previous mbox command */
329f9bc7 4599 lpfc_nlp_put(ndlp);
fa4066b6 4600
c9f8735b 4601 if (!elsiocb)
7bb3b137 4602 return;
7bb3b137
JW
4603
4604 icmd = &elsiocb->iocb;
4605 icmd->ulpContext = xri;
4606
4607 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4608 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4609 pcmd += sizeof(uint32_t); /* Skip past command */
7bb3b137
JW
4610 rps_rsp = (RPS_RSP *)pcmd;
4611
4612 if (phba->fc_topology != TOPOLOGY_LOOP)
4613 status = 0x10;
4614 else
4615 status = 0x8;
2e0fef85 4616 if (phba->pport->fc_flag & FC_FABRIC)
7bb3b137
JW
4617 status |= 0x4;
4618
4619 rps_rsp->rsvd1 = 0;
09372820
JS
4620 rps_rsp->portStatus = cpu_to_be16(status);
4621 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
4622 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
4623 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
4624 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
4625 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
4626 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7bb3b137 4627 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
e8b62011
JS
4628 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
4629 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
4630 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4631 elsiocb->iotag, elsiocb->iocb.ulpContext,
4632 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4633 ndlp->nlp_rpi);
858c9f6c 4634 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 4635 phba->fc_stat.elsXmitACC++;
3772a991 4636 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7bb3b137 4637 lpfc_els_free_iocb(phba, elsiocb);
7bb3b137
JW
4638 return;
4639}
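/*
 * Deferred-response pattern, completion side (illustrative sketch only):
 * lpfc_els_rcv_rps() below parks the receive exchange id in pmb->context1
 * and a referenced ndlp in pmb->context2, so this mailbox completion can
 * rebuild the ELS ACC without the original receive iocb:
 *
 *	xri  = (uint16_t)((unsigned long)pmb->context1);   // exchange to ACC
 *	ndlp = (struct lpfc_nodelist *)pmb->context2;      // holds a reference
 *	// ... build the ACC with icmd->ulpContext = xri ...
 *	lpfc_nlp_put(ndlp);        // drop the reference taken at submit time
 */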
4640
e59058c4 4641/**
3621a710 4642 * lpfc_els_rcv_rps - Process an unsolicited rps iocb
e59058c4
JS
4643 * @vport: pointer to a host virtual N_Port data structure.
4644 * @cmdiocb: pointer to lpfc command iocb data structure.
4645 * @ndlp: pointer to a node-list data structure.
4646 *
4647 * This routine processes Read Port Status (RPS) IOCB received as an
4648 * ELS unsolicited event. It first checks the remote port state. If the
4649 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
4650 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 4651 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
 4652 * to read the HBA link statistics. The callback function,
 4653 * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command,
 4654 * then actually sends out the RPS Accept (ACC) response.
4655 *
4656 * Return codes
4657 * 0 - Successfully processed rps iocb (currently always return 0)
4658 **/
7bb3b137 4659static int
2e0fef85
JS
4660lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4661 struct lpfc_nodelist *ndlp)
dea3101e 4662{
2e0fef85 4663 struct lpfc_hba *phba = vport->phba;
dea3101e 4664 uint32_t *lp;
7bb3b137
JW
4665 uint8_t flag;
4666 LPFC_MBOXQ_t *mbox;
4667 struct lpfc_dmabuf *pcmd;
4668 RPS *rps;
4669 struct ls_rjt stat;
4670
2fe165b6 4671 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
90160e01
JS
4672 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
4673 /* reject the unsolicited RPS request and done with it */
4674 goto reject_out;
7bb3b137
JW
4675
4676 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4677 lp = (uint32_t *) pcmd->virt;
4678 flag = (be32_to_cpu(*lp++) & 0xf);
4679 rps = (RPS *) lp;
4680
4681 if ((flag == 0) ||
4682 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2e0fef85 4683 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
92d7f7b0 4684 sizeof(struct lpfc_name)) == 0))) {
2e0fef85 4685
92d7f7b0
JS
4686 printk("Fix me....\n");
4687 dump_stack();
2e0fef85
JS
4688 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
4689 if (mbox) {
7bb3b137
JW
4690 lpfc_read_lnk_stat(phba, mbox);
4691 mbox->context1 =
92d7f7b0 4692 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
329f9bc7 4693 mbox->context2 = lpfc_nlp_get(ndlp);
92d7f7b0 4694 mbox->vport = vport;
7bb3b137 4695 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
fa4066b6 4696 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
0b727fea 4697 != MBX_NOT_FINISHED)
7bb3b137
JW
4698 /* Mbox completion will send ELS Response */
4699 return 0;
fa4066b6
JS
4700 /* Decrement reference count used for the failed mbox
4701 * command.
4702 */
329f9bc7 4703 lpfc_nlp_put(ndlp);
7bb3b137
JW
4704 mempool_free(mbox, phba->mbox_mem_pool);
4705 }
4706 }
90160e01
JS
4707
4708reject_out:
4709 /* issue rejection response */
7bb3b137
JW
4710 stat.un.b.lsRjtRsvd0 = 0;
4711 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4712 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4713 stat.un.b.vendorUnique = 0;
858c9f6c 4714 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7bb3b137
JW
4715 return 0;
4716}
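/*
 * Deferred-response pattern, submit side (illustrative sketch only): the
 * handler above hands everything the completion needs to the mailbox and
 * returns; the LS_RJT path is taken only if the mailbox cannot be issued.
 *
 *	mbox->context1 = (void *)((unsigned long)cmdiocb->iocb.ulpContext);
 *	mbox->context2 = lpfc_nlp_get(ndlp);       // reference for the cmpl
 *	mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
 *		lpfc_nlp_put(ndlp);                // undo the reference
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 */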
4717
e59058c4 4718/**
3621a710 4719 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
e59058c4
JS
4720 * @vport: pointer to a host virtual N_Port data structure.
4721 * @cmdsize: size of the ELS command.
4722 * @oldiocb: pointer to the original lpfc command iocb data structure.
4723 * @ndlp: pointer to a node-list data structure.
4724 *
 4725 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
4726 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
4727 *
4728 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4729 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4730 * will be stored into the context1 field of the IOCB for the completion
4731 * callback function to the RPL Accept Response ELS command.
4732 *
4733 * Return code
4734 * 0 - Successfully issued ACC RPL ELS command
4735 * 1 - Failed to issue ACC RPL ELS command
4736 **/
082c0266 4737static int
2e0fef85
JS
4738lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4739 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7bb3b137 4740{
2e0fef85
JS
4741 struct lpfc_hba *phba = vport->phba;
4742 IOCB_t *icmd, *oldcmd;
7bb3b137
JW
4743 RPL_RSP rpl_rsp;
4744 struct lpfc_iocbq *elsiocb;
7bb3b137 4745 uint8_t *pcmd;
dea3101e 4746
2e0fef85
JS
4747 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4748 ndlp->nlp_DID, ELS_CMD_ACC);
7bb3b137 4749
488d1469 4750 if (!elsiocb)
7bb3b137 4751 return 1;
488d1469 4752
7bb3b137
JW
4753 icmd = &elsiocb->iocb;
4754 oldcmd = &oldiocb->iocb;
4755 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
4756
4757 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4758 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
92d7f7b0 4759 pcmd += sizeof(uint16_t);
7bb3b137
JW
4760 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
4761 pcmd += sizeof(uint16_t);
4762
4763 /* Setup the RPL ACC payload */
4764 rpl_rsp.listLen = be32_to_cpu(1);
4765 rpl_rsp.index = 0;
4766 rpl_rsp.port_num_blk.portNum = 0;
2e0fef85
JS
4767 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
4768 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7bb3b137 4769 sizeof(struct lpfc_name));
7bb3b137 4770 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7bb3b137 4771 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
e8b62011
JS
4772 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4773 "0120 Xmit ELS RPL ACC response tag x%x "
4774 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4775 "rpi x%x\n",
4776 elsiocb->iotag, elsiocb->iocb.ulpContext,
4777 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4778 ndlp->nlp_rpi);
858c9f6c 4779 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7bb3b137 4780 phba->fc_stat.elsXmitACC++;
3772a991
JS
4781 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4782 IOCB_ERROR) {
7bb3b137
JW
4783 lpfc_els_free_iocb(phba, elsiocb);
4784 return 1;
4785 }
4786 return 0;
4787}
4788
e59058c4 4789/**
3621a710 4790 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
e59058c4
JS
4791 * @vport: pointer to a host virtual N_Port data structure.
4792 * @cmdiocb: pointer to lpfc command iocb data structure.
4793 * @ndlp: pointer to a node-list data structure.
4794 *
4795 * This routine processes Read Port List (RPL) IOCB received as an ELS
4796 * unsolicited event. It first checks the remote port state. If the remote
 4797 * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it
 4798 * invokes the lpfc_els_rsp_reject() routine to send a reject response.
4799 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
4800 * to accept the RPL.
4801 *
4802 * Return code
4803 * 0 - Successfully processed rpl iocb (currently always return 0)
4804 **/
7bb3b137 4805static int
2e0fef85
JS
4806lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4807 struct lpfc_nodelist *ndlp)
7bb3b137
JW
4808{
4809 struct lpfc_dmabuf *pcmd;
4810 uint32_t *lp;
4811 uint32_t maxsize;
4812 uint16_t cmdsize;
4813 RPL *rpl;
4814 struct ls_rjt stat;
4815
2fe165b6
JW
4816 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
4817 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
90160e01 4818 /* issue rejection response */
7bb3b137
JW
4819 stat.un.b.lsRjtRsvd0 = 0;
4820 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4821 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4822 stat.un.b.vendorUnique = 0;
858c9f6c
JS
4823 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4824 NULL);
90160e01
JS
4825 /* rejected the unsolicited RPL request and done with it */
4826 return 0;
7bb3b137
JW
4827 }
4828
dea3101e 4829 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4830 lp = (uint32_t *) pcmd->virt;
7bb3b137 4831 rpl = (RPL *) (lp + 1);
dea3101e 4832
7bb3b137 4833 maxsize = be32_to_cpu(rpl->maxsize);
dea3101e 4834
7bb3b137
JW
4835 /* We support only one port */
4836 if ((rpl->index == 0) &&
4837 ((maxsize == 0) ||
4838 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
4839 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
2fe165b6 4840 } else {
7bb3b137
JW
4841 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
4842 }
2e0fef85 4843 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
dea3101e 4844
4845 return 0;
4846}
4847
e59058c4 4848/**
3621a710 4849 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
e59058c4
JS
4850 * @vport: pointer to a virtual N_Port data structure.
4851 * @cmdiocb: pointer to lpfc command iocb data structure.
4852 * @ndlp: pointer to a node-list data structure.
4853 *
4854 * This routine processes Fibre Channel Address Resolution Protocol
4855 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
4856 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
4857 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
4858 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
4859 * remote PortName is compared against the FC PortName stored in the @vport
4860 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
4861 * compared against the FC NodeName stored in the @vport data structure.
4862 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
4863 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
4864 * invoked to send out FARP Response to the remote node. Before sending the
 4865 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
4866 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
4867 * routine is invoked to log into the remote port first.
4868 *
4869 * Return code
4870 * 0 - Either the FARP Match Mode not supported or successfully processed
4871 **/
dea3101e 4872static int
2e0fef85
JS
4873lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4874 struct lpfc_nodelist *ndlp)
dea3101e 4875{
4876 struct lpfc_dmabuf *pcmd;
4877 uint32_t *lp;
4878 IOCB_t *icmd;
4879 FARP *fp;
4880 uint32_t cmd, cnt, did;
4881
4882 icmd = &cmdiocb->iocb;
4883 did = icmd->un.elsreq64.remoteID;
4884 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4885 lp = (uint32_t *) pcmd->virt;
4886
4887 cmd = *lp++;
4888 fp = (FARP *) lp;
dea3101e 4889 /* FARP-REQ received from DID <did> */
e8b62011
JS
4890 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4891 "0601 FARP-REQ received from DID x%x\n", did);
dea3101e 4892 /* We will only support match on WWPN or WWNN */
4893 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
c9f8735b 4894 return 0;
dea3101e 4895 }
4896
4897 cnt = 0;
4898 /* If this FARP command is searching for my portname */
4899 if (fp->Mflags & FARP_MATCH_PORT) {
2e0fef85 4900 if (memcmp(&fp->RportName, &vport->fc_portname,
92d7f7b0 4901 sizeof(struct lpfc_name)) == 0)
dea3101e 4902 cnt = 1;
4903 }
4904
4905 /* If this FARP command is searching for my nodename */
4906 if (fp->Mflags & FARP_MATCH_NODE) {
2e0fef85 4907 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
92d7f7b0 4908 sizeof(struct lpfc_name)) == 0)
dea3101e 4909 cnt = 1;
4910 }
4911
4912 if (cnt) {
4913 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
4914 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
4915 /* Log back into the node before sending the FARP. */
4916 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5024ab17 4917 ndlp->nlp_prev_state = ndlp->nlp_state;
2e0fef85 4918 lpfc_nlp_set_state(vport, ndlp,
de0c5b32 4919 NLP_STE_PLOGI_ISSUE);
2e0fef85 4920 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
dea3101e 4921 }
4922
4923 /* Send a FARP response to that node */
2e0fef85
JS
4924 if (fp->Rflags & FARP_REQUEST_FARPR)
4925 lpfc_issue_els_farpr(vport, did, 0);
dea3101e 4926 }
4927 }
c9f8735b 4928 return 0;
dea3101e 4929}
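/*
 * Simplified sketch (illustrative only, omitting the node-state checks done
 * above): once a WWPN/WWNN match is found, the response policy is
 *
 *	if (fp->Rflags & FARP_REQUEST_PLOGI)        // log in first if asked
 *		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
 *	if (fp->Rflags & FARP_REQUEST_FARPR)        // then answer with a FARPR
 *		lpfc_issue_els_farpr(vport, did, 0);
 */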
4930
e59058c4 4931/**
3621a710 4932 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
e59058c4
JS
4933 * @vport: pointer to a host virtual N_Port data structure.
4934 * @cmdiocb: pointer to lpfc command iocb data structure.
4935 * @ndlp: pointer to a node-list data structure.
4936 *
4937 * This routine processes Fibre Channel Address Resolution Protocol
4938 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
4939 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
4940 * the FARP response request.
4941 *
4942 * Return code
4943 * 0 - Successfully processed FARPR IOCB (currently always return 0)
4944 **/
dea3101e 4945static int
2e0fef85
JS
4946lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4947 struct lpfc_nodelist *ndlp)
dea3101e 4948{
4949 struct lpfc_dmabuf *pcmd;
4950 uint32_t *lp;
4951 IOCB_t *icmd;
4952 uint32_t cmd, did;
4953
4954 icmd = &cmdiocb->iocb;
4955 did = icmd->un.elsreq64.remoteID;
4956 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4957 lp = (uint32_t *) pcmd->virt;
4958
4959 cmd = *lp++;
4960 /* FARP-RSP received from DID <did> */
e8b62011
JS
4961 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4962 "0600 FARP-RSP received from DID x%x\n", did);
dea3101e 4963 /* ACCEPT the Farp resp request */
51ef4c26 4964 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
dea3101e 4965
4966 return 0;
4967}
4968
e59058c4 4969/**
3621a710 4970 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
e59058c4
JS
4971 * @vport: pointer to a host virtual N_Port data structure.
4972 * @cmdiocb: pointer to lpfc command iocb data structure.
4973 * @fan_ndlp: pointer to a node-list data structure.
4974 *
4975 * This routine processes a Fabric Address Notification (FAN) IOCB
4976 * command received as an ELS unsolicited event. The FAN ELS command will
4977 * only be processed on a physical port (i.e., the @vport represents the
4978 * physical port). The fabric NodeName and PortName from the FAN IOCB are
4979 * compared against those in the phba data structure. If any of those is
4980 * different, the lpfc_initial_flogi() routine is invoked to initialize
 4981 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
4982 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
4983 * is invoked to register login to the fabric.
4984 *
4985 * Return code
4986 * 0 - Successfully processed fan iocb (currently always return 0).
4987 **/
dea3101e 4988static int
2e0fef85
JS
4989lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4990 struct lpfc_nodelist *fan_ndlp)
dea3101e 4991{
0d2b6b83 4992 struct lpfc_hba *phba = vport->phba;
dea3101e 4993 uint32_t *lp;
5024ab17 4994 FAN *fp;
dea3101e 4995
0d2b6b83
JS
4996 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
4997 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
4998 fp = (FAN *) ++lp;
5024ab17 4999 /* FAN received; Fan does not have a reply sequence */
0d2b6b83
JS
5000 if ((vport == phba->pport) &&
5001 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5024ab17 5002 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
0d2b6b83 5003 sizeof(struct lpfc_name))) ||
5024ab17 5004 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
0d2b6b83
JS
5005 sizeof(struct lpfc_name)))) {
5006 /* This port has switched fabrics. FLOGI is required */
2e0fef85 5007 lpfc_initial_flogi(vport);
0d2b6b83
JS
5008 } else {
5009 /* FAN verified - skip FLOGI */
5010 vport->fc_myDID = vport->fc_prevDID;
6fb120a7
JS
5011 if (phba->sli_rev < LPFC_SLI_REV4)
5012 lpfc_issue_fabric_reglogin(vport);
5013 else
5014 lpfc_issue_reg_vfi(vport);
5024ab17 5015 }
dea3101e 5016 }
c9f8735b 5017 return 0;
dea3101e 5018}
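/*
 * Illustrative sketch (not part of the driver) of the FAN decision above,
 * omitting the physical-port and port-state checks: the fabric names from
 * the FAN payload are compared with the ones cached in phba->fc_fabparam.
 *
 *	same_fabric = !memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
 *			      sizeof(struct lpfc_name)) &&
 *		      !memcmp(&phba->fc_fabparam.portName, &fp->FportName,
 *			      sizeof(struct lpfc_name));
 *	if (!same_fabric)
 *		lpfc_initial_flogi(vport);           // fabric changed: re-FLOGI
 *	else {
 *		vport->fc_myDID = vport->fc_prevDID; // resume previous address
 *		// SLI-3: lpfc_issue_fabric_reglogin(), SLI-4: lpfc_issue_reg_vfi()
 *	}
 */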
5019
e59058c4 5020/**
3621a710 5021 * lpfc_els_timeout - Handler function for the els timer
e59058c4
JS
5022 * @ptr: holder for the timer function associated data.
5023 *
5024 * This routine is invoked by the ELS timer after timeout. It posts the ELS
 5025 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
5026 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5027 * up the worker thread. It is for the worker thread to invoke the routine
5028 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
5029 **/
dea3101e 5030void
5031lpfc_els_timeout(unsigned long ptr)
5032{
2e0fef85
JS
5033 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5034 struct lpfc_hba *phba = vport->phba;
5e9d9b82 5035 uint32_t tmo_posted;
dea3101e 5036 unsigned long iflag;
5037
2e0fef85 5038 spin_lock_irqsave(&vport->work_port_lock, iflag);
5e9d9b82
JS
5039 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5040 if (!tmo_posted)
2e0fef85 5041 vport->work_port_events |= WORKER_ELS_TMO;
5e9d9b82 5042 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
92d7f7b0 5043
5e9d9b82
JS
5044 if (!tmo_posted)
5045 lpfc_worker_wake_up(phba);
dea3101e 5046 return;
5047}
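/*
 * Usage note (illustrative sketch only): the timer is re-armed by the
 * timeout handler below with twice the fabric RATOV, mirroring the
 * mod_timer() call at the end of lpfc_els_timeout_handler():
 *
 *	mod_timer(&vport->els_tmofunc,
 *		  jiffies + HZ * (phba->fc_ratov << 1));
 */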
5048
e59058c4 5049/**
3621a710 5050 * lpfc_els_timeout_handler - Process an els timeout event
e59058c4
JS
5051 * @vport: pointer to a virtual N_Port data structure.
5052 *
5053 * This routine is the actual handler function that processes an ELS timeout
 5054 * event. It walks the ELS ring to find and abort all the IOCBs (except the
 5055 * ABORT/CLOSE/FARP/FARPR/FDISC ones) that are associated with the @vport,
 5056 * by invoking the lpfc_sli_issue_abort_iotag() routine.
5057 **/
dea3101e 5058void
2e0fef85 5059lpfc_els_timeout_handler(struct lpfc_vport *vport)
dea3101e 5060{
2e0fef85 5061 struct lpfc_hba *phba = vport->phba;
dea3101e 5062 struct lpfc_sli_ring *pring;
5063 struct lpfc_iocbq *tmp_iocb, *piocb;
5064 IOCB_t *cmd = NULL;
5065 struct lpfc_dmabuf *pcmd;
2e0fef85 5066 uint32_t els_command = 0;
dea3101e 5067 uint32_t timeout;
2e0fef85 5068 uint32_t remote_ID = 0xffffffff;
dea3101e 5069
2e0fef85 5070 spin_lock_irq(&phba->hbalock);
dea3101e 5071 timeout = (uint32_t)(phba->fc_ratov << 1);
5072
5073 pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 5074
5075 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5076 cmd = &piocb->iocb;
5077
2e0fef85
JS
5078 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5079 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5080 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
dea3101e 5081 continue;
2e0fef85
JS
5082
5083 if (piocb->vport != vport)
5084 continue;
5085
dea3101e 5086 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2e0fef85
JS
5087 if (pcmd)
5088 els_command = *(uint32_t *) (pcmd->virt);
dea3101e 5089
92d7f7b0
JS
5090 if (els_command == ELS_CMD_FARP ||
5091 els_command == ELS_CMD_FARPR ||
5092 els_command == ELS_CMD_FDISC)
5093 continue;
5094
dea3101e 5095 if (piocb->drvrTimeout > 0) {
92d7f7b0 5096 if (piocb->drvrTimeout >= timeout)
dea3101e 5097 piocb->drvrTimeout -= timeout;
92d7f7b0 5098 else
dea3101e 5099 piocb->drvrTimeout = 0;
dea3101e 5100 continue;
5101 }
5102
2e0fef85
JS
5103 remote_ID = 0xffffffff;
5104 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
dea3101e 5105 remote_ID = cmd->un.elsreq64.remoteID;
2e0fef85
JS
5106 else {
5107 struct lpfc_nodelist *ndlp;
5108 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
58da1ffb 5109 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2e0fef85 5110 remote_ID = ndlp->nlp_DID;
dea3101e 5111 }
e8b62011
JS
5112 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5113 "0127 ELS timeout Data: x%x x%x x%x "
5114 "x%x\n", els_command,
5115 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
07951076 5116 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
dea3101e 5117 }
2e0fef85 5118 spin_unlock_irq(&phba->hbalock);
5a0e326d 5119
2e0fef85
JS
5120 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5121 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
dea3101e 5122}
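/*
 * Timeout accounting above (illustrative restatement only): each pass of
 * the handler runs roughly every 2 * RATOV seconds, so an iocb's remaining
 * budget is decremented by that amount and it is only aborted once the
 * budget has already been exhausted:
 *
 *	if (piocb->drvrTimeout > 0) {
 *		piocb->drvrTimeout = (piocb->drvrTimeout >= timeout) ?
 *				     piocb->drvrTimeout - timeout : 0;
 *		continue;               // not expired yet
 *	}
 *	lpfc_sli_issue_abort_iotag(phba, pring, piocb);
 */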
5123
e59058c4 5124/**
3621a710 5125 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
e59058c4
JS
5126 * @vport: pointer to a host virtual N_Port data structure.
5127 *
5128 * This routine is used to clean up all the outstanding ELS commands on a
5129 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5130 * routine. After that, it walks the ELS transmit queue to remove all the
5131 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5132 * the IOCBs with a non-NULL completion callback function, the callback
5133 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5134 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5135 * callback function, the IOCB will simply be released. Finally, it walks
5136 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5137 * completion queue IOCB that is associated with the @vport and is not
5138 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5139 * part of the discovery state machine) out to HBA by invoking the
 5140 * lpfc_sli_issue_abort_iotag() routine. Note that although this function
 5141 * issues an abort IOCB for every queued transmit completion queue IOCB, it
 5142 * does not guarantee that the IOCBs are aborted when this function returns.
5143 **/
dea3101e 5144void
2e0fef85 5145lpfc_els_flush_cmd(struct lpfc_vport *vport)
dea3101e 5146{
2534ba75 5147 LIST_HEAD(completions);
2e0fef85 5148 struct lpfc_hba *phba = vport->phba;
329f9bc7 5149 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 5150 struct lpfc_iocbq *tmp_iocb, *piocb;
5151 IOCB_t *cmd = NULL;
92d7f7b0
JS
5152
5153 lpfc_fabric_abort_vport(vport);
dea3101e 5154
2e0fef85 5155 spin_lock_irq(&phba->hbalock);
dea3101e 5156 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5157 cmd = &piocb->iocb;
5158
5159 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5160 continue;
5161 }
5162
5163 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
329f9bc7
JS
5164 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5165 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5166 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5167 cmd->ulpCommand == CMD_ABORT_XRI_CN)
dea3101e 5168 continue;
dea3101e 5169
2e0fef85
JS
5170 if (piocb->vport != vport)
5171 continue;
5172
2534ba75 5173 list_move_tail(&piocb->list, &completions);
1dcb58e5 5174 pring->txq_cnt--;
dea3101e 5175 }
5176
5177 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
dea3101e 5178 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5179 continue;
5180 }
dea3101e 5181
2e0fef85
JS
5182 if (piocb->vport != vport)
5183 continue;
5184
07951076 5185 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
dea3101e 5186 }
2e0fef85 5187 spin_unlock_irq(&phba->hbalock);
2534ba75 5188
a257bf90
JS
 5189 /* Cancel all the IOCBs from the completions list */
5190 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5191 IOERR_SLI_ABORTED);
2534ba75 5192
dea3101e 5193 return;
5194}
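/*
 * Illustrative sketch (not part of the driver) of the two-phase flush
 * pattern used above and in lpfc_els_flush_all_cmd() below: collect the
 * victims onto a private list while holding the ring lock, then complete
 * them after dropping it so the completion handlers never run under
 * hbalock.  skip_iocb() is a hypothetical stand-in for the filtering done
 * above, and the txq_cnt bookkeeping is omitted.
 *
 *	LIST_HEAD(completions);
 *	spin_lock_irq(&phba->hbalock);
 *	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list)
 *		if (piocb->vport == vport && !skip_iocb(piocb))
 *			list_move_tail(&piocb->list, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */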
5195
e59058c4 5196/**
3621a710 5197 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
e59058c4
JS
5198 * @phba: pointer to lpfc hba data structure.
5199 *
5200 * This routine is used to clean up all the outstanding ELS commands on a
5201 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
5202 * routine. After that, it walks the ELS transmit queue to remove all the
5203 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
5204 * the IOCBs with the completion callback function associated, the callback
5205 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5206 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
5207 * callback function associated, the IOCB will simply be released. Finally,
5208 * it walks the ELS transmit completion queue to issue an abort IOCB to any
5209 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
5210 * management plane IOCBs that are not part of the discovery state machine)
5211 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
5212 **/
549e55cd
JS
5213void
5214lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
5215{
5216 LIST_HEAD(completions);
5217 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5218 struct lpfc_iocbq *tmp_iocb, *piocb;
5219 IOCB_t *cmd = NULL;
5220
5221 lpfc_fabric_abort_hba(phba);
5222 spin_lock_irq(&phba->hbalock);
5223 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5224 cmd = &piocb->iocb;
5225 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5226 continue;
5227 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5228 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5229 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5230 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5231 cmd->ulpCommand == CMD_ABORT_XRI_CN)
5232 continue;
5233 list_move_tail(&piocb->list, &completions);
5234 pring->txq_cnt--;
5235 }
5236 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5237 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5238 continue;
5239 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5240 }
5241 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
5242
5243 /* Cancel all the IOCBs from the completions list */
5244 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5245 IOERR_SLI_ABORTED);
5246
549e55cd
JS
5247 return;
5248}
5249
ea2151b4 5250/**
3621a710 5251 * lpfc_send_els_failure_event - Posts an ELS command failure event
ea2151b4
JS
5252 * @phba: Pointer to hba context object.
5253 * @cmdiocbp: Pointer to command iocb which reported error.
5254 * @rspiocbp: Pointer to response iocb which reported error.
5255 *
5256 * This function sends an event when there is an ELS command
5257 * failure.
5258 **/
5259void
5260lpfc_send_els_failure_event(struct lpfc_hba *phba,
5261 struct lpfc_iocbq *cmdiocbp,
5262 struct lpfc_iocbq *rspiocbp)
5263{
5264 struct lpfc_vport *vport = cmdiocbp->vport;
5265 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5266 struct lpfc_lsrjt_event lsrjt_event;
5267 struct lpfc_fabric_event_header fabric_event;
5268 struct ls_rjt stat;
5269 struct lpfc_nodelist *ndlp;
5270 uint32_t *pcmd;
5271
5272 ndlp = cmdiocbp->context1;
5273 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5274 return;
5275
5276 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
5277 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
5278 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
5279 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
5280 sizeof(struct lpfc_name));
5281 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
5282 sizeof(struct lpfc_name));
5283 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
5284 cmdiocbp->context2)->virt);
5285 lsrjt_event.command = *pcmd;
5286 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
5287 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
5288 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
5289 fc_host_post_vendor_event(shost,
5290 fc_get_event_number(),
5291 sizeof(lsrjt_event),
5292 (char *)&lsrjt_event,
ddcc50f0 5293 LPFC_NL_VENDOR_ID);
ea2151b4
JS
5294 return;
5295 }
5296 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
5297 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
5298 fabric_event.event_type = FC_REG_FABRIC_EVENT;
5299 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
5300 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
5301 else
5302 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
5303 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
5304 sizeof(struct lpfc_name));
5305 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
5306 sizeof(struct lpfc_name));
5307 fc_host_post_vendor_event(shost,
5308 fc_get_event_number(),
5309 sizeof(fabric_event),
5310 (char *)&fabric_event,
ddcc50f0 5311 LPFC_NL_VENDOR_ID);
ea2151b4
JS
5312 return;
5313 }
5314
5315}
5316
5317/**
3621a710 5318 * lpfc_send_els_event - Posts unsolicited els event
ea2151b4
JS
5319 * @vport: Pointer to vport object.
5320 * @ndlp: Pointer FC node object.
5321 * @cmd: ELS command code.
5322 *
5323 * This function posts an event when there is an incoming
5324 * unsolicited ELS command.
5325 **/
5326static void
5327lpfc_send_els_event(struct lpfc_vport *vport,
5328 struct lpfc_nodelist *ndlp,
ddcc50f0 5329 uint32_t *payload)
ea2151b4 5330{
ddcc50f0
JS
5331 struct lpfc_els_event_header *els_data = NULL;
5332 struct lpfc_logo_event *logo_data = NULL;
ea2151b4
JS
5333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5334
ddcc50f0
JS
5335 if (*payload == ELS_CMD_LOGO) {
5336 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
5337 if (!logo_data) {
5338 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5339 "0148 Failed to allocate memory "
5340 "for LOGO event\n");
5341 return;
5342 }
5343 els_data = &logo_data->header;
5344 } else {
5345 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
5346 GFP_KERNEL);
5347 if (!els_data) {
5348 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5349 "0149 Failed to allocate memory "
5350 "for ELS event\n");
5351 return;
5352 }
5353 }
5354 els_data->event_type = FC_REG_ELS_EVENT;
5355 switch (*payload) {
ea2151b4 5356 case ELS_CMD_PLOGI:
ddcc50f0 5357 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
ea2151b4
JS
5358 break;
5359 case ELS_CMD_PRLO:
ddcc50f0 5360 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
ea2151b4
JS
5361 break;
5362 case ELS_CMD_ADISC:
ddcc50f0
JS
5363 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
5364 break;
5365 case ELS_CMD_LOGO:
5366 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
5367 /* Copy the WWPN in the LOGO payload */
5368 memcpy(logo_data->logo_wwpn, &payload[2],
5369 sizeof(struct lpfc_name));
ea2151b4
JS
5370 break;
5371 default:
e916141c 5372 kfree(els_data);
ea2151b4
JS
5373 return;
5374 }
ddcc50f0
JS
5375 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5376 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5377 if (*payload == ELS_CMD_LOGO) {
5378 fc_host_post_vendor_event(shost,
5379 fc_get_event_number(),
5380 sizeof(struct lpfc_logo_event),
5381 (char *)logo_data,
5382 LPFC_NL_VENDOR_ID);
5383 kfree(logo_data);
5384 } else {
5385 fc_host_post_vendor_event(shost,
5386 fc_get_event_number(),
5387 sizeof(struct lpfc_els_event_header),
5388 (char *)els_data,
5389 LPFC_NL_VENDOR_ID);
5390 kfree(els_data);
5391 }
ea2151b4
JS
5392
5393 return;
5394}
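/*
 * Note (illustrative sketch only): both event helpers above report through
 * the FC transport's vendor-event interface; the general shape of the call
 * is simply
 *
 *	fc_host_post_vendor_event(shost, fc_get_event_number(),
 *				  sizeof(evt), (char *)&evt,
 *				  LPFC_NL_VENDOR_ID);
 *
 * where evt is one of the lpfc event structures used above.
 */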
5395
5396
e59058c4 5397/**
3621a710 5398 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
e59058c4
JS
5399 * @phba: pointer to lpfc hba data structure.
5400 * @pring: pointer to a SLI ring.
5401 * @vport: pointer to a host virtual N_Port data structure.
5402 * @elsiocb: pointer to lpfc els command iocb data structure.
5403 *
5404 * This routine is used for processing the IOCB associated with a unsolicited
5405 * event. It first determines whether there is an existing ndlp that matches
5406 * the DID from the unsolicited IOCB. If not, it will create a new one with
5407 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
5408 * IOCB is then used to invoke the proper routine and to set up proper state
5409 * of the discovery state machine.
5410 **/
ed957684
JS
5411static void
5412lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
92d7f7b0 5413 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
dea3101e 5414{
87af33fe 5415 struct Scsi_Host *shost;
dea3101e 5416 struct lpfc_nodelist *ndlp;
dea3101e 5417 struct ls_rjt stat;
92d7f7b0 5418 uint32_t *payload;
2e0fef85 5419 uint32_t cmd, did, newnode, rjt_err = 0;
ed957684 5420 IOCB_t *icmd = &elsiocb->iocb;
dea3101e 5421
e47c9093 5422 if (!vport || !(elsiocb->context2))
dea3101e 5423 goto dropit;
2e0fef85 5424
dea3101e 5425 newnode = 0;
92d7f7b0
JS
5426 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
5427 cmd = *payload;
ed957684 5428 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
495a714c 5429 lpfc_post_buffer(phba, pring, 1);
dea3101e 5430
858c9f6c
JS
5431 did = icmd->un.rcvels.remoteID;
5432 if (icmd->ulpStatus) {
5433 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5434 "RCV Unsol ELS: status:x%x/x%x did:x%x",
5435 icmd->ulpStatus, icmd->un.ulpWord[4], did);
dea3101e 5436 goto dropit;
858c9f6c 5437 }
dea3101e 5438
5439 /* Check to see if link went down during discovery */
ed957684 5440 if (lpfc_els_chk_latt(vport))
dea3101e 5441 goto dropit;
dea3101e 5442
c868595d 5443 /* Ignore traffic received during vport shutdown. */
92d7f7b0
JS
5444 if (vport->load_flag & FC_UNLOADING)
5445 goto dropit;
5446
2e0fef85 5447 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 5448 if (!ndlp) {
dea3101e 5449 /* Cannot find existing Fabric ndlp, so allocate a new one */
c9f8735b 5450 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ed957684 5451 if (!ndlp)
dea3101e 5452 goto dropit;
dea3101e 5453
2e0fef85 5454 lpfc_nlp_init(vport, ndlp, did);
98c9ea5c 5455 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
dea3101e 5456 newnode = 1;
e47c9093 5457 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
dea3101e 5458 ndlp->nlp_type |= NLP_FABRIC;
58da1ffb
JS
5459 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5460 ndlp = lpfc_enable_node(vport, ndlp,
5461 NLP_STE_UNUSED_NODE);
5462 if (!ndlp)
5463 goto dropit;
5464 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5465 newnode = 1;
5466 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5467 ndlp->nlp_type |= NLP_FABRIC;
5468 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
5469 /* This is similar to the new node path */
5470 ndlp = lpfc_nlp_get(ndlp);
5471 if (!ndlp)
5472 goto dropit;
5473 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5474 newnode = 1;
87af33fe 5475 }
dea3101e 5476
5477 phba->fc_stat.elsRcvFrame++;
e47c9093 5478
329f9bc7 5479 elsiocb->context1 = lpfc_nlp_get(ndlp);
2e0fef85 5480 elsiocb->vport = vport;
dea3101e 5481
5482 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
5483 cmd &= ELS_CMD_MASK;
5484 }
5485 /* ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
5486 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5487 "0112 ELS command x%x received from NPORT x%x "
5488 "Data: x%x\n", cmd, did, vport->port_state);
dea3101e 5489 switch (cmd) {
5490 case ELS_CMD_PLOGI:
858c9f6c
JS
5491 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5492 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
5493 did, vport->port_state, ndlp->nlp_flag);
5494
dea3101e 5495 phba->fc_stat.elsRcvPLOGI++;
858c9f6c
JS
5496 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
5497
ddcc50f0 5498 lpfc_send_els_event(vport, ndlp, payload);
858c9f6c 5499 if (vport->port_state < LPFC_DISC_AUTH) {
1b32f6aa
JS
5500 if (!(phba->pport->fc_flag & FC_PT2PT) ||
5501 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
5502 rjt_err = LSRJT_UNABLE_TPC;
5503 break;
5504 }
5505 /* We get here, and drop thru, if we are PT2PT with
5506 * another NPort and the other side has initiated
5507 * the PLOGI before responding to our FLOGI.
5508 */
dea3101e 5509 }
87af33fe
JS
5510
5511 shost = lpfc_shost_from_vport(vport);
5512 spin_lock_irq(shost->host_lock);
5513 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
5514 spin_unlock_irq(shost->host_lock);
5515
2e0fef85
JS
5516 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5517 NLP_EVT_RCV_PLOGI);
858c9f6c 5518
dea3101e 5519 break;
5520 case ELS_CMD_FLOGI:
858c9f6c
JS
5521 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5522 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
5523 did, vport->port_state, ndlp->nlp_flag);
5524
dea3101e 5525 phba->fc_stat.elsRcvFLOGI++;
51ef4c26 5526 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
87af33fe 5527 if (newnode)
98c9ea5c 5528 lpfc_nlp_put(ndlp);
dea3101e 5529 break;
5530 case ELS_CMD_LOGO:
858c9f6c
JS
5531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5532 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
5533 did, vport->port_state, ndlp->nlp_flag);
5534
dea3101e 5535 phba->fc_stat.elsRcvLOGO++;
ddcc50f0 5536 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 5537 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 5538 rjt_err = LSRJT_UNABLE_TPC;
dea3101e 5539 break;
5540 }
2e0fef85 5541 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
dea3101e 5542 break;
5543 case ELS_CMD_PRLO:
858c9f6c
JS
5544 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5545 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
5546 did, vport->port_state, ndlp->nlp_flag);
5547
dea3101e 5548 phba->fc_stat.elsRcvPRLO++;
ddcc50f0 5549 lpfc_send_els_event(vport, ndlp, payload);
2e0fef85 5550 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 5551 rjt_err = LSRJT_UNABLE_TPC;
dea3101e 5552 break;
5553 }
2e0fef85 5554 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
dea3101e 5555 break;
5556 case ELS_CMD_RSCN:
5557 phba->fc_stat.elsRcvRSCN++;
51ef4c26 5558 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
87af33fe 5559 if (newnode)
98c9ea5c 5560 lpfc_nlp_put(ndlp);
dea3101e 5561 break;
5562 case ELS_CMD_ADISC:
858c9f6c
JS
5563 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5564 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
5565 did, vport->port_state, ndlp->nlp_flag);
5566
ddcc50f0 5567 lpfc_send_els_event(vport, ndlp, payload);
dea3101e 5568 phba->fc_stat.elsRcvADISC++;
2e0fef85 5569 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 5570 rjt_err = LSRJT_UNABLE_TPC;
dea3101e 5571 break;
5572 }
2e0fef85
JS
5573 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5574 NLP_EVT_RCV_ADISC);
dea3101e 5575 break;
5576 case ELS_CMD_PDISC:
858c9f6c
JS
5577 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5578 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
5579 did, vport->port_state, ndlp->nlp_flag);
5580
dea3101e 5581 phba->fc_stat.elsRcvPDISC++;
2e0fef85 5582 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 5583 rjt_err = LSRJT_UNABLE_TPC;
dea3101e 5584 break;
5585 }
2e0fef85
JS
5586 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5587 NLP_EVT_RCV_PDISC);
dea3101e 5588 break;
5589 case ELS_CMD_FARPR:
858c9f6c
JS
5590 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5591 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
5592 did, vport->port_state, ndlp->nlp_flag);
5593
dea3101e 5594 phba->fc_stat.elsRcvFARPR++;
2e0fef85 5595 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
dea3101e 5596 break;
5597 case ELS_CMD_FARP:
858c9f6c
JS
5598 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5599 "RCV FARP: did:x%x/ste:x%x flg:x%x",
5600 did, vport->port_state, ndlp->nlp_flag);
5601
dea3101e 5602 phba->fc_stat.elsRcvFARP++;
2e0fef85 5603 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
dea3101e 5604 break;
5605 case ELS_CMD_FAN:
858c9f6c
JS
5606 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5607 "RCV FAN: did:x%x/ste:x%x flg:x%x",
5608 did, vport->port_state, ndlp->nlp_flag);
5609
dea3101e 5610 phba->fc_stat.elsRcvFAN++;
2e0fef85 5611 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
dea3101e 5612 break;
dea3101e 5613 case ELS_CMD_PRLI:
858c9f6c
JS
5614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5615 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
5616 did, vport->port_state, ndlp->nlp_flag);
5617
dea3101e 5618 phba->fc_stat.elsRcvPRLI++;
2e0fef85 5619 if (vport->port_state < LPFC_DISC_AUTH) {
858c9f6c 5620 rjt_err = LSRJT_UNABLE_TPC;
dea3101e 5621 break;
5622 }
2e0fef85 5623 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
dea3101e 5624 break;
7bb3b137 5625 case ELS_CMD_LIRR:
858c9f6c
JS
5626 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5627 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
5628 did, vport->port_state, ndlp->nlp_flag);
5629
7bb3b137 5630 phba->fc_stat.elsRcvLIRR++;
2e0fef85 5631 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
87af33fe 5632 if (newnode)
98c9ea5c 5633 lpfc_nlp_put(ndlp);
7bb3b137
JW
5634 break;
5635 case ELS_CMD_RPS:
858c9f6c
JS
5636 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5637 "RCV RPS: did:x%x/ste:x%x flg:x%x",
5638 did, vport->port_state, ndlp->nlp_flag);
5639
7bb3b137 5640 phba->fc_stat.elsRcvRPS++;
2e0fef85 5641 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
87af33fe 5642 if (newnode)
98c9ea5c 5643 lpfc_nlp_put(ndlp);
7bb3b137
JW
5644 break;
5645 case ELS_CMD_RPL:
858c9f6c
JS
5646 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5647 "RCV RPL: did:x%x/ste:x%x flg:x%x",
5648 did, vport->port_state, ndlp->nlp_flag);
5649
7bb3b137 5650 phba->fc_stat.elsRcvRPL++;
2e0fef85 5651 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
87af33fe 5652 if (newnode)
98c9ea5c 5653 lpfc_nlp_put(ndlp);
7bb3b137 5654 break;
dea3101e 5655 case ELS_CMD_RNID:
858c9f6c
JS
5656 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5657 "RCV RNID: did:x%x/ste:x%x flg:x%x",
5658 did, vport->port_state, ndlp->nlp_flag);
5659
dea3101e 5660 phba->fc_stat.elsRcvRNID++;
2e0fef85 5661 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
87af33fe 5662 if (newnode)
98c9ea5c 5663 lpfc_nlp_put(ndlp);
dea3101e 5664 break;
5ffc266e
JS
5665 case ELS_CMD_RRQ:
5666 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5667 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
5668 did, vport->port_state, ndlp->nlp_flag);
5669
5670 phba->fc_stat.elsRcvRRQ++;
5671 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5672 if (newnode)
5673 lpfc_nlp_put(ndlp);
5674 break;
dea3101e 5675 default:
858c9f6c
JS
5676 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5677 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
5678 cmd, did, vport->port_state);
5679
dea3101e 5680 /* Unsupported ELS command, reject */
858c9f6c 5681 rjt_err = LSRJT_INVALID_CMD;
dea3101e 5682
5683 /* Unknown ELS command <elsCmd> received from NPORT <did> */
e8b62011
JS
5684 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5685 "0115 Unknown ELS command x%x "
5686 "received from NPORT x%x\n", cmd, did);
87af33fe 5687 if (newnode)
98c9ea5c 5688 lpfc_nlp_put(ndlp);
dea3101e 5689 break;
5690 }
5691
5692 /* check if need to LS_RJT received ELS cmd */
5693 if (rjt_err) {
92d7f7b0 5694 memset(&stat, 0, sizeof(stat));
858c9f6c 5695 stat.un.b.lsRjtRsnCode = rjt_err;
1f679caf 5696 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
858c9f6c
JS
5697 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
5698 NULL);
dea3101e 5699 }
5700
d7c255b2
JS
5701 lpfc_nlp_put(elsiocb->context1);
5702 elsiocb->context1 = NULL;
ed957684
JS
5703 return;
5704
5705dropit:
98c9ea5c 5706 if (vport && !(vport->load_flag & FC_UNLOADING))
6fb120a7
JS
5707 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5708 "0111 Dropping received ELS cmd "
ed957684 5709 "Data: x%x x%x x%x\n",
6fb120a7 5710 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
ed957684
JS
5711 phba->fc_stat.elsRcvDrop++;
5712}
5713
e59058c4 5714/**
3621a710 5715 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
e59058c4
JS
5716 * @phba: pointer to lpfc hba data structure.
5717 * @vpi: host virtual N_Port identifier.
5718 *
5719 * This routine finds a vport on a HBA (referred by @phba) through a
5720 * @vpi. The function walks the HBA's vport list and returns the address
5721 * of the vport with the matching @vpi.
5722 *
5723 * Return code
5724 * NULL - No vport with the matching @vpi found
5725 * Otherwise - Address to the vport with the matching @vpi.
5726 **/
6669f9bb 5727struct lpfc_vport *
92d7f7b0
JS
5728lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5729{
5730 struct lpfc_vport *vport;
549e55cd 5731 unsigned long flags;
92d7f7b0 5732
549e55cd 5733 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0 5734 list_for_each_entry(vport, &phba->port_list, listentry) {
549e55cd
JS
5735 if (vport->vpi == vpi) {
5736 spin_unlock_irqrestore(&phba->hbalock, flags);
92d7f7b0 5737 return vport;
549e55cd 5738 }
92d7f7b0 5739 }
549e55cd 5740 spin_unlock_irqrestore(&phba->hbalock, flags);
92d7f7b0
JS
5741 return NULL;
5742}
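/*
 * Usage sketch (illustrative only), mirroring the caller in
 * lpfc_els_unsol_event() below: the vpi carried in the receive iocb is
 * relative to the HBA's vpi_base, and 0xffff marks physical-port traffic.
 *
 *	if (icmd->unsli3.rcvsli3.vpi == 0xffff)
 *		vport = phba->pport;                    // physical port traffic
 *	else
 *		vport = lpfc_find_vport_by_vpid(phba,
 *				icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
 *	// a NULL result means no such vport; the frame is then dropped
 */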
ed957684 5743
e59058c4 5744/**
3621a710 5745 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
e59058c4
JS
5746 * @phba: pointer to lpfc hba data structure.
5747 * @pring: pointer to a SLI ring.
5748 * @elsiocb: pointer to lpfc els iocb data structure.
5749 *
5750 * This routine is used to process an unsolicited event received from a SLI
5751 * (Service Level Interface) ring. The actual processing of the data buffer
5752 * associated with the unsolicited event is done by invoking the routine
 5753 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
5754 * SLI ring on which the unsolicited event was received.
5755 **/
ed957684
JS
5756void
5757lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5758 struct lpfc_iocbq *elsiocb)
5759{
5760 struct lpfc_vport *vport = phba->pport;
ed957684 5761 IOCB_t *icmd = &elsiocb->iocb;
ed957684 5762 dma_addr_t paddr;
92d7f7b0
JS
5763 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
5764 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
5765
d7c255b2 5766 elsiocb->context1 = NULL;
92d7f7b0
JS
5767 elsiocb->context2 = NULL;
5768 elsiocb->context3 = NULL;
ed957684 5769
92d7f7b0
JS
5770 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
5771 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
5772 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
5773 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
ed957684
JS
5774 phba->fc_stat.NoRcvBuf++;
5775 /* Not enough posted buffers; Try posting more buffers */
92d7f7b0 5776 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
495a714c 5777 lpfc_post_buffer(phba, pring, 0);
ed957684
JS
5778 return;
5779 }
5780
92d7f7b0
JS
5781 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5782 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
5783 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5784 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5785 vport = phba->pport;
6fb120a7
JS
5786 else
5787 vport = lpfc_find_vport_by_vpid(phba,
5788 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
92d7f7b0 5789 }
7f5f3d0d
JS
5790 /* If there are no BDEs associated
5791 * with this IOCB, there is nothing to do.
5792 */
ed957684
JS
5793 if (icmd->ulpBdeCount == 0)
5794 return;
5795
7f5f3d0d
JS
5796 /* type of ELS cmd is first 32bit word
5797 * in packet
5798 */
ed957684 5799 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
92d7f7b0 5800 elsiocb->context2 = bdeBuf1;
ed957684
JS
5801 } else {
5802 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
5803 icmd->un.cont64[0].addrLow);
92d7f7b0
JS
5804 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
5805 paddr);
ed957684
JS
5806 }
5807
92d7f7b0
JS
5808 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
5809 /*
5810 * The different unsolicited event handlers would tell us
5811 * if they are done with "mp" by setting context2 to NULL.
5812 */
dea3101e 5813 if (elsiocb->context2) {
92d7f7b0
JS
5814 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
5815 elsiocb->context2 = NULL;
dea3101e 5816 }
ed957684
JS
5817
5818 /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
92d7f7b0 5819 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
ed957684 5820 icmd->ulpBdeCount == 2) {
92d7f7b0
JS
5821 elsiocb->context2 = bdeBuf2;
5822 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
ed957684
JS
5823 /* free mp if we are done with it */
5824 if (elsiocb->context2) {
92d7f7b0
JS
5825 lpfc_in_buf_free(phba, elsiocb->context2);
5826 elsiocb->context2 = NULL;
5827 }
5828 }
5829}
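
/*
 * Illustrative sketch, not part of the committed driver code: the context2
 * ownership convention used by the unsolicited path above. A handler that
 * wants to keep the ELS payload clears context2 so the caller will not free
 * it; otherwise the buffer is released with lpfc_in_buf_free(). The function
 * name and the 'keep' flag are assumptions made for this sketch.
 */
static void
lpfc_sketch_consume_unsol_buf(struct lpfc_hba *phba,
			      struct lpfc_iocbq *elsiocb, int keep)
{
	struct lpfc_dmabuf *mp = elsiocb->context2;

	if (!mp)
		return;

	if (keep) {
		/* take ownership; the caller must not free the buffer now */
		elsiocb->context2 = NULL;
		/* ... payload would be stashed for deferred processing ... */
	} else {
		/* done with the payload; free it and clear the pointer */
		lpfc_in_buf_free(phba, mp);
		elsiocb->context2 = NULL;
	}
}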
5830
e59058c4 5831/**
3621a710 5832 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
e59058c4
JS
5833 * @phba: pointer to lpfc hba data structure.
5834 * @vport: pointer to a virtual N_Port data structure.
5835 *
5836 * This routine issues a Port Login (PLOGI) to the Name Server with
5837 * State Change Request (SCR) for a @vport. This routine will create an
5838 * ndlp for the Name Server associated with the @vport if such a node does
5839 * not already exist. The PLOGI to Name Server is issued by invoking the
5840 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
5841 * (FDMI) is configured for the @vport, an FDMI node will be created and
5842 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
5843 **/
92d7f7b0
JS
5844void
5845lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5846{
5847 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
5848
5849 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5850 if (!ndlp) {
5851 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5852 if (!ndlp) {
5853 if (phba->fc_topology == TOPOLOGY_LOOP) {
5854 lpfc_disc_start(vport);
5855 return;
5856 }
5857 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
5858 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5859 "0251 NameServer login: no memory\n");
92d7f7b0
JS
5860 return;
5861 }
5862 lpfc_nlp_init(vport, ndlp, NameServer_DID);
e47c9093
JS
5863 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5864 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5865 if (!ndlp) {
5866 if (phba->fc_topology == TOPOLOGY_LOOP) {
5867 lpfc_disc_start(vport);
5868 return;
5869 }
5870 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5871 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5872 "0348 NameServer login: node freed\n");
5873 return;
5874 }
92d7f7b0 5875 }
58da1ffb 5876 ndlp->nlp_type |= NLP_FABRIC;
92d7f7b0
JS
5877
5878 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5879
5880 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
5881 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
5882 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5883 "0252 Cannot issue NameServer login\n");
92d7f7b0
JS
5884 return;
5885 }
5886
3de2a653 5887 if (vport->cfg_fdmi_on) {
92d7f7b0
JS
5888 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
5889 GFP_KERNEL);
5890 if (ndlp_fdmi) {
5891 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
5892 ndlp_fdmi->nlp_type |= NLP_FABRIC;
58da1ffb
JS
5893 lpfc_nlp_set_state(vport, ndlp_fdmi,
5894 NLP_STE_PLOGI_ISSUE);
92d7f7b0
JS
5895 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
5896 0);
5897 }
5898 }
5899 return;
5900}
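
/*
 * Illustrative sketch, not part of the committed driver code: the
 * lookup-or-create pattern used above for well-known fabric nodes such as
 * the Name Server and FDMI. The helper name and the reduced error handling
 * (returning NULL) are assumptions made for this sketch.
 */
static struct lpfc_nodelist *
lpfc_sketch_get_fabric_node(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		/* no node yet: allocate and initialize one for this DID */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* node exists but is inactive: bring it back first */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	ndlp->nlp_type |= NLP_FABRIC;
	return ndlp;
}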
5901
e59058c4 5902/**
3621a710 5903 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
e59058c4
JS
5904 * @phba: pointer to lpfc hba data structure.
5905 * @pmb: pointer to the driver internal queue element for mailbox command.
5906 *
5907 * This routine is the completion callback function to register new vport
5908 * mailbox command. If the new vport mailbox command completes successfully,
5909 * the fabric registration login shall be performed on the physical port (the
5910 * new vport created is actually a physical port, with VPI 0) or the port
5911 * login to the Name Server for State Change Request (SCR) will be performed
5912 * on a virtual port (a real virtual port, with VPI greater than 0).
5913 **/
92d7f7b0
JS
5914static void
5915lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5916{
5917 struct lpfc_vport *vport = pmb->vport;
5918 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5919 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
04c68496 5920 MAILBOX_t *mb = &pmb->u.mb;
695a814e 5921 int rc;
92d7f7b0 5922
09372820 5923 spin_lock_irq(shost->host_lock);
92d7f7b0 5924 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
09372820 5925 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
5926
5927 if (mb->mbxStatus) {
e8b62011
JS
5928 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
5929 "0915 Register VPI failed: 0x%x\n",
5930 mb->mbxStatus);
92d7f7b0
JS
5931
5932 switch (mb->mbxStatus) {
5933 case 0x11: /* unsupported feature */
5934 case 0x9603: /* max_vpi exceeded */
7f5f3d0d 5935 case 0x9602: /* Link event since CLEAR_LA */
92d7f7b0
JS
5936 /* giving up on vport registration */
5937 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5938 spin_lock_irq(shost->host_lock);
5939 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
5940 spin_unlock_irq(shost->host_lock);
5941 lpfc_can_disctmo(vport);
5942 break;
695a814e
JS
5943 /* If reg_vpi fail with invalid VPI status, re-init VPI */
5944 case 0x20:
5945 spin_lock_irq(shost->host_lock);
5946 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
5947 spin_unlock_irq(shost->host_lock);
5948 lpfc_init_vpi(phba, pmb, vport->vpi);
5949 pmb->vport = vport;
5950 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
5951 rc = lpfc_sli_issue_mbox(phba, pmb,
5952 MBX_NOWAIT);
5953 if (rc == MBX_NOT_FINISHED) {
5954 lpfc_printf_vlog(vport,
5955 KERN_ERR, LOG_MBOX,
5956 "2732 Failed to issue INIT_VPI"
5957 " mailbox command\n");
5958 } else {
5959 lpfc_nlp_put(ndlp);
5960 return;
5961 }
5962
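		/* No break here: if INIT_VPI could not be issued, fall
		 * through to the default recovery path below.
		 */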
92d7f7b0
JS
5963 default:
5964 /* Try to recover from this error */
5965 lpfc_mbx_unreg_vpi(vport);
09372820 5966 spin_lock_irq(shost->host_lock);
92d7f7b0 5967 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
09372820 5968 spin_unlock_irq(shost->host_lock);
7f5f3d0d
JS
5969 if (vport->port_type == LPFC_PHYSICAL_PORT)
5970 lpfc_initial_flogi(vport);
5971 else
5972 lpfc_initial_fdisc(vport);
92d7f7b0
JS
5973 break;
5974 }
92d7f7b0 5975 } else {
695a814e 5976 spin_lock_irq(shost->host_lock);
1987807d 5977 vport->vpi_state |= LPFC_VPI_REGISTERED;
695a814e
JS
5978 spin_unlock_irq(shost->host_lock);
5979 if (vport == phba->pport) {
6fb120a7
JS
5980 if (phba->sli_rev < LPFC_SLI_REV4)
5981 lpfc_issue_fabric_reglogin(vport);
695a814e
JS
5982 else {
5983 lpfc_start_fdiscs(phba);
5984 lpfc_do_scr_ns_plogi(phba, vport);
5985 }
5986 } else
92d7f7b0
JS
5987 lpfc_do_scr_ns_plogi(phba, vport);
5988 }
fa4066b6
JS
5989
5990 /* Now, we decrement the ndlp reference count held for this
5991 * callback function
5992 */
5993 lpfc_nlp_put(ndlp);
5994
92d7f7b0
JS
5995 mempool_free(pmb, phba->mbox_mem_pool);
5996 return;
5997}
5998
e59058c4 5999/**
3621a710 6000 * lpfc_register_new_vport - Register a new vport with a HBA
e59058c4
JS
6001 * @phba: pointer to lpfc hba data structure.
6002 * @vport: pointer to a host virtual N_Port data structure.
6003 * @ndlp: pointer to a node-list data structure.
6004 *
6005 * This routine registers the @vport as a new virtual port with a HBA.
6006 * It is done through a register vpi (REG_VPI) mailbox command.
6007 **/
695a814e 6008void
92d7f7b0
JS
6009lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6010 struct lpfc_nodelist *ndlp)
6011{
09372820 6012 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
92d7f7b0
JS
6013 LPFC_MBOXQ_t *mbox;
6014
6015 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6016 if (mbox) {
6fb120a7 6017 lpfc_reg_vpi(vport, mbox);
92d7f7b0
JS
6018 mbox->vport = vport;
6019 mbox->context2 = lpfc_nlp_get(ndlp);
6020 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
0b727fea 6021 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
92d7f7b0 6022 == MBX_NOT_FINISHED) {
fa4066b6
JS
6023 /* mailbox command was not successful, decrement ndlp
6024 * reference count for this command
6025 */
6026 lpfc_nlp_put(ndlp);
92d7f7b0 6027 mempool_free(mbox, phba->mbox_mem_pool);
92d7f7b0 6028
e8b62011
JS
6029 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6030 "0253 Register VPI: Can't send mbox\n");
fa4066b6 6031 goto mbox_err_exit;
92d7f7b0
JS
6032 }
6033 } else {
e8b62011
JS
6034 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6035 "0254 Register VPI: no memory\n");
fa4066b6 6036 goto mbox_err_exit;
92d7f7b0 6037 }
fa4066b6
JS
6038 return;
6039
6040mbox_err_exit:
6041 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6042 spin_lock_irq(shost->host_lock);
6043 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6044 spin_unlock_irq(shost->host_lock);
6045 return;
92d7f7b0
JS
6046}
6047
695a814e
JS
6048/**
6049 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6050 * @phba: pointer to lpfc hba data structure.
6051 *
6052 * This routine aborts all pending discovery commands and
6053 * starts a timer to retry FLOGI for the physical port
6054 * discovery.
6055 **/
6056void
6057lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6058{
6059 struct lpfc_vport **vports;
6060 struct lpfc_nodelist *ndlp;
6061 struct Scsi_Host *shost;
6062 int i;
6063 uint32_t link_state;
6064
6065 /* Treat this failure as linkdown for all vports */
6066 link_state = phba->link_state;
6067 lpfc_linkdown(phba);
6068 phba->link_state = link_state;
6069
6070 vports = lpfc_create_vport_work_array(phba);
6071
6072 if (vports) {
6073 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6074 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6075 if (ndlp)
6076 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6077 lpfc_els_flush_cmd(vports[i]);
6078 }
6079 lpfc_destroy_vport_work_array(phba, vports);
6080 }
6081
6082 /* If the fabric requires FLOGI, then re-instantiate the physical login */
6083 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6084 if (!ndlp)
6085 return;
6086
6087
6088 shost = lpfc_shost_from_vport(phba->pport);
6089 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6090 spin_lock_irq(shost->host_lock);
6091 ndlp->nlp_flag |= NLP_DELAY_TMO;
6092 spin_unlock_irq(shost->host_lock);
6093 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6094 phba->pport->port_state = LPFC_FLOGI;
6095 return;
6096}
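
/*
 * Illustrative sketch, not part of the committed driver code: the delayed
 * ELS retry convention used above. Arming nlp_delayfunc with NLP_DELAY_TMO
 * set and nlp_last_elscmd recorded lets the driver's delay-timer handling
 * reissue that ELS command when the timer fires. The helper name and the
 * 'delay' parameter are assumptions made for this sketch.
 */
static void
lpfc_sketch_delay_els_retry(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    uint32_t elscmd, unsigned long delay)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	mod_timer(&ndlp->nlp_delayfunc, jiffies + delay);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = elscmd;
}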
6097
6098/**
6099 * lpfc_fabric_login_reqd - Check if FLOGI required.
6100 * @phba: pointer to lpfc hba data structure.
6101 * @cmdiocb: pointer to FDISC command iocb.
6102 * @rspiocb: pointer to FDISC response iocb.
6103 *
6104 * This routine checks if a FLOGI is required for FDISC
6105 * to succeed.
6106 **/
6107static int
6108lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6109 struct lpfc_iocbq *cmdiocb,
6110 struct lpfc_iocbq *rspiocb)
6111{
6112
6113 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6114 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6115 return 0;
6116 else
6117 return 1;
6118}
6119
e59058c4 6120/**
3621a710 6121 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
e59058c4
JS
6122 * @phba: pointer to lpfc hba data structure.
6123 * @cmdiocb: pointer to lpfc command iocb data structure.
6124 * @rspiocb: pointer to lpfc response iocb data structure.
6125 *
6126 * This routine is the completion callback function to a Fabric Discover
6127 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
6128 * single threaded, each FDISC completion callback function will reset
6129 * the discovery timer for all vports so that the timers do not time out
6130 * unnecessarily. The function checks the FDISC IOCB status. If an error is
6131 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
6132 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
6133 * assigned to the vport has been changed with the completion of the FDISC
6134 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
6135 * are unregistered from the HBA, and then the lpfc_register_new_vport()
6136 * routine is invoked to register new vport with the HBA. Otherwise, the
6137 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
6138 * Server for State Change Request (SCR).
6139 **/
92d7f7b0
JS
6140static void
6141lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6142 struct lpfc_iocbq *rspiocb)
6143{
6144 struct lpfc_vport *vport = cmdiocb->vport;
6145 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6146 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
6147 struct lpfc_nodelist *np;
6148 struct lpfc_nodelist *next_np;
6149 IOCB_t *irsp = &rspiocb->iocb;
6150 struct lpfc_iocbq *piocb;
6151
e8b62011
JS
6152 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6153 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
6154 irsp->ulpStatus, irsp->un.ulpWord[4],
6155 vport->fc_prevDID);
92d7f7b0
JS
6156 /* Since all FDISCs are being single threaded, we
6157 * must reset the discovery timer for ALL vports
6158 * waiting to send FDISC when one completes.
6159 */
6160 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
6161 lpfc_set_disctmo(piocb->vport);
6162 }
6163
858c9f6c
JS
6164 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6165 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
6166 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6167
92d7f7b0 6168 if (irsp->ulpStatus) {
695a814e
JS
6169
6170 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6171 lpfc_retry_pport_discovery(phba);
6172 goto out;
6173 }
6174
92d7f7b0
JS
6175 /* Check for retry */
6176 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6177 goto out;
92d7f7b0 6178 /* FDISC failed */
e8b62011 6179 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
d7c255b2 6180 "0126 FDISC failed. (%d/%d)\n",
e8b62011 6181 irsp->ulpStatus, irsp->un.ulpWord[4]);
d7c255b2
JS
6182 goto fdisc_failed;
6183 }
d7c255b2 6184 spin_lock_irq(shost->host_lock);
695a814e 6185 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
d7c255b2
JS
6186 vport->fc_flag |= FC_FABRIC;
6187 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6188 vport->fc_flag |= FC_PUBLIC_LOOP;
6189 spin_unlock_irq(shost->host_lock);
92d7f7b0 6190
d7c255b2
JS
6191 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
6192 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
6193 if ((vport->fc_prevDID != vport->fc_myDID) &&
6194 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
6195 /* If our NportID changed, we need to ensure all
6196 * remaining NPORTs get unreg_login'ed so we can
6197 * issue unreg_vpi.
6198 */
6199 list_for_each_entry_safe(np, next_np,
6200 &vport->fc_nodes, nlp_listp) {
6201 if (!NLP_CHK_NODE_ACT(np) ||
6202 (np->nlp_state != NLP_STE_NPR_NODE) ||
6203 !(np->nlp_flag & NLP_NPR_ADISC))
6204 continue;
09372820 6205 spin_lock_irq(shost->host_lock);
d7c255b2 6206 np->nlp_flag &= ~NLP_NPR_ADISC;
09372820 6207 spin_unlock_irq(shost->host_lock);
d7c255b2 6208 lpfc_unreg_rpi(vport, np);
92d7f7b0 6209 }
d7c255b2
JS
6210 lpfc_mbx_unreg_vpi(vport);
6211 spin_lock_irq(shost->host_lock);
6212 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6213 spin_unlock_irq(shost->host_lock);
92d7f7b0
JS
6214 }
6215
d7c255b2
JS
6216 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6217 lpfc_register_new_vport(phba, vport, ndlp);
6218 else
6219 lpfc_do_scr_ns_plogi(phba, vport);
6220 goto out;
6221fdisc_failed:
6222 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6223 /* Cancel discovery timer */
6224 lpfc_can_disctmo(vport);
6225 lpfc_nlp_put(ndlp);
92d7f7b0
JS
6226out:
6227 lpfc_els_free_iocb(phba, cmdiocb);
6228}
6229
e59058c4 6230/**
3621a710 6231 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
e59058c4
JS
6232 * @vport: pointer to a virtual N_Port data structure.
6233 * @ndlp: pointer to a node-list data structure.
6234 * @retry: number of retries to the command IOCB.
6235 *
6236 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
6237 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
6238 * routine to issue the IOCB, which makes sure only one outstanding fabric
6239 * IOCB will be sent off the HBA at any given time.
6240 *
6241 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6242 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6243 * will be stored into the context1 field of the IOCB for the completion
6244 * callback function to the FDISC ELS command.
6245 *
6246 * Return code
6247 * 0 - Successfully issued fdisc iocb command
6248 * 1 - Failed to issue fdisc iocb command
6249 **/
a6ababd2 6250static int
92d7f7b0
JS
6251lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6252 uint8_t retry)
6253{
6254 struct lpfc_hba *phba = vport->phba;
6255 IOCB_t *icmd;
6256 struct lpfc_iocbq *elsiocb;
6257 struct serv_parm *sp;
6258 uint8_t *pcmd;
6259 uint16_t cmdsize;
6260 int did = ndlp->nlp_DID;
6261 int rc;
92d7f7b0 6262
5ffc266e 6263 vport->port_state = LPFC_FDISC;
92d7f7b0
JS
6264 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6265 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6266 ELS_CMD_FDISC);
6267 if (!elsiocb) {
92d7f7b0 6268 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
6269 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6270 "0255 Issue FDISC: no IOCB\n");
92d7f7b0
JS
6271 return 1;
6272 }
6273
6274 icmd = &elsiocb->iocb;
6275 icmd->un.elsreq64.myID = 0;
6276 icmd->un.elsreq64.fl = 1;
6277
f1126688
JS
6278 if (phba->sli_rev == LPFC_SLI_REV4) {
6279 /* FDISC needs to be 1 for WQE VPI */
6280 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6281 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6282 /* Set the ulpContext to the vpi */
6283 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6284 } else {
6285 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6286 icmd->ulpCt_h = 1;
6287 icmd->ulpCt_l = 0;
6288 }
92d7f7b0
JS
6289
6290 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6291 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
6292 pcmd += sizeof(uint32_t); /* CSP Word 1 */
6293 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
6294 sp = (struct serv_parm *) pcmd;
6295 /* Setup CSPs accordingly for Fabric */
6296 sp->cmn.e_d_tov = 0;
6297 sp->cmn.w2.r_a_tov = 0;
6298 sp->cls1.classValid = 0;
6299 sp->cls2.seqDelivery = 1;
6300 sp->cls3.seqDelivery = 1;
6301
6302 pcmd += sizeof(uint32_t); /* CSP Word 2 */
6303 pcmd += sizeof(uint32_t); /* CSP Word 3 */
6304 pcmd += sizeof(uint32_t); /* CSP Word 4 */
6305 pcmd += sizeof(uint32_t); /* Port Name */
6306 memcpy(pcmd, &vport->fc_portname, 8);
6307 pcmd += sizeof(uint32_t); /* Node Name */
6308 pcmd += sizeof(uint32_t); /* Node Name */
6309 memcpy(pcmd, &vport->fc_nodename, 8);
6310
6311 lpfc_set_disctmo(vport);
6312
6313 phba->fc_stat.elsXmitFDISC++;
6314 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
6315
858c9f6c
JS
6316 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6317 "Issue FDISC: did:x%x",
6318 did, 0, 0);
6319
92d7f7b0
JS
6320 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
6321 if (rc == IOCB_ERROR) {
6322 lpfc_els_free_iocb(phba, elsiocb);
92d7f7b0 6323 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
6324 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6325 "0256 Issue FDISC: Cannot send IOCB\n");
92d7f7b0
JS
6326 return 1;
6327 }
6328 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
92d7f7b0
JS
6329 return 0;
6330}
6331
e59058c4 6332/**
3621a710 6333 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
e59058c4
JS
6334 * @phba: pointer to lpfc hba data structure.
6335 * @cmdiocb: pointer to lpfc command iocb data structure.
6336 * @rspiocb: pointer to lpfc response iocb data structure.
6337 *
6338 * This routine is the completion callback function to the issuing of a LOGO
6339 * ELS command off a vport. It frees the command IOCB and then decrements the
6340 * reference count held on ndlp for this completion function, indicating that
6341 * the reference to the ndlp is no longer needed. Note that the
6342 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
6343 * callback function and an additional explicit ndlp reference decrement
6344 * will trigger the actual release of the ndlp.
6345 **/
92d7f7b0
JS
6346static void
6347lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6348 struct lpfc_iocbq *rspiocb)
6349{
6350 struct lpfc_vport *vport = cmdiocb->vport;
858c9f6c 6351 IOCB_t *irsp;
e47c9093
JS
6352 struct lpfc_nodelist *ndlp;
6353 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
858c9f6c
JS
6354
6355 irsp = &rspiocb->iocb;
6356 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6357 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
6358 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
92d7f7b0
JS
6359
6360 lpfc_els_free_iocb(phba, cmdiocb);
6361 vport->unreg_vpi_cmpl = VPORT_ERROR;
e47c9093
JS
6362
6363 /* Trigger the release of the ndlp after logo */
6364 lpfc_nlp_put(ndlp);
92d7f7b0
JS
6365}
6366
e59058c4 6367/**
3621a710 6368 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
e59058c4
JS
6369 * @vport: pointer to a virtual N_Port data structure.
6370 * @ndlp: pointer to a node-list data structure.
6371 *
6372 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
6373 *
6374 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6375 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6376 * will be stored into the context1 field of the IOCB for the completion
6377 * callback function to the LOGO ELS command.
6378 *
6379 * Return codes
6380 * 0 - Successfully issued logo off the @vport
6381 * 1 - Failed to issue logo off the @vport
6382 **/
92d7f7b0
JS
6383int
6384lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6385{
6386 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6387 struct lpfc_hba *phba = vport->phba;
92d7f7b0
JS
6388 IOCB_t *icmd;
6389 struct lpfc_iocbq *elsiocb;
6390 uint8_t *pcmd;
6391 uint16_t cmdsize;
6392
6393 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
6394 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
6395 ELS_CMD_LOGO);
6396 if (!elsiocb)
6397 return 1;
6398
6399 icmd = &elsiocb->iocb;
6400 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6401 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
6402 pcmd += sizeof(uint32_t);
6403
6404 /* Fill in LOGO payload */
6405 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
6406 pcmd += sizeof(uint32_t);
6407 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
6408
858c9f6c
JS
6409 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6410 "Issue LOGO npiv did:x%x flg:x%x",
6411 ndlp->nlp_DID, ndlp->nlp_flag, 0);
6412
92d7f7b0
JS
6413 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
6414 spin_lock_irq(shost->host_lock);
6415 ndlp->nlp_flag |= NLP_LOGO_SND;
6416 spin_unlock_irq(shost->host_lock);
3772a991
JS
6417 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6418 IOCB_ERROR) {
92d7f7b0
JS
6419 spin_lock_irq(shost->host_lock);
6420 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6421 spin_unlock_irq(shost->host_lock);
6422 lpfc_els_free_iocb(phba, elsiocb);
6423 return 1;
6424 }
6425 return 0;
6426}
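
/*
 * Illustrative sketch, not part of the committed driver code: a caller-side
 * view of lpfc_issue_els_npiv_logo(). During vport teardown, the fabric node
 * (Fabric_DID) is looked up and a LOGO is issued for the vport; the
 * completion above then updates unreg_vpi_cmpl and drops the ndlp reference.
 * The helper name is an assumption; the real teardown path (in the vport
 * management code) performs additional state checks.
 */
static int
lpfc_sketch_logo_npiv_port(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return 1;	/* nothing logged in to the fabric */

	return lpfc_issue_els_npiv_logo(vport, ndlp);
}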
6427
e59058c4 6428/**
3621a710 6429 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
e59058c4
JS
6430 * @ptr: holder for the timer function associated data.
6431 *
6432 * This routine is invoked by the fabric iocb block timer after
6433 * timeout. It posts the fabric iocb block timeout event by setting the
6434 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
6435 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
6436 * thread then invokes lpfc_unblock_fabric_iocbs() to handle the
6437 * posted WORKER_FABRIC_BLOCK_TMO event.
6438 **/
92d7f7b0
JS
6439void
6440lpfc_fabric_block_timeout(unsigned long ptr)
6441{
6442 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6443 unsigned long iflags;
6444 uint32_t tmo_posted;
5e9d9b82 6445
92d7f7b0
JS
6446 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
6447 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
6448 if (!tmo_posted)
6449 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
6450 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
6451
5e9d9b82
JS
6452 if (!tmo_posted)
6453 lpfc_worker_wake_up(phba);
6454 return;
92d7f7b0
JS
6455}
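
/*
 * Illustrative sketch, not part of the committed driver code: how the worker
 * thread is expected to react to the event posted above, based only on the
 * description in the comment block. The real handling lives in the worker
 * thread code elsewhere in the driver; this condensed helper only shows the
 * pairing of WORKER_FABRIC_BLOCK_TMO with lpfc_unblock_fabric_iocbs().
 */
static void
lpfc_sketch_handle_fabric_block_tmo(struct lpfc_hba *phba)
{
	unsigned long iflags;
	uint32_t evt;

	/* sample and clear the posted event under the work port lock */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	evt = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
	phba->pport->work_port_events &= ~WORKER_FABRIC_BLOCK_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	if (evt)
		lpfc_unblock_fabric_iocbs(phba);
}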
6456
e59058c4 6457/**
3621a710 6458 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
e59058c4
JS
6459 * @phba: pointer to lpfc hba data structure.
6460 *
6461 * This routine issues one fabric iocb from the driver internal list to
6462 * the HBA. It first checks whether it's ready to issue one fabric iocb to
6463 * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it
6464 * removes one pending fabric iocb from the driver internal list and invokes
6465 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
6466 **/
92d7f7b0
JS
6467static void
6468lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6469{
6470 struct lpfc_iocbq *iocb;
6471 unsigned long iflags;
6472 int ret;
92d7f7b0
JS
6473 IOCB_t *cmd;
6474
6475repeat:
6476 iocb = NULL;
6477 spin_lock_irqsave(&phba->hbalock, iflags);
7f5f3d0d 6478 /* Post any pending iocb to the SLI layer */
92d7f7b0
JS
6479 if (atomic_read(&phba->fabric_iocb_count) == 0) {
6480 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
6481 list);
6482 if (iocb)
7f5f3d0d 6483 /* Increment fabric iocb count to hold the position */
92d7f7b0
JS
6484 atomic_inc(&phba->fabric_iocb_count);
6485 }
6486 spin_unlock_irqrestore(&phba->hbalock, iflags);
6487 if (iocb) {
6488 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6489 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6490 iocb->iocb_flag |= LPFC_IO_FABRIC;
6491
858c9f6c
JS
6492 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6493 "Fabric sched1: ste:x%x",
6494 iocb->vport->port_state, 0, 0);
6495
3772a991 6496 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
6497
6498 if (ret == IOCB_ERROR) {
6499 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6500 iocb->fabric_iocb_cmpl = NULL;
6501 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6502 cmd = &iocb->iocb;
6503 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6504 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6505 iocb->iocb_cmpl(phba, iocb, iocb);
6506
6507 atomic_dec(&phba->fabric_iocb_count);
6508 goto repeat;
6509 }
6510 }
6511
6512 return;
6513}
6514
e59058c4 6515/**
3621a710 6516 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
e59058c4
JS
6517 * @phba: pointer to lpfc hba data structure.
6518 *
6519 * This routine unblocks the issuing of fabric iocb commands. The function
6520 * will clear the fabric iocb block bit and then invoke the routine
6521 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
6522 * from the driver internal fabric iocb list.
6523 **/
92d7f7b0
JS
6524void
6525lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
6526{
6527 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6528
6529 lpfc_resume_fabric_iocbs(phba);
6530 return;
6531}
6532
e59058c4 6533/**
3621a710 6534 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
e59058c4
JS
6535 * @phba: pointer to lpfc hba data structure.
6536 *
6537 * This routine blocks the issuing of fabric iocbs for a specified amount of
6538 * time (currently 100 ms). This is done by setting the fabric iocb block bit
6539 * and setting up a timeout timer for 100 ms. When the block bit is set, no
6540 * more fabric iocbs will be issued out of the HBA.
6541 **/
92d7f7b0
JS
6542static void
6543lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
6544{
6545 int blocked;
6546
6547 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7f5f3d0d 6548 /* Start a timer to unblock fabric iocbs after 100ms */
92d7f7b0
JS
6549 if (!blocked)
6550 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
6551
6552 return;
6553}
6554
e59058c4 6555/**
3621a710 6556 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
e59058c4
JS
6557 * @phba: pointer to lpfc hba data structure.
6558 * @cmdiocb: pointer to lpfc command iocb data structure.
6559 * @rspiocb: pointer to lpfc response iocb data structure.
6560 *
6561 * This routine is the callback function that is assigned to the fabric iocb's
6562 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
6563 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
6564 * function first restores and invokes the original iocb's callback function
6565 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
6566 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
6567 **/
92d7f7b0
JS
6568static void
6569lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6570 struct lpfc_iocbq *rspiocb)
6571{
6572 struct ls_rjt stat;
6573
6574 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
6575 BUG();
6576
6577 switch (rspiocb->iocb.ulpStatus) {
6578 case IOSTAT_NPORT_RJT:
6579 case IOSTAT_FABRIC_RJT:
6580 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
6581 lpfc_block_fabric_iocbs(phba);
ed957684 6582 }
92d7f7b0
JS
6583 break;
6584
6585 case IOSTAT_NPORT_BSY:
6586 case IOSTAT_FABRIC_BSY:
6587 lpfc_block_fabric_iocbs(phba);
6588 break;
6589
6590 case IOSTAT_LS_RJT:
6591 stat.un.lsRjtError =
6592 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
6593 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
6594 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
6595 lpfc_block_fabric_iocbs(phba);
6596 break;
6597 }
6598
6599 if (atomic_read(&phba->fabric_iocb_count) == 0)
6600 BUG();
6601
6602 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
6603 cmdiocb->fabric_iocb_cmpl = NULL;
6604 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
6605 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
6606
6607 atomic_dec(&phba->fabric_iocb_count);
6608 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7f5f3d0d
JS
6609 /* Post any pending iocbs to HBA */
6610 lpfc_resume_fabric_iocbs(phba);
92d7f7b0
JS
6611 }
6612}
6613
e59058c4 6614/**
3621a710 6615 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
e59058c4
JS
6616 * @phba: pointer to lpfc hba data structure.
6617 * @iocb: pointer to lpfc command iocb data structure.
6618 *
6619 * This routine is used as the top-level API for issuing a fabric iocb command
6620 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
6621 * function makes sure that only one fabric bound iocb will be outstanding at
6622 * any given time. As such, this function will first check to see whether there
6623 * is already an outstanding fabric iocb on the wire. If so, it will put the
6624 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
6625 * issued later. Otherwise, it will issue the iocb on the wire and update the
6626 * fabric iocb count to indicate that there is one fabric iocb on the wire.
6627 *
6628 * Note that this implementation can potentially send fabric IOCBs out of
6629 * order. The problem is that the construction of the "ready" boolean does
6630 * not include the condition that the internal fabric IOCB list is empty. As
6631 * such, it is possible that a fabric IOCB issued by this routine might "jump"
6632 * ahead of the fabric IOCBs already on the internal list.
6633 *
6634 * Return code
6635 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
6636 * IOCB_ERROR - failed to issue fabric iocb
6637 **/
a6ababd2 6638static int
92d7f7b0
JS
6639lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6640{
6641 unsigned long iflags;
92d7f7b0
JS
6642 int ready;
6643 int ret;
6644
6645 if (atomic_read(&phba->fabric_iocb_count) > 1)
6646 BUG();
6647
6648 spin_lock_irqsave(&phba->hbalock, iflags);
6649 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
6650 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6651
7f5f3d0d
JS
6652 if (ready)
6653 /* Increment fabric iocb count to hold the position */
6654 atomic_inc(&phba->fabric_iocb_count);
92d7f7b0
JS
6655 spin_unlock_irqrestore(&phba->hbalock, iflags);
6656 if (ready) {
6657 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6658 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6659 iocb->iocb_flag |= LPFC_IO_FABRIC;
6660
858c9f6c
JS
6661 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6662 "Fabric sched2: ste:x%x",
6663 iocb->vport->port_state, 0, 0);
6664
3772a991 6665 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
92d7f7b0
JS
6666
6667 if (ret == IOCB_ERROR) {
6668 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6669 iocb->fabric_iocb_cmpl = NULL;
6670 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6671 atomic_dec(&phba->fabric_iocb_count);
6672 }
6673 } else {
6674 spin_lock_irqsave(&phba->hbalock, iflags);
6675 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
6676 spin_unlock_irqrestore(&phba->hbalock, iflags);
6677 ret = IOCB_SUCCESS;
6678 }
6679 return ret;
6680}
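
/*
 * Illustrative sketch, not part of the committed driver code: the calling
 * contract of lpfc_issue_fabric_iocb() as used by the FLOGI/FDISC paths in
 * this file. The helper name, the elided payload setup, and the cmpl
 * parameter are assumptions made for this sketch; the key point is that an
 * IOCB_ERROR return obliges the caller to free the iocb, while success may
 * mean "queued on the fabric list" rather than "on the wire".
 */
static int
lpfc_sketch_send_fabric_els(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    uint16_t cmdsize, uint32_t elscmd,
			    void (*cmpl)(struct lpfc_hba *,
					 struct lpfc_iocbq *,
					 struct lpfc_iocbq *))
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;

	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				     ndlp->nlp_DID, elscmd);
	if (!elsiocb)
		return 1;

	/* payload setup elided; see lpfc_issue_els_fdisc() above */

	elsiocb->iocb_cmpl = cmpl;
	if (lpfc_issue_fabric_iocb(phba, elsiocb) == IOCB_ERROR) {
		/* neither issued nor queued: the caller owns the cleanup */
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}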
6681
e59058c4 6682/**
3621a710 6683 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
e59058c4
JS
6684 * @vport: pointer to a virtual N_Port data structure.
6685 *
6686 * This routine aborts all the IOCBs associated with a @vport from the
6687 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6688 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6689 * list, removes each IOCB associated with the @vport off the list, sets the
6690 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6691 * associated with the IOCB.
6692 **/
a6ababd2 6693static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
92d7f7b0
JS
6694{
6695 LIST_HEAD(completions);
6696 struct lpfc_hba *phba = vport->phba;
6697 struct lpfc_iocbq *tmp_iocb, *piocb;
92d7f7b0
JS
6698
6699 spin_lock_irq(&phba->hbalock);
6700 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6701 list) {
6702
6703 if (piocb->vport != vport)
6704 continue;
6705
6706 list_move_tail(&piocb->list, &completions);
6707 }
6708 spin_unlock_irq(&phba->hbalock);
6709
a257bf90
JS
6710 /* Cancel all the IOCBs from the completions list */
6711 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6712 IOERR_SLI_ABORTED);
92d7f7b0
JS
6713}
6714
e59058c4 6715/**
3621a710 6716 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
e59058c4
JS
6717 * @ndlp: pointer to a node-list data structure.
6718 *
6719 * This routine aborts all the IOCBs associated with an @ndlp from the
6720 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6721 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6722 * list, removes each IOCB associated with the @ndlp off the list, sets the
6723 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6724 * associated with the IOCB.
6725 **/
92d7f7b0
JS
6726void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6727{
6728 LIST_HEAD(completions);
a257bf90 6729 struct lpfc_hba *phba = ndlp->phba;
92d7f7b0
JS
6730 struct lpfc_iocbq *tmp_iocb, *piocb;
6731 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
92d7f7b0
JS
6732
6733 spin_lock_irq(&phba->hbalock);
6734 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6735 list) {
6736 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
6737
6738 list_move_tail(&piocb->list, &completions);
ed957684 6739 }
dea3101e 6740 }
92d7f7b0
JS
6741 spin_unlock_irq(&phba->hbalock);
6742
a257bf90
JS
6743 /* Cancel all the IOCBs from the completions list */
6744 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6745 IOERR_SLI_ABORTED);
92d7f7b0
JS
6746}
6747
e59058c4 6748/**
3621a710 6749 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
e59058c4
JS
6750 * @phba: pointer to lpfc hba data structure.
6751 *
6752 * This routine aborts all the IOCBs currently on the driver internal
6753 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
6754 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
6755 * list, removes IOCBs off the list, sets the status field to
6756 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
6757 * the IOCB.
6758 **/
92d7f7b0
JS
6759void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6760{
6761 LIST_HEAD(completions);
92d7f7b0
JS
6762
6763 spin_lock_irq(&phba->hbalock);
6764 list_splice_init(&phba->fabric_iocb_list, &completions);
6765 spin_unlock_irq(&phba->hbalock);
6766
a257bf90
JS
6767 /* Cancel all the IOCBs from the completions list */
6768 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6769 IOERR_SLI_ABORTED);
dea3101e 6770}
6fb120a7
JS
6771
6772/**
6773 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6774 * @phba: pointer to lpfc hba data structure.
6775 * @axri: pointer to the els xri abort wcqe structure.
6776 *
6777 * This routine is invoked by the worker thread to process a SLI4 slow-path
6778 * ELS aborted xri.
6779 **/
6780void
6781lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6782 struct sli4_wcqe_xri_aborted *axri)
6783{
6784 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6785 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6786 unsigned long iflag = 0;
6787
6788 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6789 list_for_each_entry_safe(sglq_entry, sglq_next,
6790 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6791 if (sglq_entry->sli4_xritag == xri) {
6792 list_del(&sglq_entry->list);
6793 spin_unlock_irqrestore(
6794 &phba->sli4_hba.abts_sgl_list_lock,
6795 iflag);
6796 spin_lock_irqsave(&phba->hbalock, iflag);
6797
6798 list_add_tail(&sglq_entry->list,
6799 &phba->sli4_hba.lpfc_sgl_list);
6800 spin_unlock_irqrestore(&phba->hbalock, iflag);
6801 return;
6802 }
6803 }
6804 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6805}