[SCSI] lpfc 8.2.2 : Rework the lpfc_printf_log() macro
drivers/scsi/lpfc/lpfc_hbadisc.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_hw.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_scsi.h"
36 #include "lpfc.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
41
42 /* AlpaArray for assignment of scsi ids for scan-down and bind_method */
43 static uint8_t lpfcAlpaArray[] = {
44 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
57 };
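
The table above holds the 126 assignable AL_PA values in loop-priority order; scan-down binding walks it by index. A minimal sketch of that lookup (the helper name is hypothetical, not part of the driver):

	static uint8_t example_alpa_for_index(unsigned int idx)
	{
		/* Only 126 AL_PAs are assignable on an arbitrated loop;
		 * anything past the end of the table gets no address.
		 */
		if (idx >= sizeof(lpfcAlpaArray))
			return 0;
		return lpfcAlpaArray[idx];
	}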
58
59 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
60
61 void
62 lpfc_terminate_rport_io(struct fc_rport *rport)
63 {
64 struct lpfc_rport_data *rdata;
65 struct lpfc_nodelist * ndlp;
66 struct lpfc_hba *phba;
67
68 rdata = rport->dd_data;
69 ndlp = rdata->pnode;
70
71 if (!ndlp) {
72 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
73 printk(KERN_ERR "Cannot find remote node"
74 " to terminate I/O Data x%x\n",
75 rport->port_id);
76 return;
77 }
78
79 phba = ndlp->vport->phba;
80
81 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82 "rport terminate: sid:x%x did:x%x flg:x%x",
83 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
84
85 if (ndlp->nlp_sid != NLP_NO_SID) {
86 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
87 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
88 }
89
90 return;
91 }
92
93 /*
94 * This function will be called when dev_loss_tmo fires.
95 */
96 void
97 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
98 {
99 struct lpfc_rport_data *rdata;
100 struct lpfc_nodelist * ndlp;
101 struct lpfc_vport *vport;
102 struct lpfc_hba *phba;
103 struct completion devloss_compl;
104 struct lpfc_work_evt *evtp;
105
106 rdata = rport->dd_data;
107 ndlp = rdata->pnode;
108
109 if (!ndlp) {
110 if (rport->scsi_target_id != -1) {
111 printk(KERN_ERR "Cannot find remote node"
112 " for rport in dev_loss_tmo_callbk x%x\n",
113 rport->port_id);
114 }
115 return;
116 }
117
118 vport = ndlp->vport;
119 phba = vport->phba;
120
121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
122 "rport devlosscb: sid:x%x did:x%x flg:x%x",
123 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
124
125 init_completion(&devloss_compl);
126 evtp = &ndlp->dev_loss_evt;
127
128 if (!list_empty(&evtp->evt_listp))
129 return;
130
131 spin_lock_irq(&phba->hbalock);
132 evtp->evt_arg1 = ndlp;
133 evtp->evt_arg2 = &devloss_compl;
134 evtp->evt = LPFC_EVT_DEV_LOSS;
135 list_add_tail(&evtp->evt_listp, &phba->work_list);
136 if (phba->work_wait)
137 wake_up(phba->work_wait);
138
139 spin_unlock_irq(&phba->hbalock);
140
141 wait_for_completion(&devloss_compl);
142
143 return;
144 }
145
146 /*
147 * This function is called from the worker thread when dev_loss_tmo
148 * expires.
149 */
150 void
151 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
152 {
153 struct lpfc_rport_data *rdata;
154 struct fc_rport *rport;
155 struct lpfc_vport *vport;
156 struct lpfc_hba *phba;
157 uint8_t *name;
158 int warn_on = 0;
159
160 rport = ndlp->rport;
161
162 if (!rport)
163 return;
164
165 rdata = rport->dd_data;
166 name = (uint8_t *) &ndlp->nlp_portname;
167 vport = ndlp->vport;
168 phba = vport->phba;
169
170 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
171 "rport devlosstmo:did:x%x type:x%x id:x%x",
172 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
173
174 if (!(vport->load_flag & FC_UNLOADING) &&
175 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
176 return;
177
178 if (ndlp->nlp_type & NLP_FABRIC) {
179 int put_node;
180 int put_rport;
181
182 /* We will clean up these Nodes in linkup */
183 put_node = rdata->pnode != NULL;
184 put_rport = ndlp->rport != NULL;
185 rdata->pnode = NULL;
186 ndlp->rport = NULL;
187 if (put_node)
188 lpfc_nlp_put(ndlp);
189 if (put_rport)
190 put_device(&rport->dev);
191 return;
192 }
193
194 if (ndlp->nlp_sid != NLP_NO_SID) {
195 warn_on = 1;
196 /* flush the target */
197 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
198 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
199 }
200 if (vport->load_flag & FC_UNLOADING)
201 warn_on = 0;
202
203 if (warn_on) {
204 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
205 "0203 Devloss timeout on "
206 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
207 "NPort x%x Data: x%x x%x x%x\n",
208 *name, *(name+1), *(name+2), *(name+3),
209 *(name+4), *(name+5), *(name+6), *(name+7),
210 ndlp->nlp_DID, ndlp->nlp_flag,
211 ndlp->nlp_state, ndlp->nlp_rpi);
212 } else {
213 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
214 "0204 Devloss timeout on "
215 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
216 "NPort x%x Data: x%x x%x x%x\n",
217 *name, *(name+1), *(name+2), *(name+3),
218 *(name+4), *(name+5), *(name+6), *(name+7),
219 ndlp->nlp_DID, ndlp->nlp_flag,
220 ndlp->nlp_state, ndlp->nlp_rpi);
221 }
222
223 if (!(vport->load_flag & FC_UNLOADING) &&
224 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
225 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
226 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
227 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
228 else {
229 int put_node;
230 int put_rport;
231
232 put_node = rdata->pnode != NULL;
233 put_rport = ndlp->rport != NULL;
234 rdata->pnode = NULL;
235 ndlp->rport = NULL;
236 if (put_node)
237 lpfc_nlp_put(ndlp);
238 if (put_rport)
239 put_device(&rport->dev);
240 }
241 }
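
The unlink-and-drop sequence above appears twice in this handler: each side of the rport<->ndlp linkage holds a reference on the other, so severing the link must release both. The same steps, annotated (a sketch using the function's own variables):

	put_node  = rdata->pnode != NULL;  /* rport still references the node */
	put_rport = ndlp->rport != NULL;   /* node still references the rport */
	rdata->pnode = NULL;               /* break the linkage both ways...  */
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);        /* ...then drop the node reference */
	if (put_rport)
		put_device(&rport->dev);   /* ...and the rport device reference */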
242
243
244 void
245 lpfc_worker_wake_up(struct lpfc_hba *phba)
246 {
247 wake_up(phba->work_wait);
248 return;
249 }
250
251 static void
252 lpfc_work_list_done(struct lpfc_hba *phba)
253 {
254 struct lpfc_work_evt *evtp = NULL;
255 struct lpfc_nodelist *ndlp;
256 struct lpfc_vport *vport;
257 int free_evt;
258
259 spin_lock_irq(&phba->hbalock);
260 while (!list_empty(&phba->work_list)) {
261 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
262 evt_listp);
263 spin_unlock_irq(&phba->hbalock);
264 free_evt = 1;
265 switch (evtp->evt) {
266 case LPFC_EVT_DEV_LOSS_DELAY:
267 free_evt = 0; /* evt is part of ndlp */
268 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
269 vport = ndlp->vport;
270 if (!vport)
271 break;
272
273 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
274 "rport devlossdly:did:x%x flg:x%x",
275 ndlp->nlp_DID, ndlp->nlp_flag, 0);
276
277 if (!(vport->load_flag & FC_UNLOADING) &&
278 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
279 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
280 lpfc_disc_state_machine(vport, ndlp, NULL,
281 NLP_EVT_DEVICE_RM);
282 }
283 break;
284 case LPFC_EVT_ELS_RETRY:
285 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
286 lpfc_els_retry_delay_handler(ndlp);
287 free_evt = 0; /* evt is part of ndlp */
288 break;
289 case LPFC_EVT_DEV_LOSS:
290 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
291 lpfc_nlp_get(ndlp);
292 lpfc_dev_loss_tmo_handler(ndlp);
293 free_evt = 0;
294 complete((struct completion *)(evtp->evt_arg2));
295 lpfc_nlp_put(ndlp);
296 break;
297 case LPFC_EVT_ONLINE:
298 if (phba->link_state < LPFC_LINK_DOWN)
299 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
300 else
301 *(int *) (evtp->evt_arg1) = 0;
302 complete((struct completion *)(evtp->evt_arg2));
303 break;
304 case LPFC_EVT_OFFLINE_PREP:
305 if (phba->link_state >= LPFC_LINK_DOWN)
306 lpfc_offline_prep(phba);
307 *(int *)(evtp->evt_arg1) = 0;
308 complete((struct completion *)(evtp->evt_arg2));
309 break;
310 case LPFC_EVT_OFFLINE:
311 lpfc_offline(phba);
312 lpfc_sli_brdrestart(phba);
313 *(int *)(evtp->evt_arg1) =
314 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
315 lpfc_unblock_mgmt_io(phba);
316 complete((struct completion *)(evtp->evt_arg2));
317 break;
318 case LPFC_EVT_WARM_START:
319 lpfc_offline(phba);
320 lpfc_reset_barrier(phba);
321 lpfc_sli_brdreset(phba);
322 lpfc_hba_down_post(phba);
323 *(int *)(evtp->evt_arg1) =
324 lpfc_sli_brdready(phba, HS_MBRDY);
325 lpfc_unblock_mgmt_io(phba);
326 complete((struct completion *)(evtp->evt_arg2));
327 break;
328 case LPFC_EVT_KILL:
329 lpfc_offline(phba);
330 *(int *)(evtp->evt_arg1)
331 = (phba->pport->stopped)
332 ? 0 : lpfc_sli_brdkill(phba);
333 lpfc_unblock_mgmt_io(phba);
334 complete((struct completion *)(evtp->evt_arg2));
335 break;
336 }
337 if (free_evt)
338 kfree(evtp);
339 spin_lock_irq(&phba->hbalock);
340 }
341 spin_unlock_irq(&phba->hbalock);
342
343 }
344
345 void
346 lpfc_work_done(struct lpfc_hba *phba)
347 {
348 struct lpfc_sli_ring *pring;
349 uint32_t ha_copy, status, control, work_port_events;
350 struct lpfc_vport **vports;
351 int i;
352
353 spin_lock_irq(&phba->hbalock);
354 ha_copy = phba->work_ha;
355 phba->work_ha = 0;
356 spin_unlock_irq(&phba->hbalock);
357
358 if (ha_copy & HA_ERATT)
359 lpfc_handle_eratt(phba);
360
361 if (ha_copy & HA_MBATT)
362 lpfc_sli_handle_mb_event(phba);
363
364 if (ha_copy & HA_LATT)
365 lpfc_handle_latt(phba);
366 vports = lpfc_create_vport_work_array(phba);
367 if (vports != NULL)
368 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
369 work_port_events = vports[i]->work_port_events;
370 if (work_port_events & WORKER_DISC_TMO)
371 lpfc_disc_timeout_handler(vports[i]);
372 if (work_port_events & WORKER_ELS_TMO)
373 lpfc_els_timeout_handler(vports[i]);
374 if (work_port_events & WORKER_HB_TMO)
375 lpfc_hb_timeout_handler(phba);
376 if (work_port_events & WORKER_MBOX_TMO)
377 lpfc_mbox_timeout_handler(phba);
378 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
379 lpfc_unblock_fabric_iocbs(phba);
380 if (work_port_events & WORKER_FDMI_TMO)
381 lpfc_fdmi_timeout_handler(vports[i]);
382 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
383 lpfc_ramp_down_queue_handler(phba);
384 if (work_port_events & WORKER_RAMP_UP_QUEUE)
385 lpfc_ramp_up_queue_handler(phba);
386 spin_lock_irq(&vports[i]->work_port_lock);
387 vports[i]->work_port_events &= ~work_port_events;
388 spin_unlock_irq(&vports[i]->work_port_lock);
389 }
390 lpfc_destroy_vport_work_array(vports);
391
392 pring = &phba->sli.ring[LPFC_ELS_RING];
393 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
394 status >>= (4*LPFC_ELS_RING);
395 if ((status & HA_RXMASK)
396 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
397 if (pring->flag & LPFC_STOP_IOCB_MASK) {
398 pring->flag |= LPFC_DEFERRED_RING_EVENT;
399 } else {
400 lpfc_sli_handle_slow_ring_event(phba, pring,
401 (status &
402 HA_RXMASK));
403 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
404 }
405 /*
406 * Turn on Ring interrupts
407 */
408 spin_lock_irq(&phba->hbalock);
409 control = readl(phba->HCregaddr);
410 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
411 lpfc_debugfs_slow_ring_trc(phba,
412 "WRK Enable ring: cntl:x%x hacopy:x%x",
413 control, ha_copy, 0);
414
415 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
416 writel(control, phba->HCregaddr);
417 readl(phba->HCregaddr); /* flush */
418 }
419 else {
420 lpfc_debugfs_slow_ring_trc(phba,
421 "WRK Ring ok: cntl:x%x hacopy:x%x",
422 control, ha_copy, 0);
423 }
424 spin_unlock_irq(&phba->hbalock);
425 }
426 lpfc_work_list_done(phba);
427 }
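
In the slow-ring handling above, each SLI ring owns a four-bit field of the Host Attention word, so masking with HA_RXMASK shifted by 4 * ring isolates that ring's receive status. A sketch, assuming the usual assignment in which LPFC_ELS_RING is ring 2:

	/* ring 0 -> HA bits 0-3, ring 1 -> bits 4-7, ring 2 -> bits 8-11, ... */
	status = ha_copy & (HA_RXMASK << (4 * LPFC_ELS_RING)); /* keep ELS nibble */
	status >>= (4 * LPFC_ELS_RING);                        /* move to low bits */
	if (status & HA_RXMASK)
		; /* the ELS ring has receive attention pending */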
428
429 static int
430 check_work_wait_done(struct lpfc_hba *phba)
431 {
432 struct lpfc_vport *vport;
433 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
434 int rc = 0;
435
436 spin_lock_irq(&phba->hbalock);
437 list_for_each_entry(vport, &phba->port_list, listentry) {
438 if (vport->work_port_events) {
439 rc = 1;
440 break;
441 }
442 }
443 if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
444 kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
445 rc = 1;
446 phba->work_found++;
447 } else
448 phba->work_found = 0;
449 spin_unlock_irq(&phba->hbalock);
450 return rc;
451 }
452
453
454 int
455 lpfc_do_work(void *p)
456 {
457 struct lpfc_hba *phba = p;
458 int rc;
459 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
460
461 set_user_nice(current, -20);
462 phba->work_wait = &work_waitq;
463 phba->work_found = 0;
464
465 while (1) {
466
467 rc = wait_event_interruptible(work_waitq,
468 check_work_wait_done(phba));
469
470 BUG_ON(rc);
471
472 if (kthread_should_stop())
473 break;
474
475 lpfc_work_done(phba);
476
477 /* If there is a lot of slow ring work, like during link up,
478 * check_work_wait_done() may cause this thread to not give
479 * up the CPU for very long periods of time. This may cause
480 * soft lockups or other problems. To avoid these situations
481 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
482 * consecutive iterations.
483 */
484 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
485 phba->work_found = 0;
486 schedule();
487 }
488 }
489 phba->work_wait = NULL;
490 return 0;
491 }
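
lpfc_do_work() is the body of the driver's worker kthread: it sleeps on work_waitq until check_work_wait_done() reports queued work, and exits when the thread is stopped. A sketch of the lifecycle around it, using the standard kthread API (the attach/detach call sites live outside this file, and the thread-name format here is illustrative):

	struct task_struct *worker;

	worker = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(worker))
		return PTR_ERR(worker);	/* thread could not be created */
	/* ... and on teardown: */
	kthread_stop(worker);		/* makes kthread_should_stop() return true */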
492
493 /*
494 * This is only called to handle FC worker events. Since this is a rare
495 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
496 * embedding it in the IOCB.
497 */
498 int
499 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
500 uint32_t evt)
501 {
502 struct lpfc_work_evt *evtp;
503 unsigned long flags;
504
505 /*
506 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
507 * be queued to worker thread for processing
508 */
509 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
510 if (!evtp)
511 return 0;
512
513 evtp->evt_arg1 = arg1;
514 evtp->evt_arg2 = arg2;
515 evtp->evt = evt;
516
517 spin_lock_irqsave(&phba->hbalock, flags);
518 list_add_tail(&evtp->evt_listp, &phba->work_list);
519 if (phba->work_wait)
520 lpfc_worker_wake_up(phba);
521 spin_unlock_irqrestore(&phba->hbalock, flags);
522
523 return 1;
524 }
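
A typical call site pairs the event with a result slot and a completion, then waits for the worker to run the handler. A sketch modeled on the LPFC_EVT_ONLINE case handled in lpfc_work_list_done() above (variable names hypothetical):

	struct completion online_compl;
	int status = 0;

	init_completion(&online_compl);
	if (lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE))
		wait_for_completion(&online_compl);
	/* status now holds lpfc_online()'s return value */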
525
526 void
527 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
528 {
529 struct lpfc_hba *phba = vport->phba;
530 struct lpfc_nodelist *ndlp, *next_ndlp;
531 int rc;
532
533 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
534 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
535 continue;
536
537 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
538 lpfc_unreg_rpi(vport, ndlp);
539
540 /* Leave Fabric nodes alone on link down */
541 if (!remove && ndlp->nlp_type & NLP_FABRIC)
542 continue;
543 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
544 remove
545 ? NLP_EVT_DEVICE_RM
546 : NLP_EVT_DEVICE_RECOVERY);
547 }
548 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
549 lpfc_mbx_unreg_vpi(vport);
550 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
551 }
552 }
553
554 static void
555 lpfc_linkdown_port(struct lpfc_vport *vport)
556 {
557 struct lpfc_nodelist *ndlp, *next_ndlp;
558 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
559
560 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
561
562 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
563 "Link Down: state:x%x rtry:x%x flg:x%x",
564 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
565
566 /* Cleanup any outstanding RSCN activity */
567 lpfc_els_flush_rscn(vport);
568
569 /* Cleanup any outstanding ELS commands */
570 lpfc_els_flush_cmd(vport);
571
572 lpfc_cleanup_rpis(vport, 0);
573
574 /* free any ndlp's on unused list */
575 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
576 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
577 lpfc_drop_node(vport, ndlp);
578
579 /* Turn off discovery timer if it's running */
580 lpfc_can_disctmo(vport);
581 }
582
583 int
584 lpfc_linkdown(struct lpfc_hba *phba)
585 {
586 struct lpfc_vport *vport = phba->pport;
587 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
588 struct lpfc_vport **vports;
589 LPFC_MBOXQ_t *mb;
590 int i;
591
592 if (phba->link_state == LPFC_LINK_DOWN) {
593 return 0;
594 }
595 spin_lock_irq(&phba->hbalock);
596 if (phba->link_state > LPFC_LINK_DOWN) {
597 phba->link_state = LPFC_LINK_DOWN;
598 phba->pport->fc_flag &= ~FC_LBIT;
599 }
600 spin_unlock_irq(&phba->hbalock);
601 vports = lpfc_create_vport_work_array(phba);
602 if (vports != NULL)
603 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
604 /* Issue a LINK DOWN event to all nodes */
605 lpfc_linkdown_port(vports[i]);
606 }
607 lpfc_destroy_vport_work_array(vports);
608 /* Clean up any firmware default rpi's */
609 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
610 if (mb) {
611 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
612 mb->vport = vport;
613 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
614 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
615 == MBX_NOT_FINISHED) {
616 mempool_free(mb, phba->mbox_mem_pool);
617 }
618 }
619
620 /* Set up myDID for link up if we are in pt2pt mode */
621 if (phba->pport->fc_flag & FC_PT2PT) {
622 phba->pport->fc_myDID = 0;
623 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
624 if (mb) {
625 lpfc_config_link(phba, mb);
626 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
627 mb->vport = vport;
628 if (lpfc_sli_issue_mbox(phba, mb,
629 (MBX_NOWAIT | MBX_STOP_IOCB))
630 == MBX_NOT_FINISHED) {
631 mempool_free(mb, phba->mbox_mem_pool);
632 }
633 }
634 spin_lock_irq(shost->host_lock);
635 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
636 spin_unlock_irq(shost->host_lock);
637 }
638
639 return 0;
640 }
641
642 static void
643 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
644 {
645 struct lpfc_nodelist *ndlp;
646
647 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
648 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
649 continue;
650
651 if (ndlp->nlp_type & NLP_FABRIC) {
652 /* On Linkup it's safe to clean up the ndlp
653 * from Fabric connections.
654 */
655 if (ndlp->nlp_DID != Fabric_DID)
656 lpfc_unreg_rpi(vport, ndlp);
657 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
658 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
659 /* Fail outstanding IO now since device is
660 * marked for PLOGI.
661 */
662 lpfc_unreg_rpi(vport, ndlp);
663 }
664 }
665 }
666
667 static void
668 lpfc_linkup_port(struct lpfc_vport *vport)
669 {
670 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
671 struct lpfc_nodelist *ndlp, *next_ndlp;
672 struct lpfc_hba *phba = vport->phba;
673
674 if ((vport->load_flag & FC_UNLOADING) != 0)
675 return;
676
677 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
678 "Link Up: top:x%x speed:x%x flg:x%x",
679 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
680
681 /* If NPIV is not enabled, only bring the physical port up */
682 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
683 (vport != phba->pport))
684 return;
685
686 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
687
688 spin_lock_irq(shost->host_lock);
689 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
690 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
691 vport->fc_flag |= FC_NDISC_ACTIVE;
692 vport->fc_ns_retry = 0;
693 spin_unlock_irq(shost->host_lock);
694
695 if (vport->fc_flag & FC_LBIT)
696 lpfc_linkup_cleanup_nodes(vport);
697
698 /* free any ndlp's in unused state */
699 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
700 nlp_listp)
701 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
702 lpfc_drop_node(vport, ndlp);
703 }
704
705 static int
706 lpfc_linkup(struct lpfc_hba *phba)
707 {
708 struct lpfc_vport **vports;
709 int i;
710
711 phba->link_state = LPFC_LINK_UP;
712
713 /* Unblock fabric iocbs if they are blocked */
714 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
715 del_timer_sync(&phba->fabric_block_timer);
716
717 vports = lpfc_create_vport_work_array(phba);
718 if (vports != NULL)
719 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
720 lpfc_linkup_port(vports[i]);
721 lpfc_destroy_vport_work_array(vports);
722 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
723 lpfc_issue_clear_la(phba, phba->pport);
724
725 return 0;
726 }
727
728 /*
729 * This routine handles processing a CLEAR_LA mailbox
730 * command upon completion. It is set up in the LPFC_MBOXQ
731 * as the completion routine when the command is
732 * handed off to the SLI layer.
733 */
734 void
735 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
736 {
737 struct lpfc_vport *vport = pmb->vport;
738 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
739 struct lpfc_sli *psli = &phba->sli;
740 MAILBOX_t *mb = &pmb->mb;
741 uint32_t control;
742
743 /* Since we don't do discovery right now, turn these off here */
744 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
745 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
746 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
747
748 /* Check for error */
749 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
750 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
751 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
752 "0320 CLEAR_LA mbxStatus error x%x hba "
753 "state x%x\n",
754 mb->mbxStatus, vport->port_state);
755 phba->link_state = LPFC_HBA_ERROR;
756 goto out;
757 }
758
759 if (vport->port_type == LPFC_PHYSICAL_PORT)
760 phba->link_state = LPFC_HBA_READY;
761
762 spin_lock_irq(&phba->hbalock);
763 psli->sli_flag |= LPFC_PROCESS_LA;
764 control = readl(phba->HCregaddr);
765 control |= HC_LAINT_ENA;
766 writel(control, phba->HCregaddr);
767 readl(phba->HCregaddr); /* flush */
768 spin_unlock_irq(&phba->hbalock);
769 return;
770
771 vport->num_disc_nodes = 0;
772 /* go thru NPR nodes and issue ELS PLOGIs */
773 if (vport->fc_npr_cnt)
774 lpfc_els_disc_plogi(vport);
775
776 if (!vport->num_disc_nodes) {
777 spin_lock_irq(shost->host_lock);
778 vport->fc_flag &= ~FC_NDISC_ACTIVE;
779 spin_unlock_irq(shost->host_lock);
780 }
781
782 vport->port_state = LPFC_VPORT_READY;
783
784 out:
785 /* Device Discovery completes */
786 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
787 "0225 Device Discovery completes\n");
788 mempool_free(pmb, phba->mbox_mem_pool);
789
790 spin_lock_irq(shost->host_lock);
791 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
792 spin_unlock_irq(shost->host_lock);
793
794 del_timer_sync(&phba->fc_estabtmo);
795
796 lpfc_can_disctmo(vport);
797
798 /* turn on Link Attention interrupts */
799
800 spin_lock_irq(&phba->hbalock);
801 psli->sli_flag |= LPFC_PROCESS_LA;
802 control = readl(phba->HCregaddr);
803 control |= HC_LAINT_ENA;
804 writel(control, phba->HCregaddr);
805 readl(phba->HCregaddr); /* flush */
806 spin_unlock_irq(&phba->hbalock);
807
808 return;
809 }
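
Completion routines such as the one above are attached when the mailbox command is built and issued. A sketch of the issuing side for CLEAR_LA, following the mempool/issue/error pattern used throughout this file (and assuming the lpfc_clear_la() builder from the mailbox code):

	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (mbox) {
		lpfc_clear_la(phba, mbox);	/* build the command */
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}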
810
811
812 static void
813 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
814 {
815 struct lpfc_vport *vport = pmb->vport;
816
817 if (pmb->mb.mbxStatus)
818 goto out;
819
820 mempool_free(pmb, phba->mbox_mem_pool);
821
822 if (phba->fc_topology == TOPOLOGY_LOOP &&
823 vport->fc_flag & FC_PUBLIC_LOOP &&
824 !(vport->fc_flag & FC_LBIT)) {
825 /* Need to wait for FAN - use discovery timer
826 * for timeout. port_state is identically
827 * LPFC_LOCAL_CFG_LINK while waiting for FAN
828 */
829 lpfc_set_disctmo(vport);
830 return;
831 }
832
833 /* Start discovery by sending a FLOGI. port_state is identically
834 * LPFC_FLOGI while waiting for FLOGI cmpl
835 */
836 if (vport->port_state != LPFC_FLOGI) {
837 vport->port_state = LPFC_FLOGI;
838 lpfc_set_disctmo(vport);
839 lpfc_initial_flogi(vport);
840 }
841 return;
842
843 out:
844 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
845 "0306 CONFIG_LINK mbxStatus error x%x "
846 "HBA state x%x\n",
847 pmb->mb.mbxStatus, vport->port_state);
848 mempool_free(pmb, phba->mbox_mem_pool);
849
850 lpfc_linkdown(phba);
851
852 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
853 "0200 CONFIG_LINK bad hba state x%x\n",
854 vport->port_state);
855
856 lpfc_issue_clear_la(phba, vport);
857 return;
858 }
859
860 static void
861 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
862 {
863 MAILBOX_t *mb = &pmb->mb;
864 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
865 struct lpfc_vport *vport = pmb->vport;
866
867
868 /* Check for error */
869 if (mb->mbxStatus) {
870 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
871 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
872 "0319 READ_SPARAM mbxStatus error x%x "
873 "hba state x%x>\n",
874 mb->mbxStatus, vport->port_state);
875 lpfc_linkdown(phba);
876 goto out;
877 }
878
879 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
880 sizeof (struct serv_parm));
881 if (phba->cfg_soft_wwnn)
882 u64_to_wwn(phba->cfg_soft_wwnn,
883 vport->fc_sparam.nodeName.u.wwn);
884 if (phba->cfg_soft_wwpn)
885 u64_to_wwn(phba->cfg_soft_wwpn,
886 vport->fc_sparam.portName.u.wwn);
887 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
888 sizeof(vport->fc_nodename));
889 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
890 sizeof(vport->fc_portname));
891 if (vport->port_type == LPFC_PHYSICAL_PORT) {
892 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
893 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
894 }
895
896 lpfc_mbuf_free(phba, mp->virt, mp->phys);
897 kfree(mp);
898 mempool_free(pmb, phba->mbox_mem_pool);
899 return;
900
901 out:
902 pmb->context1 = NULL;
903 lpfc_mbuf_free(phba, mp->virt, mp->phys);
904 kfree(mp);
905 lpfc_issue_clear_la(phba, vport);
906 mempool_free(pmb, phba->mbox_mem_pool);
907 return;
908 }
909
910 static void
911 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
912 {
913 struct lpfc_vport *vport = phba->pport;
914 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
915 int i;
916 struct lpfc_dmabuf *mp;
917 int rc;
918
919 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
920 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
921
922 spin_lock_irq(&phba->hbalock);
923 switch (la->UlnkSpeed) {
924 case LA_1GHZ_LINK:
925 phba->fc_linkspeed = LA_1GHZ_LINK;
926 break;
927 case LA_2GHZ_LINK:
928 phba->fc_linkspeed = LA_2GHZ_LINK;
929 break;
930 case LA_4GHZ_LINK:
931 phba->fc_linkspeed = LA_4GHZ_LINK;
932 break;
933 case LA_8GHZ_LINK:
934 phba->fc_linkspeed = LA_8GHZ_LINK;
935 break;
936 default:
937 phba->fc_linkspeed = LA_UNKNW_LINK;
938 break;
939 }
940
941 phba->fc_topology = la->topology;
942 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
943
944 if (phba->fc_topology == TOPOLOGY_LOOP) {
945 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
946
947 /* Get Loop Map information */
948 if (la->il)
949 vport->fc_flag |= FC_LBIT;
950
951 vport->fc_myDID = la->granted_AL_PA;
952 i = la->un.lilpBde64.tus.f.bdeSize;
953
954 if (i == 0) {
955 phba->alpa_map[0] = 0;
956 } else {
957 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
958 int numalpa, j, k;
959 union {
960 uint8_t pamap[16];
961 struct {
962 uint32_t wd1;
963 uint32_t wd2;
964 uint32_t wd3;
965 uint32_t wd4;
966 } pa;
967 } un;
968 numalpa = phba->alpa_map[0];
969 j = 0;
970 while (j < numalpa) {
971 memset(un.pamap, 0, 16);
972 for (k = 1; j < numalpa; k++) {
973 un.pamap[k - 1] =
974 phba->alpa_map[j + 1];
975 j++;
976 if (k == 16)
977 break;
978 }
979 /* Link Up Event ALPA map */
980 lpfc_printf_log(phba,
981 KERN_WARNING,
982 LOG_LINK_EVENT,
983 "1304 Link Up Event "
984 "ALPA map Data: x%x "
985 "x%x x%x x%x\n",
986 un.pa.wd1, un.pa.wd2,
987 un.pa.wd3, un.pa.wd4);
988 }
989 }
990 }
991 } else {
992 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
993 if (phba->max_vpi && phba->cfg_npiv_enable &&
994 (phba->sli_rev == 3))
995 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
996 }
997 vport->fc_myDID = phba->fc_pref_DID;
998 vport->fc_flag |= FC_LBIT;
999 }
1000 spin_unlock_irq(&phba->hbalock);
1001
1002 lpfc_linkup(phba);
1003 if (sparam_mbox) {
1004 lpfc_read_sparam(phba, sparam_mbox, 0);
1005 sparam_mbox->vport = vport;
1006 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1007 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
1008 (MBX_NOWAIT | MBX_STOP_IOCB));
1009 if (rc == MBX_NOT_FINISHED) {
1010 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1011 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1012 kfree(mp);
1013 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1014 if (cfglink_mbox)
1015 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1016 goto out;
1017 }
1018 }
1019
1020 if (cfglink_mbox) {
1021 vport->port_state = LPFC_LOCAL_CFG_LINK;
1022 lpfc_config_link(phba, cfglink_mbox);
1023 cfglink_mbox->vport = vport;
1024 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1025 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
1026 (MBX_NOWAIT | MBX_STOP_IOCB));
1027 if (rc != MBX_NOT_FINISHED)
1028 return;
1029 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1030 }
1031 out:
1032 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1033 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1034 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1035 vport->port_state, sparam_mbox, cfglink_mbox);
1036 lpfc_issue_clear_la(phba, vport);
1037 return;
1038 }
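
For the loop-map logging inside the function above: alpa_map[0] is the count of participating ports, and the entries that follow are emitted sixteen at a time, zero-padded, as four 32-bit words per log line (a count of 20, say, produces two lines). The chunking in isolation ('map' standing in for phba->alpa_map):

	int numalpa = map[0], j = 0, k;
	uint8_t pamap[16];

	while (j < numalpa) {
		memset(pamap, 0, sizeof(pamap));
		for (k = 1; j < numalpa && k <= 16; k++)
			pamap[k - 1] = map[++j];	/* entries start at map[1] */
		/* log pamap as four words, as lpfc_printf_log() does above */
	}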
1039
1040 static void
1041 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1042 {
1043 uint32_t control;
1044 struct lpfc_sli *psli = &phba->sli;
1045
1046 lpfc_linkdown(phba);
1047
1048 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1049 spin_lock_irq(&phba->hbalock);
1050 psli->sli_flag |= LPFC_PROCESS_LA;
1051 control = readl(phba->HCregaddr);
1052 control |= HC_LAINT_ENA;
1053 writel(control, phba->HCregaddr);
1054 readl(phba->HCregaddr); /* flush */
1055 spin_unlock_irq(&phba->hbalock);
1056 }
1057
1058 /*
1059 * This routine handles processing a READ_LA mailbox
1060 * command upon completion. It is set up in the LPFC_MBOXQ
1061 * as the completion routine when the command is
1062 * handed off to the SLI layer.
1063 */
1064 void
1065 lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1066 {
1067 struct lpfc_vport *vport = pmb->vport;
1068 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1069 READ_LA_VAR *la;
1070 MAILBOX_t *mb = &pmb->mb;
1071 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1072
1073 /* Check for error */
1074 if (mb->mbxStatus) {
1075 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1076 "1307 READ_LA mbox error x%x state x%x\n",
1077 mb->mbxStatus, vport->port_state);
1078 lpfc_mbx_issue_link_down(phba);
1079 phba->link_state = LPFC_HBA_ERROR;
1080 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1081 }
1082
1083 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
1084
1085 memcpy(&phba->alpa_map[0], mp->virt, 128);
1086
1087 spin_lock_irq(shost->host_lock);
1088 if (la->pb)
1089 vport->fc_flag |= FC_BYPASSED_MODE;
1090 else
1091 vport->fc_flag &= ~FC_BYPASSED_MODE;
1092 spin_unlock_irq(shost->host_lock);
1093
1094 if (((phba->fc_eventTag + 1) < la->eventTag) ||
1095 (phba->fc_eventTag == la->eventTag)) {
1096 phba->fc_stat.LinkMultiEvent++;
1097 if (la->attType == AT_LINK_UP)
1098 if (phba->fc_eventTag != 0)
1099 lpfc_linkdown(phba);
1100 }
1101
1102 phba->fc_eventTag = la->eventTag;
1103
1104 if (la->attType == AT_LINK_UP) {
1105 phba->fc_stat.LinkUp++;
1106 if (phba->link_flag & LS_LOOPBACK_MODE) {
1107 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1108 "1306 Link Up Event in loop back mode "
1109 "x%x received Data: x%x x%x x%x x%x\n",
1110 la->eventTag, phba->fc_eventTag,
1111 la->granted_AL_PA, la->UlnkSpeed,
1112 phba->alpa_map[0]);
1113 } else {
1114 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1115 "1303 Link Up Event x%x received "
1116 "Data: x%x x%x x%x x%x\n",
1117 la->eventTag, phba->fc_eventTag,
1118 la->granted_AL_PA, la->UlnkSpeed,
1119 phba->alpa_map[0]);
1120 }
1121 lpfc_mbx_process_link_up(phba, la);
1122 } else {
1123 phba->fc_stat.LinkDown++;
1124 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1125 "1305 Link Down Event x%x received "
1126 "Data: x%x x%x x%x\n",
1127 la->eventTag, phba->fc_eventTag,
1128 phba->pport->port_state, vport->fc_flag);
1129 lpfc_mbx_issue_link_down(phba);
1130 }
1131
1132 lpfc_mbx_cmpl_read_la_free_mbuf:
1133 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1134 kfree(mp);
1135 mempool_free(pmb, phba->mbox_mem_pool);
1136 return;
1137 }
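
The eventTag checks near the top of this handler catch repeated or missed link attention events: the adapter advances the tag with every event, so anything other than exactly fc_eventTag + 1 is suspect. A worked example with hypothetical tag values:

	/* driver last recorded fc_eventTag == 5:
	 *   la->eventTag == 6: the expected next event, handled normally
	 *   la->eventTag == 5: repeat of the same event  -> LinkMultiEvent++
	 *   la->eventTag == 8: events 6 and 7 were missed -> LinkMultiEvent++,
	 *     and a link-up attention first forces lpfc_linkdown() so the
	 *     topology state is rebuilt from scratch
	 */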
1138
1139 /*
1140 * This routine handles processing a REG_LOGIN mailbox
1141 * command upon completion. It is set up in the LPFC_MBOXQ
1142 * as the completion routine when the command is
1143 * handed off to the SLI layer.
1144 */
1145 void
1146 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1147 {
1148 struct lpfc_vport *vport = pmb->vport;
1149 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1150 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1151
1152 pmb->context1 = NULL;
1153
1154 /* Good status, call state machine */
1155 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
1156 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1157 kfree(mp);
1158 mempool_free(pmb, phba->mbox_mem_pool);
1159 lpfc_nlp_put(ndlp);
1160
1161 return;
1162 }
1163
1164 static void
1165 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1166 {
1167 MAILBOX_t *mb = &pmb->mb;
1168 struct lpfc_vport *vport = pmb->vport;
1169 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1170
1171 switch (mb->mbxStatus) {
1172 case 0x0011:
1173 case 0x0020:
1174 case 0x9700:
1175 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1176 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
1177 mb->mbxStatus);
1178 break;
1179 }
1180 vport->unreg_vpi_cmpl = VPORT_OK;
1181 mempool_free(pmb, phba->mbox_mem_pool);
1182 /*
1183 * This shost reference might have been taken at the beginning of
1184 * lpfc_vport_delete()
1185 */
1186 if (vport->load_flag & FC_UNLOADING)
1187 scsi_host_put(shost);
1188 }
1189
1190 void
1191 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1192 {
1193 struct lpfc_hba *phba = vport->phba;
1194 LPFC_MBOXQ_t *mbox;
1195 int rc;
1196
1197 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1198 if (!mbox)
1199 return;
1200
1201 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1202 mbox->vport = vport;
1203 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1204 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1205 if (rc == MBX_NOT_FINISHED) {
1206 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1207 "1800 Could not issue unreg_vpi\n");
1208 mempool_free(mbox, phba->mbox_mem_pool);
1209 vport->unreg_vpi_cmpl = VPORT_ERROR;
1210 }
1211 }
1212
1213 static void
1214 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1215 {
1216 struct lpfc_vport *vport = pmb->vport;
1217 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1218 MAILBOX_t *mb = &pmb->mb;
1219
1220 switch (mb->mbxStatus) {
1221 case 0x0011:
1222 case 0x9601:
1223 case 0x9602:
1224 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1225 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
1226 mb->mbxStatus);
1227 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1228 spin_lock_irq(shost->host_lock);
1229 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1230 spin_unlock_irq(shost->host_lock);
1231 vport->fc_myDID = 0;
1232 goto out;
1233 }
1234
1235 vport->num_disc_nodes = 0;
1236 /* go thru NPR list and issue ELS PLOGIs */
1237 if (vport->fc_npr_cnt)
1238 lpfc_els_disc_plogi(vport);
1239
1240 if (!vport->num_disc_nodes) {
1241 spin_lock_irq(shost->host_lock);
1242 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1243 spin_unlock_irq(shost->host_lock);
1244 lpfc_can_disctmo(vport);
1245 }
1246 vport->port_state = LPFC_VPORT_READY;
1247
1248 out:
1249 mempool_free(pmb, phba->mbox_mem_pool);
1250 return;
1251 }
1252
1253 /*
1254 * This routine handles processing a Fabric REG_LOGIN mailbox
1255 * command upon completion. It is set up in the LPFC_MBOXQ
1256 * as the completion routine when the command is
1257 * handed off to the SLI layer.
1258 */
1259 void
1260 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1261 {
1262 struct lpfc_vport *vport = pmb->vport;
1263 MAILBOX_t *mb = &pmb->mb;
1264 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1265 struct lpfc_nodelist *ndlp;
1266 struct lpfc_vport **vports;
1267 int i;
1268
1269 ndlp = (struct lpfc_nodelist *) pmb->context2;
1270 pmb->context1 = NULL;
1271 pmb->context2 = NULL;
1272 if (mb->mbxStatus) {
1273 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1274 kfree(mp);
1275 mempool_free(pmb, phba->mbox_mem_pool);
1276 lpfc_nlp_put(ndlp);
1277
1278 if (phba->fc_topology == TOPOLOGY_LOOP) {
1279 /* FLOGI failed, use loop map to make discovery list */
1280 lpfc_disc_list_loopmap(vport);
1281
1282 /* Start discovery */
1283 lpfc_disc_start(vport);
1284 return;
1285 }
1286
1287 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1288 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1289 "0258 Register Fabric login error: 0x%x\n",
1290 mb->mbxStatus);
1291 return;
1292 }
1293
1294 ndlp->nlp_rpi = mb->un.varWords[0];
1295 ndlp->nlp_type |= NLP_FABRIC;
1296 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1297
1298 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
1299
1300 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1301 vports = lpfc_create_vport_work_array(phba);
1302 if (vports != NULL)
1303 for(i = 0;
1304 i < LPFC_MAX_VPORTS && vports[i] != NULL;
1305 i++) {
1306 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1307 continue;
1308 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1309 lpfc_initial_fdisc(vports[i]);
1310 else if (phba->sli3_options &
1311 LPFC_SLI3_NPIV_ENABLED) {
1312 lpfc_vport_set_state(vports[i],
1313 FC_VPORT_NO_FABRIC_SUPP);
1314 lpfc_printf_vlog(vport, KERN_ERR,
1315 LOG_ELS,
1316 "0259 No NPIV "
1317 "Fabric support\n");
1318 }
1319 }
1320 lpfc_destroy_vport_work_array(vports);
1321 lpfc_do_scr_ns_plogi(phba, vport);
1322 }
1323
1324 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1325 kfree(mp);
1326 mempool_free(pmb, phba->mbox_mem_pool);
1327 return;
1328 }
1329
1330 /*
1331 * This routine handles processing a NameServer REG_LOGIN mailbox
1332 * command upon completion. It is set up in the LPFC_MBOXQ
1333 * as the completion routine when the command is
1334 * handed off to the SLI layer.
1335 */
1336 void
1337 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1338 {
1339 MAILBOX_t *mb = &pmb->mb;
1340 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1341 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1342 struct lpfc_vport *vport = pmb->vport;
1343
1344 if (mb->mbxStatus) {
1345 out:
1346 lpfc_nlp_put(ndlp);
1347 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1348 kfree(mp);
1349 mempool_free(pmb, phba->mbox_mem_pool);
1350 lpfc_drop_node(vport, ndlp);
1351
1352 if (phba->fc_topology == TOPOLOGY_LOOP) {
1353 /*
1354 * RegLogin failed, use loop map to make discovery
1355 * list
1356 */
1357 lpfc_disc_list_loopmap(vport);
1358
1359 /* Start discovery */
1360 lpfc_disc_start(vport);
1361 return;
1362 }
1363 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1364 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1365 "0260 Register NameServer error: 0x%x\n",
1366 mb->mbxStatus);
1367 return;
1368 }
1369
1370 pmb->context1 = NULL;
1371
1372 ndlp->nlp_rpi = mb->un.varWords[0];
1373 ndlp->nlp_type |= NLP_FABRIC;
1374 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1375
1376 if (vport->port_state < LPFC_VPORT_READY) {
1377 /* Link up discovery requires Fabric registration. */
1378 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1379 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1380 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1381 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1382 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1383
1384 /* Issue SCR just before NameServer GID_FT Query */
1385 lpfc_issue_els_scr(vport, SCR_DID, 0);
1386 }
1387
1388 vport->fc_ns_retry = 0;
1389 /* Good status, issue CT Request to NameServer */
1390 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1391 /* Cannot issue NameServer Query, so finish up discovery */
1392 goto out;
1393 }
1394
1395 lpfc_nlp_put(ndlp);
1396 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1397 kfree(mp);
1398 mempool_free(pmb, phba->mbox_mem_pool);
1399
1400 return;
1401 }
1402
1403 static void
1404 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1405 {
1406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1407 struct fc_rport *rport;
1408 struct lpfc_rport_data *rdata;
1409 struct fc_rport_identifiers rport_ids;
1410 struct lpfc_hba *phba = vport->phba;
1411
1412 /* Remote port has reappeared. Re-register w/ FC transport */
1413 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1414 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1415 rport_ids.port_id = ndlp->nlp_DID;
1416 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1417
1418 /*
1419 * We leave our node pointer in rport->dd_data when we unregister an
1420 * FCP target port. But fc_remote_port_add zeros the space to which
1421 * rport->dd_data points. So, if we're reusing a previously
1422 * registered port, drop the reference that we took the last time we
1423 * registered the port.
1424 */
1425 if (ndlp->rport && ndlp->rport->dd_data &&
1426 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1427 lpfc_nlp_put(ndlp);
1428 }
1429
1430 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1431 "rport add: did:x%x flg:x%x type x%x",
1432 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1433
1434 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1435 if (!rport || !get_device(&rport->dev)) {
1436 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1437 "Warning: fc_remote_port_add failed\n");
1438 return;
1439 }
1440
1441 /* initialize static port data */
1442 rport->maxframe_size = ndlp->nlp_maxframe;
1443 rport->supported_classes = ndlp->nlp_class_sup;
1444 rdata = rport->dd_data;
1445 rdata->pnode = lpfc_nlp_get(ndlp);
1446
1447 if (ndlp->nlp_type & NLP_FCP_TARGET)
1448 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1449 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1450 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1451
1452
1453 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1454 fc_remote_port_rolechg(rport, rport_ids.roles);
1455
1456 if ((rport->scsi_target_id != -1) &&
1457 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1458 ndlp->nlp_sid = rport->scsi_target_id;
1459 }
1460 return;
1461 }
1462
1463 static void
1464 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1465 {
1466 struct fc_rport *rport = ndlp->rport;
1467
1468 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1469 "rport delete: did:x%x flg:x%x type x%x",
1470 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1471
1472 fc_remote_port_delete(rport);
1473
1474 return;
1475 }
1476
1477 static void
1478 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1479 {
1480 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1481
1482 spin_lock_irq(shost->host_lock);
1483 switch (state) {
1484 case NLP_STE_UNUSED_NODE:
1485 vport->fc_unused_cnt += count;
1486 break;
1487 case NLP_STE_PLOGI_ISSUE:
1488 vport->fc_plogi_cnt += count;
1489 break;
1490 case NLP_STE_ADISC_ISSUE:
1491 vport->fc_adisc_cnt += count;
1492 break;
1493 case NLP_STE_REG_LOGIN_ISSUE:
1494 vport->fc_reglogin_cnt += count;
1495 break;
1496 case NLP_STE_PRLI_ISSUE:
1497 vport->fc_prli_cnt += count;
1498 break;
1499 case NLP_STE_UNMAPPED_NODE:
1500 vport->fc_unmap_cnt += count;
1501 break;
1502 case NLP_STE_MAPPED_NODE:
1503 vport->fc_map_cnt += count;
1504 break;
1505 case NLP_STE_NPR_NODE:
1506 vport->fc_npr_cnt += count;
1507 break;
1508 }
1509 spin_unlock_irq(shost->host_lock);
1510 }
1511
1512 static void
1513 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1514 int old_state, int new_state)
1515 {
1516 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1517
1518 if (new_state == NLP_STE_UNMAPPED_NODE) {
1519 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1520 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1521 ndlp->nlp_type |= NLP_FC_NODE;
1522 }
1523 if (new_state == NLP_STE_MAPPED_NODE)
1524 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1525 if (new_state == NLP_STE_NPR_NODE)
1526 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1527
1528 /* Transport interface */
1529 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1530 old_state == NLP_STE_UNMAPPED_NODE)) {
1531 vport->phba->nport_event_cnt++;
1532 lpfc_unregister_remote_port(ndlp);
1533 }
1534
1535 if (new_state == NLP_STE_MAPPED_NODE ||
1536 new_state == NLP_STE_UNMAPPED_NODE) {
1537 vport->phba->nport_event_cnt++;
1538 /*
1539 * Tell the fc transport about the port, if we haven't
1540 * already. If we have, and it's a scsi entity, be
1541 * sure to unblock any attached scsi devices
1542 */
1543 lpfc_register_remote_port(vport, ndlp);
1544 }
1545 /*
1546 * if we added to Mapped list, but the remote port
1547 * registration failed or assigned a target id outside
1548 * our presentable range - move the node to the
1549 * Unmapped List
1550 */
1551 if (new_state == NLP_STE_MAPPED_NODE &&
1552 (!ndlp->rport ||
1553 ndlp->rport->scsi_target_id == -1 ||
1554 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1555 spin_lock_irq(shost->host_lock);
1556 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1557 spin_unlock_irq(shost->host_lock);
1558 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1559 }
1560 }
1561
1562 static char *
1563 lpfc_nlp_state_name(char *buffer, size_t size, int state)
1564 {
1565 static char *states[] = {
1566 [NLP_STE_UNUSED_NODE] = "UNUSED",
1567 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1568 [NLP_STE_ADISC_ISSUE] = "ADISC",
1569 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1570 [NLP_STE_PRLI_ISSUE] = "PRLI",
1571 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1572 [NLP_STE_MAPPED_NODE] = "MAPPED",
1573 [NLP_STE_NPR_NODE] = "NPR",
1574 };
1575
1576 if (state < ARRAY_SIZE(states) && states[state])
1577 strlcpy(buffer, states[state], size);
1578 else
1579 snprintf(buffer, size, "unknown (%d)", state);
1580 return buffer;
1581 }
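
lpfc_nlp_state_name() fills the caller's buffer and returns it, so it can sit directly in a log call's argument list, as lpfc_nlp_set_state() below does. Behavior sketch (the state value 42 is just an out-of-range example):

	char buf[16];

	lpfc_nlp_state_name(buf, sizeof(buf), NLP_STE_MAPPED_NODE);
	/* buf == "MAPPED" */
	lpfc_nlp_state_name(buf, sizeof(buf), 42);
	/* unmapped slot or out of range: buf == "unknown (42)" */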
1582
1583 void
1584 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1585 int state)
1586 {
1587 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1588 int old_state = ndlp->nlp_state;
1589 char name1[16], name2[16];
1590
1591 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1592 "0904 NPort state transition x%06x, %s -> %s\n",
1593 ndlp->nlp_DID,
1594 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1595 lpfc_nlp_state_name(name2, sizeof(name2), state));
1596
1597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1598 "node statechg did:x%x old:%d ste:%d",
1599 ndlp->nlp_DID, old_state, state);
1600
1601 if (old_state == NLP_STE_NPR_NODE &&
1602 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1603 state != NLP_STE_NPR_NODE)
1604 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1605 if (old_state == NLP_STE_UNMAPPED_NODE) {
1606 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1607 ndlp->nlp_type &= ~NLP_FC_NODE;
1608 }
1609
1610 if (list_empty(&ndlp->nlp_listp)) {
1611 spin_lock_irq(shost->host_lock);
1612 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1613 spin_unlock_irq(shost->host_lock);
1614 } else if (old_state)
1615 lpfc_nlp_counters(vport, old_state, -1);
1616
1617 ndlp->nlp_state = state;
1618 lpfc_nlp_counters(vport, state, 1);
1619 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1620 }
1621
1622 void
1623 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1624 {
1625 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1626
1627 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1628 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1629 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1630 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1631 spin_lock_irq(shost->host_lock);
1632 list_del_init(&ndlp->nlp_listp);
1633 spin_unlock_irq(shost->host_lock);
1634 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1635 NLP_STE_UNUSED_NODE);
1636 }
1637
1638 void
1639 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1640 {
1641 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1642
1643 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1644 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1645 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1646 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1647 spin_lock_irq(shost->host_lock);
1648 list_del_init(&ndlp->nlp_listp);
1649 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
1650 spin_unlock_irq(shost->host_lock);
1651 lpfc_nlp_put(ndlp);
1652 }
1653
1654 /*
1655 * Start / ReStart rescue timer for Discovery / RSCN handling
1656 */
1657 void
1658 lpfc_set_disctmo(struct lpfc_vport *vport)
1659 {
1660 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1661 struct lpfc_hba *phba = vport->phba;
1662 uint32_t tmo;
1663
1664 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
665 /* For FAN, timeout should be greater than edtov */
1666 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1667 } else {
668 /* Normal discovery timeout should be greater than the ELS/CT timeout;
1669 * FC spec states we need 3 * ratov for CT requests
1670 */
1671 tmo = ((phba->fc_ratov * 3) + 3);
1672 }
1673
1674
1675 if (!timer_pending(&vport->fc_disctmo)) {
1676 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1677 "set disc timer: tmo:x%x state:x%x flg:x%x",
1678 tmo, vport->port_state, vport->fc_flag);
1679 }
1680
1681 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1682 spin_lock_irq(shost->host_lock);
1683 vport->fc_flag |= FC_DISC_TMO;
1684 spin_unlock_irq(shost->host_lock);
1685
1686 /* Start Discovery Timer state <hba_state> */
1687 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1688 "0247 Start Discovery Timer state x%x "
1689 "Data: x%x x%lx x%x x%x\n",
1690 vport->port_state, tmo,
1691 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1692 vport->fc_adisc_cnt);
1693
1694 return;
1695 }
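
For a concrete feel of the arithmetic above (using common, illustrative values rather than mandated ones): with fc_ratov of 10 seconds the normal branch arms the timer for 10 * 3 + 3 = 33 seconds, while an fc_edtov of 2000 milliseconds in the FAN case gives (2000 + 999) / 1000 + 1 = 3 seconds; the + 999 rounds the millisecond E_D_TOV up to whole seconds before the one-second margin is added.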
1696
1697 /*
1698 * Cancel rescue timer for Discovery / RSCN handling
1699 */
1700 int
1701 lpfc_can_disctmo(struct lpfc_vport *vport)
1702 {
1703 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1704 unsigned long iflags;
1705
1706 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1707 "can disc timer: state:x%x rtry:x%x flg:x%x",
1708 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1709
1710 /* Turn off discovery timer if it's running */
1711 if (vport->fc_flag & FC_DISC_TMO) {
1712 spin_lock_irqsave(shost->host_lock, iflags);
1713 vport->fc_flag &= ~FC_DISC_TMO;
1714 spin_unlock_irqrestore(shost->host_lock, iflags);
1715 del_timer_sync(&vport->fc_disctmo);
1716 spin_lock_irqsave(&vport->work_port_lock, iflags);
1717 vport->work_port_events &= ~WORKER_DISC_TMO;
1718 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1719 }
1720
1721 /* Cancel Discovery Timer state <hba_state> */
1722 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1723 "0248 Cancel Discovery Timer state x%x "
1724 "Data: x%x x%x x%x\n",
1725 vport->port_state, vport->fc_flag,
1726 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
1727 return 0;
1728 }
1729
1730 /*
1731 * Check specified ring for outstanding IOCB on the SLI queue
1732 * Return true if iocb matches the specified nport
1733 */
1734 int
1735 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1736 struct lpfc_sli_ring *pring,
1737 struct lpfc_iocbq *iocb,
1738 struct lpfc_nodelist *ndlp)
1739 {
1740 struct lpfc_sli *psli = &phba->sli;
1741 IOCB_t *icmd = &iocb->iocb;
1742 struct lpfc_vport *vport = ndlp->vport;
1743
1744 if (iocb->vport != vport)
1745 return 0;
1746
1747 if (pring->ringno == LPFC_ELS_RING) {
1748 switch (icmd->ulpCommand) {
1749 case CMD_GEN_REQUEST64_CR:
1750 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1751 return 1;
1752 case CMD_ELS_REQUEST64_CR:
1753 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1754 return 1;
1755 case CMD_XMIT_ELS_RSP64_CX:
1756 if (iocb->context1 == (uint8_t *) ndlp)
1757 return 1;
1758 }
1759 } else if (pring->ringno == psli->extra_ring) {
1760
1761 } else if (pring->ringno == psli->fcp_ring) {
1762 /* Skip match check if waiting to relogin to FCP target */
1763 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1764 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1765 return 0;
1766 }
1767 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1768 return 1;
1769 }
1770 } else if (pring->ringno == psli->next_ring) {
1771
1772 }
1773 return 0;
1774 }
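
Note that the ELS-ring switch above has no break statements, so the cases fall through: an iocb that fails the GEN_REQUEST64 rpi test is still checked against the ELS_REQUEST64 and XMIT_ELS_RSP64 criteria. The same logic with the fall-through spelled out (a sketch, not a behavior change):

	switch (icmd->ulpCommand) {
	case CMD_GEN_REQUEST64_CR:
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
			return 1;
		/* fall through */
	case CMD_ELS_REQUEST64_CR:
		if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
			return 1;
		/* fall through */
	case CMD_XMIT_ELS_RSP64_CX:
		if (iocb->context1 == (uint8_t *) ndlp)
			return 1;
	}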
1775
1776 /*
1777 * Free resources / clean up outstanding I/Os
1778 * associated with nlp_rpi in the LPFC_NODELIST entry.
1779 */
1780 static int
1781 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1782 {
1783 LIST_HEAD(completions);
1784 struct lpfc_sli *psli;
1785 struct lpfc_sli_ring *pring;
1786 struct lpfc_iocbq *iocb, *next_iocb;
1787 IOCB_t *icmd;
1788 uint32_t rpi, i;
1789
1790 lpfc_fabric_abort_nport(ndlp);
1791
1792 /*
1793 * Everything that matches on txcmplq will be returned
1794 * by firmware with a no rpi error.
1795 */
1796 psli = &phba->sli;
1797 rpi = ndlp->nlp_rpi;
1798 if (rpi) {
1799 /* Now process each ring */
1800 for (i = 0; i < psli->num_rings; i++) {
1801 pring = &psli->ring[i];
1802
1803 spin_lock_irq(&phba->hbalock);
1804 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1805 list) {
1806 /*
1807 * Check to see if iocb matches the nport we are
1808 * looking for
1809 */
1810 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1811 ndlp))) {
1812 /* It matches, so dequeue it and call
1813 the completion with an error */
1814 list_move_tail(&iocb->list,
1815 &completions);
1816 pring->txq_cnt--;
1817 }
1818 }
1819 spin_unlock_irq(&phba->hbalock);
1820 }
1821 }
1822
1823 while (!list_empty(&completions)) {
1824 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1825 list_del_init(&iocb->list);
1826
1827 if (!iocb->iocb_cmpl)
1828 lpfc_sli_release_iocbq(phba, iocb);
1829 else {
1830 icmd = &iocb->iocb;
1831 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1832 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1833 (iocb->iocb_cmpl)(phba, iocb, iocb);
1834 }
1835 }
1836
1837 return 0;
1838 }
1839
1840 /*
1841 * Free rpi associated with LPFC_NODELIST entry.
1842 * This routine is called from lpfc_freenode(), when we are removing
1843 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1844 * LOGO that completes successfully, and we are waiting to PLOGI back
1845 * to the remote NPort. In addition, it is called after we receive
1846 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1847 * we are waiting to PLOGI back to the remote NPort.
1848 */
1849 int
1850 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1851 {
1852 struct lpfc_hba *phba = vport->phba;
1853 LPFC_MBOXQ_t *mbox;
1854 int rc;
1855
1856 if (ndlp->nlp_rpi) {
1857 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1858 if (mbox) {
1859 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1860 mbox->vport = vport;
1861 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1862 rc = lpfc_sli_issue_mbox(phba, mbox,
1863 (MBX_NOWAIT | MBX_STOP_IOCB));
1864 if (rc == MBX_NOT_FINISHED)
1865 mempool_free(mbox, phba->mbox_mem_pool);
1866 }
1867 lpfc_no_rpi(phba, ndlp);
1868 ndlp->nlp_rpi = 0;
1869 return 1;
1870 }
1871 return 0;
1872 }
1873
1874 void
1875 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1876 {
1877 struct lpfc_hba *phba = vport->phba;
1878 LPFC_MBOXQ_t *mbox;
1879 int rc;
1880
1881 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1882 if (mbox) {
1883 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1884 mbox->vport = vport;
1885 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1886 rc = lpfc_sli_issue_mbox(phba, mbox,
1887 (MBX_NOWAIT | MBX_STOP_IOCB));
1888 if (rc == MBX_NOT_FINISHED) {
1889 mempool_free(mbox, phba->mbox_mem_pool);
1890 }
1891 }
1892 }
1893
1894 void
1895 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1896 {
1897 struct lpfc_hba *phba = vport->phba;
1898 LPFC_MBOXQ_t *mbox;
1899 int rc;
1900
1901 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1902 if (mbox) {
1903 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1904 mbox->vport = vport;
1905 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1906 rc = lpfc_sli_issue_mbox(phba, mbox,
1907 (MBX_NOWAIT | MBX_STOP_IOCB));
1908 if (rc == MBX_NOT_FINISHED) {
1909 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1910 "1815 Could not issue "
1911 "unreg_did (default rpis)\n");
1912 mempool_free(mbox, phba->mbox_mem_pool);
1913 }
1914 }
1915 }
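/*
 * Illustrative sketch, not part of the driver: the three unreg routines
 * above share one mailbox pattern -- allocate from the mailbox pool,
 * build the command, attach the vport and the default completion
 * handler, issue without waiting, and free the mailbox if the SLI layer
 * could not queue it. A hypothetical factored helper (the name and the
 * assumed callback prototype are for illustration only):
 */
#if 0
static int example_issue_unreg(struct lpfc_vport *vport,
void (*build)(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *),
uint32_t arg)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;

mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
build(phba, vport->vpi, arg, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
== MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EIO;
}
return 0;
}
#endif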
1916
1917 /*
1918 * Free resources associated with LPFC_NODELIST entry
1919 * so it can be freed.
1920 */
1921 static int
1922 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1923 {
1924 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1925 struct lpfc_hba *phba = vport->phba;
1926 LPFC_MBOXQ_t *mb, *nextmb;
1927 struct lpfc_dmabuf *mp;
1928
1929 /* Cleanup node for NPort <nlp_DID> */
1930 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1931 "0900 Cleanup node for NPort x%x "
1932 "Data: x%x x%x x%x\n",
1933 ndlp->nlp_DID, ndlp->nlp_flag,
1934 ndlp->nlp_state, ndlp->nlp_rpi);
1935 lpfc_dequeue_node(vport, ndlp);
1936
1937 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1938 if ((mb = phba->sli.mbox_active)) {
1939 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1940 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1941 mb->context2 = NULL;
1942 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1943 }
1944 }
1945
1946 spin_lock_irq(&phba->hbalock);
1947 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1948 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1949 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1950 mp = (struct lpfc_dmabuf *) (mb->context1);
1951 if (mp) {
1952 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1953 kfree(mp);
1954 }
1955 list_del(&mb->list);
1956 mempool_free(mb, phba->mbox_mem_pool);
1957 lpfc_nlp_put(ndlp);
1958 }
1959 }
1960 spin_unlock_irq(&phba->hbalock);
1961
1962 lpfc_els_abort(phba,ndlp);
1963 spin_lock_irq(shost->host_lock);
1964 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1965 spin_unlock_irq(shost->host_lock);
1966
1967 ndlp->nlp_last_elscmd = 0;
1968 del_timer_sync(&ndlp->nlp_delayfunc);
1969
1970 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1971 list_del_init(&ndlp->els_retry_evt.evt_listp);
1972 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1973 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1979
1980 lpfc_unreg_rpi(vport, ndlp);
1981
1982 return 0;
1983 }
1984
1985 /*
1986 * Check to see if we can free the nlp back to the freelist.
1987 * If we are in the middle of using the nlp in the discovery state
1988 * machine, defer the free till we reach the end of the state machine.
1989 */
1990 static void
1991 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1992 {
1993 struct lpfc_rport_data *rdata;
1994
1995 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1996 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1997 }
1998
1999 lpfc_cleanup_node(vport, ndlp);
2000
2001 /*
2002 * We can get here with a non-NULL ndlp->rport because when we
2003 * unregister a rport we don't break the rport/node linkage. So if we
2004 * do, make sure we don't leave any dangling pointers behind.
2005 */
2006 if (ndlp->rport) {
2007 rdata = ndlp->rport->dd_data;
2008 rdata->pnode = NULL;
2009 ndlp->rport = NULL;
2010 }
2011 }
2012
2013 static int
2014 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2015 uint32_t did)
2016 {
2017 D_ID mydid, ndlpdid, matchdid;
2018
2019 if (did == Bcast_DID)
2020 return 0;
2021
2022 if (ndlp->nlp_DID == 0) {
2023 return 0;
2024 }
2025
2026 /* First check for Direct match */
2027 if (ndlp->nlp_DID == did)
2028 return 1;
2029
2030 /* Next check for area/domain identically equals 0 match */
2031 mydid.un.word = vport->fc_myDID;
2032 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2033 return 0;
2034 }
2035
2036 matchdid.un.word = did;
2037 ndlpdid.un.word = ndlp->nlp_DID;
2038 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2039 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2040 (mydid.un.b.area == matchdid.un.b.area)) {
2041 if ((ndlpdid.un.b.domain == 0) &&
2042 (ndlpdid.un.b.area == 0)) {
2043 if (ndlpdid.un.b.id)
2044 return 1;
2045 }
2046 return 0;
2047 }
2048
2049 matchdid.un.word = ndlp->nlp_DID;
2050 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2051 (mydid.un.b.area == ndlpdid.un.b.area)) {
2052 if ((matchdid.un.b.domain == 0) &&
2053 (matchdid.un.b.area == 0)) {
2054 if (matchdid.un.b.id)
2055 return 1;
2056 }
2057 }
2058 }
2059 return 0;
2060 }
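/*
 * Worked example of the fuzzy match above (values are illustrative):
 * with fc_myDID = 0x010203, an ndlp stored as nlp_DID = 0x000003
 * (domain 0, area 0, id 3) matches an incoming did of 0x010203, since
 * the incoming domain/area equal ours and the stored DID carries only
 * the ALPA. The symmetric case (stored 0x010203 against an incoming
 * 0x000003) is handled by the second block.
 */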
2061
2062 /* Search for a nodelist entry */
2063 static struct lpfc_nodelist *
2064 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2065 {
2066 struct lpfc_nodelist *ndlp;
2067 uint32_t data1;
2068
2069 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2070 if (lpfc_matchdid(vport, ndlp, did)) {
2071 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2072 ((uint32_t) ndlp->nlp_xri << 16) |
2073 ((uint32_t) ndlp->nlp_type << 8) |
2074 ((uint32_t) ndlp->nlp_rpi & 0xff));
2075 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2076 "0929 FIND node DID "
2077 "Data: x%p x%x x%x x%x\n",
2078 ndlp, ndlp->nlp_DID,
2079 ndlp->nlp_flag, data1);
2080 return ndlp;
2081 }
2082 }
2083
2084 /* FIND node did <did> NOT FOUND */
2085 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2086 "0932 FIND node did x%x NOT FOUND.\n", did);
2087 return NULL;
2088 }
2089
2090 struct lpfc_nodelist *
2091 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2092 {
2093 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2094 struct lpfc_nodelist *ndlp;
2095
2096 spin_lock_irq(shost->host_lock);
2097 ndlp = __lpfc_findnode_did(vport, did);
2098 spin_unlock_irq(shost->host_lock);
2099 return ndlp;
2100 }
2101
2102 struct lpfc_nodelist *
2103 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2104 {
2105 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2106 struct lpfc_nodelist *ndlp;
2107
2108 ndlp = lpfc_findnode_did(vport, did);
2109 if (!ndlp) {
2110 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2111 lpfc_rscn_payload_check(vport, did) == 0)
2112 return NULL;
2113 ndlp = (struct lpfc_nodelist *)
2114 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
2115 if (!ndlp)
2116 return NULL;
2117 lpfc_nlp_init(vport, ndlp, did);
2118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2119 spin_lock_irq(shost->host_lock);
2120 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2121 spin_unlock_irq(shost->host_lock);
2122 return ndlp;
2123 }
2124 if (vport->fc_flag & FC_RSCN_MODE) {
2125 if (lpfc_rscn_payload_check(vport, did)) {
2126 spin_lock_irq(shost->host_lock);
2127 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2128 spin_unlock_irq(shost->host_lock);
2129
2130 /* Since this node is marked for discovery,
2131 * delay timeout is not needed.
2132 */
2133 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2134 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2135 } else
2136 ndlp = NULL;
2137 } else {
2138 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2139 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
2140 return NULL;
2141 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2142 spin_lock_irq(shost->host_lock);
2143 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2144 spin_unlock_irq(shost->host_lock);
2145 }
2146 return ndlp;
2147 }
2148
2149 /* Build a list of nodes to discover based on the loopmap */
2150 void
2151 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
2152 {
2153 struct lpfc_hba *phba = vport->phba;
2154 int j;
2155 uint32_t alpa, index;
2156
2157 if (!lpfc_is_link_up(phba))
2158 return;
2159
2160 if (phba->fc_topology != TOPOLOGY_LOOP)
2161 return;
2162
2163 /* Check whether a loop map is present */
2164 if (phba->alpa_map[0]) {
2165 for (j = 1; j <= phba->alpa_map[0]; j++) {
2166 alpa = phba->alpa_map[j];
2167 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
2168 continue;
2169 lpfc_setup_disc_node(vport, alpa);
2170 }
2171 } else {
2172 /* No alpamap, so try all alpa's */
2173 for (j = 0; j < FC_MAXLOOP; j++) {
2174 /* If cfg_scan_down is set, start from highest
2175 * ALPA (0xef) to lowest (0x1).
2176 */
2177 if (vport->cfg_scan_down)
2178 index = j;
2179 else
2180 index = FC_MAXLOOP - j - 1;
2181 alpa = lpfcAlpaArray[index];
2182 if ((vport->fc_myDID & 0xff) == alpa)
2183 continue;
2184 lpfc_setup_disc_node(vport, alpa);
2185 }
2186 }
2187 return;
2188 }
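/*
 * Illustrative note: alpa_map[0] holds the count of valid entries that
 * follow, so a map of {3, 0xEF, 0xE8, 0x01} (values made up) sets up
 * discovery nodes for ALPAs 0xEF, 0xE8 and 0x01, skipping our own ALPA
 * and zero entries. With no map, all FC_MAXLOOP entries of
 * lpfcAlpaArray[] are tried, in the direction chosen by cfg_scan_down.
 */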
2189
2190 void
2191 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2192 {
2193 LPFC_MBOXQ_t *mbox;
2194 struct lpfc_sli *psli = &phba->sli;
2195 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2196 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2197 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2198 int rc;
2199
2200 /*
2201 * If it's not a physical port or if we have already sent
2202 * clear_la, then don't send it.
2203 */
2204 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2205 (vport->port_type != LPFC_PHYSICAL_PORT))
2206 return;
2207
2208 /* Link up discovery */
2209 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2210 phba->link_state = LPFC_CLEAR_LA;
2211 lpfc_clear_la(phba, mbox);
2212 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2213 mbox->vport = vport;
2214 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
2215 MBX_STOP_IOCB));
2216 if (rc == MBX_NOT_FINISHED) {
2217 mempool_free(mbox, phba->mbox_mem_pool);
2218 lpfc_disc_flush_list(vport);
2219 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2220 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2221 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2222 phba->link_state = LPFC_HBA_ERROR;
2223 }
2224 }
2225 }
2226
2227 /* Reg_vpi to tell firmware to resume normal operations */
2228 void
2229 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2230 {
2231 LPFC_MBOXQ_t *regvpimbox;
2232
2233 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2234 if (regvpimbox) {
2235 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2236 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2237 regvpimbox->vport = vport;
2238 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2239 (MBX_NOWAIT | MBX_STOP_IOCB))
2240 == MBX_NOT_FINISHED) {
2241 mempool_free(regvpimbox, phba->mbox_mem_pool);
2242 }
2243 }
2244 }
2245
2246 /* Start Link up / RSCN discovery on NPR nodes */
2247 void
2248 lpfc_disc_start(struct lpfc_vport *vport)
2249 {
2250 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2251 struct lpfc_hba *phba = vport->phba;
2252 uint32_t num_sent;
2253 uint32_t clear_la_pending;
2254 int did_changed;
2255
2256 if (!lpfc_is_link_up(phba))
2257 return;
2258
2259 if (phba->link_state == LPFC_CLEAR_LA)
2260 clear_la_pending = 1;
2261 else
2262 clear_la_pending = 0;
2263
2264 if (vport->port_state < LPFC_VPORT_READY)
2265 vport->port_state = LPFC_DISC_AUTH;
2266
2267 lpfc_set_disctmo(vport);
2268
2269 if (vport->fc_prevDID == vport->fc_myDID)
2270 did_changed = 0;
2271 else
2272 did_changed = 1;
2273
2274 vport->fc_prevDID = vport->fc_myDID;
2275 vport->num_disc_nodes = 0;
2276
2277 /* Start Discovery state <hba_state> */
2278 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2279 "0202 Start Discovery hba state x%x "
2280 "Data: x%x x%x x%x\n",
2281 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
2282 vport->fc_adisc_cnt);
2283
2284 /* First do ADISCs - if any */
2285 num_sent = lpfc_els_disc_adisc(vport);
2286
2287 if (num_sent)
2288 return;
2289
2290 /*
2291 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2292 * continue discovery.
2293 */
2294 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2295 !(vport->fc_flag & FC_RSCN_MODE)) {
2296 lpfc_issue_reg_vpi(phba, vport);
2297 return;
2298 }
2299
2300 /*
2301 * For SLI2, we need to set port_state to READY and continue
2302 * discovery.
2303 */
2304 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
2305 /* If we get here, there is nothing to ADISC */
2306 if (vport->port_type == LPFC_PHYSICAL_PORT)
2307 lpfc_issue_clear_la(phba, vport);
2308
2309 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2310 vport->num_disc_nodes = 0;
2311 /* go thru NPR nodes and issue ELS PLOGIs */
2312 if (vport->fc_npr_cnt)
2313 lpfc_els_disc_plogi(vport);
2314
2315 if (!vport->num_disc_nodes) {
2316 spin_lock_irq(shost->host_lock);
2317 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2318 spin_unlock_irq(shost->host_lock);
2319 lpfc_can_disctmo(vport);
2320 }
2321 }
2322 vport->port_state = LPFC_VPORT_READY;
2323 } else {
2324 /* Next do PLOGIs - if any */
2325 num_sent = lpfc_els_disc_plogi(vport);
2326
2327 if (num_sent)
2328 return;
2329
2330 if (vport->fc_flag & FC_RSCN_MODE) {
2331 /* Check to see if more RSCNs came in while we
2332 * were processing this one.
2333 */
2334 if ((vport->fc_rscn_id_cnt == 0) &&
2335 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
2336 spin_lock_irq(shost->host_lock);
2337 vport->fc_flag &= ~FC_RSCN_MODE;
2338 spin_unlock_irq(shost->host_lock);
2339 lpfc_can_disctmo(vport);
2340 } else
2341 lpfc_els_handle_rscn(vport);
2342 }
2343 }
2344 return;
2345 }
2346
2347 /*
2348 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2349 * ring that match the specified nodelist.
2350 */
2351 static void
2352 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2353 {
2354 LIST_HEAD(completions);
2355 struct lpfc_sli *psli;
2356 IOCB_t *icmd;
2357 struct lpfc_iocbq *iocb, *next_iocb;
2358 struct lpfc_sli_ring *pring;
2359
2360 psli = &phba->sli;
2361 pring = &psli->ring[LPFC_ELS_RING];
2362
2363 /* Error out matching iocbs on the txq and abort those already on
2364 * the txcmplq. First check the txq.
2365 */
2366 spin_lock_irq(&phba->hbalock);
2367 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2368 if (iocb->context1 != ndlp) {
2369 continue;
2370 }
2371 icmd = &iocb->iocb;
2372 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2373 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2374
2375 list_move_tail(&iocb->list, &completions);
2376 pring->txq_cnt--;
2377 }
2378 }
2379
2380 /* Next check the txcmplq */
2381 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2382 if (iocb->context1 != ndlp) {
2383 continue;
2384 }
2385 icmd = &iocb->iocb;
2386 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
2387 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2388 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2389 }
2390 }
2391 spin_unlock_irq(&phba->hbalock);
2392
2393 while (!list_empty(&completions)) {
2394 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2395 list_del_init(&iocb->list);
2396
2397 if (!iocb->iocb_cmpl)
2398 lpfc_sli_release_iocbq(phba, iocb);
2399 else {
2400 icmd = &iocb->iocb;
2401 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2402 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2403 (iocb->iocb_cmpl) (phba, iocb, iocb);
2404 }
2405 }
2406 }
2407
2408 void
2409 lpfc_disc_flush_list(struct lpfc_vport *vport)
2410 {
2411 struct lpfc_nodelist *ndlp, *next_ndlp;
2412 struct lpfc_hba *phba = vport->phba;
2413
2414 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2415 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2416 nlp_listp) {
2417 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2418 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2419 lpfc_free_tx(phba, ndlp);
2420 lpfc_nlp_put(ndlp);
2421 }
2422 }
2423 }
2424 }
2425
2426 void
2427 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2428 {
2429 lpfc_els_flush_rscn(vport);
2430 lpfc_els_flush_cmd(vport);
2431 lpfc_disc_flush_list(vport);
2432 }
2433
2434 /*****************************************************************************/
2435 /*
2436 * NAME: lpfc_disc_timeout
2437 *
2438 * FUNCTION: Fibre Channel driver discovery timeout routine.
2439 *
2440 * EXECUTION ENVIRONMENT: interrupt only
2441 *
2442 * CALLED FROM:
2443 * Timer function
2444 *
2445 * RETURNS:
2446 * none
2447 */
2448 /*****************************************************************************/
2449 void
2450 lpfc_disc_timeout(unsigned long ptr)
2451 {
2452 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2453 struct lpfc_hba *phba = vport->phba;
2454 unsigned long flags = 0;
2455
2456 if (unlikely(!phba))
2457 return;
2458
2459 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2460 spin_lock_irqsave(&vport->work_port_lock, flags);
2461 vport->work_port_events |= WORKER_DISC_TMO;
2462 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2463
2464 spin_lock_irqsave(&phba->hbalock, flags);
2465 if (phba->work_wait)
2466 lpfc_worker_wake_up(phba);
2467 spin_unlock_irqrestore(&phba->hbalock, flags);
2468 }
2469 return;
2470 }
2471
2472 static void
2473 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2474 {
2475 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2476 struct lpfc_hba *phba = vport->phba;
2477 struct lpfc_sli *psli = &phba->sli;
2478 struct lpfc_nodelist *ndlp, *next_ndlp;
2479 LPFC_MBOXQ_t *initlinkmbox;
2480 int rc, clrlaerr = 0;
2481
2482 if (!(vport->fc_flag & FC_DISC_TMO))
2483 return;
2484
2485 spin_lock_irq(shost->host_lock);
2486 vport->fc_flag &= ~FC_DISC_TMO;
2487 spin_unlock_irq(shost->host_lock);
2488
2489 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2490 "disc timeout: state:x%x rtry:x%x flg:x%x",
2491 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2492
2493 switch (vport->port_state) {
2494
2495 case LPFC_LOCAL_CFG_LINK:
2496 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2497 * FAN
2498 */
2499 /* FAN timeout */
2500 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
2501 "0221 FAN timeout\n");
2502 /* Start discovery by sending FLOGI, clean up old rpis */
2503 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2504 nlp_listp) {
2505 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2506 continue;
2507 if (ndlp->nlp_type & NLP_FABRIC) {
2508 /* Clean up the ndlp on Fabric connections */
2509 lpfc_drop_node(vport, ndlp);
2510 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2511 /* Fail outstanding IO now since device
2512 * is marked for PLOGI.
2513 */
2514 lpfc_unreg_rpi(vport, ndlp);
2515 }
2516 }
2517 if (vport->port_state != LPFC_FLOGI) {
2518 vport->port_state = LPFC_FLOGI;
2519 lpfc_set_disctmo(vport);
2520 lpfc_initial_flogi(vport);
2521 }
2522 break;
2523
2524 case LPFC_FDISC:
2525 case LPFC_FLOGI:
2526 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2527 /* Initial FLOGI timeout */
2528 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2529 "0222 Initial %s timeout\n",
2530 vport->vpi ? "FDISC" : "FLOGI");
2531
2532 /* Assume no Fabric and go on with discovery.
2533 * Check for outstanding ELS FLOGI to abort.
2534 */
2535
2536 /* FLOGI failed, so just use loop map to make discovery list */
2537 lpfc_disc_list_loopmap(vport);
2538
2539 /* Start discovery */
2540 lpfc_disc_start(vport);
2541 break;
2542
2543 case LPFC_FABRIC_CFG_LINK:
2544 /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2545 NameServer login */
2546 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2547 "0223 Timeout while waiting for "
2548 "NameServer login\n");
2549 /* Next look for NameServer ndlp */
2550 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2551 if (ndlp)
2552 lpfc_nlp_put(ndlp);
2553 /* Start discovery */
2554 lpfc_disc_start(vport);
2555 break;
2556
2557 case LPFC_NS_QRY:
2558 /* Check for wait for NameServer Rsp timeout */
2559 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2560 "0224 NameServer Query timeout "
2561 "Data: x%x x%x\n",
2562 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2563
2564 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2565 /* Try it one more time */
2566 vport->fc_ns_retry++;
2567 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2568 vport->fc_ns_retry, 0);
2569 if (rc == 0)
2570 break;
2571 }
2572 vport->fc_ns_retry = 0;
2573
2574 /*
2575 * Discovery is over.
2576 * Set port_state to LPFC_VPORT_READY if SLI2.
2577 * cmpl_reg_vpi will set port_state to READY for SLI3.
2578 */
2579 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2580 lpfc_issue_reg_vpi(phba, vport);
2581 else { /* NPIV Not enabled */
2582 lpfc_issue_clear_la(phba, vport);
2583 vport->port_state = LPFC_VPORT_READY;
2584 }
2585
2586 /* Setup and issue mailbox INITIALIZE LINK command */
2587 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2588 if (!initlinkmbox) {
2589 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2590 "0206 Device Discovery "
2591 "completion error\n");
2592 phba->link_state = LPFC_HBA_ERROR;
2593 break;
2594 }
2595
2596 lpfc_linkdown(phba);
2597 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2598 phba->cfg_link_speed);
2599 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2600 initlinkmbox->vport = vport;
2601 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2602 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2603 (MBX_NOWAIT | MBX_STOP_IOCB));
2604 lpfc_set_loopback_flag(phba);
2605 if (rc == MBX_NOT_FINISHED)
2606 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2607
2608 break;
2609
2610 case LPFC_DISC_AUTH:
2611 /* Node Authentication timeout */
2612 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2613 "0227 Node Authentication timeout\n");
2614 lpfc_disc_flush_list(vport);
2615
2616 /*
2617 * Set port_state to LPFC_VPORT_READY if SLI2.
2618 * cmpl_reg_vpi will set port_state to READY for SLI3.
2619 */
2620 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2621 lpfc_issue_reg_vpi(phba, vport);
2622 else { /* NPIV Not enabled */
2623 lpfc_issue_clear_la(phba, vport);
2624 vport->port_state = LPFC_VPORT_READY;
2625 }
2626 break;
2627
2628 case LPFC_VPORT_READY:
2629 if (vport->fc_flag & FC_RSCN_MODE) {
2630 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2631 "0231 RSCN timeout Data: x%x "
2632 "x%x\n",
2633 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2634
2635 /* Cleanup any outstanding ELS commands */
2636 lpfc_els_flush_cmd(vport);
2637
2638 lpfc_els_flush_rscn(vport);
2639 lpfc_disc_flush_list(vport);
2640 }
2641 break;
2642
2643 default:
2644 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2645 "0229 Unexpected discovery timeout, "
2646 "vport State x%x\n", vport->port_state);
2647 break;
2648 }
2649
2650 switch (phba->link_state) {
2651 case LPFC_CLEAR_LA:
2652 /* CLEAR LA timeout */
2653 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2654 "0228 CLEAR LA timeout\n");
2655 clrlaerr = 1;
2656 break;
2657
2658 case LPFC_LINK_UNKNOWN:
2659 case LPFC_WARM_START:
2660 case LPFC_INIT_START:
2661 case LPFC_INIT_MBX_CMDS:
2662 case LPFC_LINK_DOWN:
2663 case LPFC_LINK_UP:
2664 case LPFC_HBA_ERROR:
2665 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2666 "0230 Unexpected timeout, hba link "
2667 "state x%x\n", phba->link_state);
2668 clrlaerr = 1;
2669 break;
2670
2671 case LPFC_HBA_READY:
2672 break;
2673 }
2674
2675 if (clrlaerr) {
2676 lpfc_disc_flush_list(vport);
2677 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2678 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2679 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2680 vport->port_state = LPFC_VPORT_READY;
2681 }
2682
2683 return;
2684 }
2685
2686 /*
2687 * This routine handles processing an FDMI REG_LOGIN mailbox
2688 * command upon completion. It is setup in the LPFC_MBOXQ
2689 * as the completion routine when the command is
2690 * handed off to the SLI layer.
2691 */
2692 void
2693 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2694 {
2695 MAILBOX_t *mb = &pmb->mb;
2696 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2697 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2698 struct lpfc_vport *vport = pmb->vport;
2699
2700 pmb->context1 = NULL;
2701
2702 ndlp->nlp_rpi = mb->un.varWords[0];
2703 ndlp->nlp_type |= NLP_FABRIC;
2704 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2705
2706 /*
2707 * Start issuing the Fabric-Device Management Interface (FDMI) command
2708 * to 0xfffffa (the FDMI well-known port), or delay issuing the FDMI
2709 * command if fdmi-on=2 (supporting RPA/hostname).
2710 */
2711
2712 if (vport->cfg_fdmi_on == 1)
2713 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2714 else
2715 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
2716
2717 /* Mailbox took a reference to the node */
2718 lpfc_nlp_put(ndlp);
2719 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2720 kfree(mp);
2721 mempool_free(pmb, phba->mbox_mem_pool);
2722
2723 return;
2724 }
2725
2726 static int
2727 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2728 {
2729 uint16_t *rpi = param;
2730
2731 return ndlp->nlp_rpi == *rpi;
2732 }
2733
2734 static int
2735 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2736 {
2737 return memcmp(&ndlp->nlp_portname, param,
2738 sizeof(ndlp->nlp_portname)) == 0;
2739 }
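/*
 * Illustrative sketch, not part of the driver: the filters above plug
 * into __lpfc_find_node() below, and a hypothetical additional filter,
 * here matching on DID, would follow the same shape:
 */
#if 0
static int example_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
{
uint32_t *did = param;

return ndlp->nlp_DID == *did;
}
/* ...used as: ndlp = lpfc_find_node(vport, example_filter_by_did, &did); */
#endif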
2740
2741 struct lpfc_nodelist *
2742 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2743 {
2744 struct lpfc_nodelist *ndlp;
2745
2746 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2747 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2748 filter(ndlp, param))
2749 return ndlp;
2750 }
2751 return NULL;
2752 }
2753
2754 /*
2755 * Search node lists for a remote port matching the filter criteria.
2756 * Takes host_lock itself; lock holders should use __lpfc_find_node().
2757 */
2758 struct lpfc_nodelist *
2759 lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2760 {
2761 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2762 struct lpfc_nodelist *ndlp;
2763
2764 spin_lock_irq(shost->host_lock);
2765 ndlp = __lpfc_find_node(vport, filter, param);
2766 spin_unlock_irq(shost->host_lock);
2767 return ndlp;
2768 }
2769
2770 /*
2771 * This routine looks up the ndlp lists for the given RPI. If the rpi is
2772 * found, it returns the node list element pointer, else it returns NULL.
2773 */
2774 struct lpfc_nodelist *
2775 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2776 {
2777 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2778 }
2779
2780 struct lpfc_nodelist *
2781 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2782 {
2783 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2784 struct lpfc_nodelist *ndlp;
2785
2786 spin_lock_irq(shost->host_lock);
2787 ndlp = __lpfc_findnode_rpi(vport, rpi);
2788 spin_unlock_irq(shost->host_lock);
2789 return ndlp;
2790 }
2791
2792 /*
2793 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
2794 * found, it returns the node list element pointer, else it returns NULL.
2795 */
2796 struct lpfc_nodelist *
2797 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2798 {
2799 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2800 struct lpfc_nodelist *ndlp;
2801
2802 spin_lock_irq(shost->host_lock);
2803 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2804 spin_unlock_irq(shost->host_lock);
2805 return ndlp;
2806 }
2807
2808 void
2809 lpfc_dev_loss_delay(unsigned long ptr)
2810 {
2811 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2812 struct lpfc_vport *vport = ndlp->vport;
2813 struct lpfc_hba *phba = vport->phba;
2814 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2815 unsigned long flags;
2816
2818
2819 spin_lock_irqsave(&phba->hbalock, flags);
2820 if (!list_empty(&evtp->evt_listp)) {
2821 spin_unlock_irqrestore(&phba->hbalock, flags);
2822 return;
2823 }
2824
2825 evtp->evt_arg1 = ndlp;
2826 evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
2827 list_add_tail(&evtp->evt_listp, &phba->work_list);
2828 if (phba->work_wait)
2829 lpfc_worker_wake_up(phba);
2830 spin_unlock_irqrestore(&phba->hbalock, flags);
2831 return;
2832 }
2833
2834 void
2835 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2836 uint32_t did)
2837 {
2838 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2839 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2840 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2841 init_timer(&ndlp->nlp_delayfunc);
2842 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2843 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2844 ndlp->nlp_DID = did;
2845 ndlp->vport = vport;
2846 ndlp->nlp_sid = NLP_NO_SID;
2847 INIT_LIST_HEAD(&ndlp->nlp_listp);
2848 kref_init(&ndlp->kref);
2849
2850 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2851 "node init: did:x%x",
2852 ndlp->nlp_DID, 0, 0);
2853
2854 return;
2855 }
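/*
 * Illustrative note: the nlp_delayfunc timer initialized above is armed
 * elsewhere with mod_timer(), e.g. (a sketch, assuming a one second
 * retry delay):
 *
 * mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 *
 * after which lpfc_els_retry_delay() runs with the ndlp as its argument.
 */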
2856
2857 void
2858 lpfc_nlp_release(struct kref *kref)
2859 {
2860 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2861 kref);
2862
2863 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2864 "node release: did:x%x flg:x%x type:x%x",
2865 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2866
2867 lpfc_nlp_remove(ndlp->vport, ndlp);
2868 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2869 }
2870
2871 struct lpfc_nodelist *
2872 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2873 {
2874 if (ndlp)
2875 kref_get(&ndlp->kref);
2876 return ndlp;
2877 }
2878
2879 int
2880 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2881 {
2882 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2883 }
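/*
 * Illustrative sketch, not part of the driver: every user that keeps an
 * ndlp pointer balances lpfc_nlp_get()/lpfc_nlp_put(); the final put
 * drops the kref to zero and lpfc_nlp_release() frees the node.
 */
#if 0
static void example_node_ref(struct lpfc_nodelist *ndlp)
{
ndlp = lpfc_nlp_get(ndlp); /* take a reference before use */
if (ndlp) {
/* ... use ndlp ... */
lpfc_nlp_put(ndlp); /* release; the last put frees the node */
}
}
#endif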