/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

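/**
 * lpfc_vport_set_state - Set the transport and driver state of a vport
 * @vport: The virtual port whose state is being changed.
 * @new_state: The fc_vport_state to report to the transport.
 *
 * This routine mirrors @new_state into the transport's fc_vport and maps
 * the error states onto the driver's internal port_state
 * (LPFC_VPORT_FAILED or LPFC_VPORT_UNKNOWN).
 **/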
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
				 enum fc_vport_state new_state)
{
	struct fc_vport *fc_vport = vport->fc_vport;

	if (fc_vport) {
		/*
		 * When the transport defines fc_vport_set_state we will
		 * replace this code with the following line
		 */
		/* fc_vport_set_state(fc_vport, new_state); */
		if (new_state != FC_VPORT_INITIALIZING)
			fc_vport->vport_last_state = fc_vport->vport_state;
		fc_vport->vport_state = new_state;
	}

	/* for all the error states we will set the internal state to FAILED */
	switch (new_state) {
	case FC_VPORT_NO_FABRIC_SUPP:
	case FC_VPORT_NO_FABRIC_RSCS:
	case FC_VPORT_FABRIC_LOGOUT:
	case FC_VPORT_FABRIC_REJ_WWN:
	case FC_VPORT_FAILED:
		vport->port_state = LPFC_VPORT_FAILED;
		break;
	case FC_VPORT_LINKDOWN:
		vport->port_state = LPFC_VPORT_UNKNOWN;
		break;
	default:
		/* do nothing */
		break;
	}
}

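/**
 * lpfc_alloc_vpi - Reserve the next free vpi from the HBA's vpi bitmask
 * @phba: Pointer to the HBA context object.
 *
 * Searches phba->vpi_bmask starting at bit 1, because vpi zero is
 * reserved for the physical port, and marks the first free vpi as used.
 * Returns the allocated vpi, or 0 if no vpi is available.
 **/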
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
	unsigned long vpi;

	spin_lock_irq(&phba->hbalock);
	/* Start at bit 1 because vpi zero is reserved for the physical port */
	vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
	if (vpi > phba->max_vpi)
		vpi = 0;
	else
		set_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used++;
	spin_unlock_irq(&phba->hbalock);
	return vpi;
}

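/**
 * lpfc_free_vpi - Return a vpi to the HBA's vpi bitmask
 * @phba: Pointer to the HBA context object.
 * @vpi: The vpi to release; vpi zero (the physical port) is never freed.
 **/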
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
	if (vpi == 0)
		return;
	spin_lock_irq(&phba->hbalock);
	clear_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used--;
	spin_unlock_irq(&phba->hbalock);
}

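/**
 * lpfc_vport_sparm - Read the service parameters for a vport
 * @phba: Pointer to the HBA context object.
 * @vport: The virtual port whose service parameters are being read.
 *
 * Issues a READ_SPARM mailbox command for the vport's vpi and copies the
 * returned service parameters, node name and port name into @vport.
 * Returns 0 on success and -ENOMEM, -EINTR or -EIO on failure.
 **/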
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	mb = &pmb->u.mb;

	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/*
	 * Grab buffer pointer and clear context1 so we can use
	 * lpfc_sli_issue_mbox_wait
	 */
	mp = (struct lpfc_dmabuf *) pmb->context1;
	pmb->context1 = NULL;

	pmb->vport = vport;
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
	if (rc != MBX_SUCCESS) {
		if (signal_pending(current)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1830 Signal aborted mbxCmd x%x\n",
					 mb->mbxCommand);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EINTR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1818 VPort failed init, mbxCmd x%x "
					 "READ_SPARM mbxStatus x%x, rc = x%x\n",
					 mb->mbxCommand, mb->mbxStatus, rc);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

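/**
 * lpfc_valid_wwn_format - Validate the basic format of a world wide name
 * @phba: Pointer to the HBA context object.
 * @wwn: The world wide name being checked.
 * @name_type: "WWNN" or "WWPN", used only in the error log message.
 *
 * Rejects IEEE format 1 names that do not carry zeros in bits 59-48, as
 * described in the check below.  Returns 1 if the name is acceptable and
 * 0 if it is rejected.
 **/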
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
		      const char *name_type)
{
	/* ensure that IEEE format 1 addresses
	 * contain zeros in bits 59-48
	 */
	if (!((wwn->u.wwn[0] >> 4) == 1 &&
	      ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
		return 1;

	lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
			"1822 Invalid %s: %02x:%02x:%02x:%02x:"
			"%02x:%02x:%02x:%02x\n",
			name_type,
			wwn->u.wwn[0], wwn->u.wwn[1],
			wwn->u.wwn[2], wwn->u.wwn[3],
			wwn->u.wwn[4], wwn->u.wwn[5],
			wwn->u.wwn[6], wwn->u.wwn[7]);
	return 0;
}

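/**
 * lpfc_unique_wwpn - Check that a new vport's WWPN is unique on this HBA
 * @phba: Pointer to the HBA context object.
 * @new_vport: The vport whose port name is being checked.
 *
 * Walks phba->port_list under the hbalock comparing port names.
 * Returns 1 if no other vport uses the same WWPN, 0 if a duplicate exists.
 **/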
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
	struct lpfc_vport *vport;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport == new_vport)
			continue;
		/* If they match, return not unique */
		if (memcmp(&vport->fc_sparam.portName,
			   &new_vport->fc_sparam.portName,
			   sizeof(struct lpfc_name)) == 0) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}

/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * This driver calls this routine specifically from lpfc_vport_delete
 * to enforce a synchronous execution of vport delete relative to
 * discovery activities.  The lpfc_vport_delete routine should not return
 * until it can reasonably guarantee that discovery has quiesced.
 * Post FDISC LOGO, the driver must wait until its SAN teardown is
 * complete and all resources recovered before allowing cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t wait_flags = 0;
	unsigned long wait_time_max;
	unsigned long start_time;

	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

	/*
	 * The time constraint on this loop is a balance between the
	 * fabric RA_TOV value and dev_loss tmo.  The driver's
	 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
	 */
	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
	wait_time_max += jiffies;
	start_time = jiffies;
	while (time_before(jiffies, wait_time_max)) {
		if ((vport->num_disc_nodes > 0) ||
		    (vport->fc_flag & wait_flags) ||
		    ((vport->port_state > LPFC_VPORT_FAILED) &&
		     (vport->port_state < LPFC_VPORT_READY))) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1833 Vport discovery quiesce Wait:"
					 " state x%x fc_flags x%x"
					 " num_nodes x%x, waiting 1000 msecs"
					 " total wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 vport->num_disc_nodes,
					 jiffies_to_msecs(jiffies - start_time));
			msleep(1000);
		} else {
			/* Base case.  Wait variants satisfied.  Break out */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1834 Vport discovery quiesced:"
					 " state x%x fc_flags x%x"
					 " wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 jiffies_to_msecs(jiffies
						- start_time));
			break;
		}
	}

	if (time_after(jiffies, wait_time_max))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1835 Vport discovery quiesce failed:"
				 " state x%x fc_flags x%x wait msecs x%x\n",
				 vport->port_state, vport->fc_flag,
				 jiffies_to_msecs(jiffies - start_time));
}

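/**
 * lpfc_vport_create - Create an lpfc NPIV virtual port
 * @fc_vport: The fc_vport allocated by the FC transport for this port.
 * @disable: If true, leave the new vport in the disabled state.
 *
 * Allocates a vpi, creates the SCSI host and lpfc_vport backing @fc_vport,
 * reads and validates the vport's service parameters and, when the link
 * and fabric allow it, kicks off FDISC discovery for the new port.
 * Returns VPORT_OK on success; VPORT_INVAL, VPORT_NORESOURCES or
 * VPORT_ERROR on failure.
 **/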
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = fc_vport->shost;
	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = pport->phba;
	struct lpfc_vport *vport = NULL;
	int instance;
	int vpi;
	int rc = VPORT_ERROR;
	int status;

	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1808 Create VPORT failed: "
				"NPIV is not enabled: SLImode:%d\n",
				phba->sli_rev);
		rc = VPORT_INVAL;
		goto error_out;
	}

	vpi = lpfc_alloc_vpi(phba);
	if (vpi == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1809 Create VPORT failed: "
				"Max VPORTs (%d) exceeded\n",
				phba->max_vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	/* Assign an unused board number */
	if ((instance = lpfc_get_instance()) < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1810 Create VPORT failed: Cannot get "
				"instance number\n");
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
	if (!vport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1811 Create VPORT failed: vpi x%x\n", vpi);
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport->vpi = vpi;
	lpfc_debugfs_initialize(vport);

	if ((status = lpfc_vport_sparm(phba, vport))) {
		if (status == -EINTR) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1831 Create VPORT Interrupted.\n");
			rc = VPORT_ERROR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1813 Create VPORT failed. "
					 "Cannot get sparam\n");
			rc = VPORT_NORESOURCES;
		}
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		goto error_out;
	}

	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1821 Create VPORT failed. "
				 "Invalid WWN format\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	if (!lpfc_unique_wwpn(phba, vport)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1823 Create VPORT failed. "
				 "Duplicate WWN on HBA\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* Create binary sysfs attribute for vport */
	lpfc_alloc_sysfs_attr(vport);

	*(struct lpfc_vport **)fc_vport->dd_data = vport;
	vport->fc_vport = fc_vport;

	/*
	 * In SLI4, the vpi must be activated before it can be used
	 * by the port.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (pport->fc_flag & FC_VFI_REGISTERED)) {
		rc = lpfc_sli4_init_vpi(vport);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
					"1838 Failed to INIT_VPI on vpi %d "
					"status %d\n", vpi, rc);
			rc = VPORT_NORESOURCES;
			lpfc_free_vpi(phba, vpi);
			goto error_out;
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/*
		 * Driver cannot INIT_VPI now. Set the flag to issue
		 * init_vpi when reg_vfi completes.
		 */
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if (disable) {
		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
		rc = VPORT_OK;
		goto out;
	}

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0262 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	rc = VPORT_OK;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1825 Vport Created.\n");
	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
	return rc;
}

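/**
 * disable_vport - Take an existing vport offline
 * @fc_vport: The fc_vport whose lpfc vport is being disabled.
 *
 * Logs the vport out of the fabric, recovers its remote nodes, and
 * unregisters its RPIs and vpi, leaving the vport in FC_VPORT_DISABLED.
 * Returns VPORT_OK.
 **/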
static int
disable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
	long timeout;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && phba->link_state >= LPFC_LINK_UP) {
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	lpfc_sli_host_down(vport);

	/* Mark all nodes for discovery so we can remove them by
	 * calling lpfc_cleanup_rpis(vport, 1)
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
	}
	lpfc_cleanup_rpis(vport, 1);

	lpfc_stop_vport_timers(vport);
	lpfc_unreg_all_rpis(vport);
	lpfc_unreg_default_rpis(vport);
	/*
	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
	 * scsi_host_put() to release the vport.
	 */
	lpfc_mbx_unreg_vpi(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1826 Vport Disabled.\n");
	return VPORT_OK;
}

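/**
 * enable_vport - Bring a previously disabled vport back online
 * @fc_vport: The fc_vport whose lpfc vport is being enabled.
 *
 * Marks the vport as loading, requests re-registration of its vpi and,
 * if the fabric supports NPIV, restarts FDISC discovery for the port.
 * Returns VPORT_OK.
 **/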
static int
enable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		return VPORT_OK;
	}

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0264 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1827 Vport Enabled.\n");
	return VPORT_OK;
}

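/**
 * lpfc_vport_disable - FC transport entry point to enable or disable a vport
 * @fc_vport: The fc_vport being changed.
 * @disable: True to disable the vport, false to enable it.
 *
 * Dispatches to disable_vport() or enable_vport() and returns their result.
 **/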
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	if (disable)
		return disable_vport(fc_vport);
	else
		return enable_vport(fc_vport);
}


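/**
 * lpfc_vport_delete - FC transport entry point to delete a vport
 * @fc_vport: The fc_vport being removed.
 *
 * Waits for discovery to settle, removes the FC and SCSI hosts, performs
 * DA_ID cleanup and a fabric LOGO when appropriate, and releases the
 * vport's RPIs, vpi and remaining resources.  Returns VPORT_OK on
 * success; VPORT_ERROR, VPORT_INVAL or -EAGAIN on failure.
 **/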
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
	struct lpfc_nodelist *ndlp = NULL;
	struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	long timeout;

	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1812 vport_delete failed: Cannot delete "
				 "physical host\n");
		return VPORT_ERROR;
	}

	/* If the vport is a static vport fail the deletion. */
	if ((vport->vport_flag & STATIC_VPORT) &&
	    !(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1837 vport_delete failed: Cannot delete "
				 "static vport.\n");
		return VPORT_ERROR;
	}
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);
	/*
	 * If we are not unloading the driver then prevent the vport_delete
	 * from happening until after this vport's discovery is finished.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		int check_count = 0;
		while (check_count < ((phba->fc_ratov * 3) + 3) &&
		       vport->port_state > LPFC_VPORT_FAILED &&
		       vport->port_state < LPFC_VPORT_READY) {
			check_count++;
			msleep(1000);
		}
		if (vport->port_state > LPFC_VPORT_FAILED &&
		    vport->port_state < LPFC_VPORT_READY)
			return -EAGAIN;
	}
	/*
	 * This is a bit of a mess.  We want to ensure the shost doesn't get
	 * torn down until we're done with the embedded lpfc_vport structure.
	 *
	 * Beyond holding a reference for this function, we also need a
	 * reference for outstanding I/O requests we schedule during delete
	 * processing.  But once we scsi_remove_host() we can no longer obtain
	 * a reference through scsi_host_get().
	 *
	 * So we take two references here.  We release one reference at the
	 * bottom of the function -- after delinking the vport.  And we
	 * release the other at the completion of the unreg_vpi that gets
	 * initiated after we've disposed of all other resources associated
	 * with the port.
	 */
	if (!scsi_host_get(shost))
		return VPORT_INVAL;
	if (!scsi_host_get(shost)) {
		scsi_host_put(shost);
		return VPORT_INVAL;
	}
	lpfc_free_sysfs_attr(vport);

	lpfc_debugfs_terminate(vport);

	/* Remove FC host and then SCSI host with the vport */
	fc_remove_host(lpfc_shost_from_vport(vport));
	scsi_remove_host(lpfc_shost_from_vport(vport));

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);

	/* In case of driver unload, we shall not perform fabric logo as the
	 * worker thread already stopped at this stage and, in this case, we
	 * can safely skip the fabric logo.
	 */
	if (phba->pport->load_flag & FC_UNLOADING) {
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
		    phba->link_state >= LPFC_LINK_UP) {
			/* First look for the Fabric ndlp */
			ndlp = lpfc_findnode_did(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			else if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}
			/* Remove ndlp from vport node list */
			lpfc_dequeue_node(vport, ndlp);

			/* Indicate free memory when release */
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Kick off release ndlp when it can be safely done */
			lpfc_nlp_put(ndlp);
		}
		goto skip_logo;
	}

	/* Otherwise, we will perform fabric logo as needed */
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    phba->link_state >= LPFC_LINK_UP &&
	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		if (vport->cfg_enable_da_id) {
			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
				while (vport->ct_flags && timeout)
					timeout = schedule_timeout(timeout);
			else
				lpfc_printf_log(vport->phba, KERN_WARNING,
						LOG_VPORT,
						"1829 CT command failed to "
						"delete objects on fabric\n");
		}
		/* First look for the Fabric ndlp */
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			/* Cannot find existing Fabric ndlp, allocate one */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto skip_logo;
			lpfc_nlp_init(vport, ndlp, Fabric_DID);
			/* Indicate free memory when release */
			NLP_SET_FREE_REQ(ndlp);
		} else {
			if (!NLP_CHK_NODE_ACT(ndlp))
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto skip_logo;

			/* Remove ndlp from vport node list */
			lpfc_dequeue_node(vport, ndlp);
			spin_lock_irq(&phba->ndlp_lock);
			if (!NLP_CHK_FREE_REQ(ndlp))
				/* Indicate free memory when release */
				NLP_SET_FREE_REQ(ndlp);
			else {
				/* Skip this if ndlp is already in free mode */
				spin_unlock_irq(&phba->ndlp_lock);
				goto skip_logo;
			}
			spin_unlock_irq(&phba->ndlp_lock);
		}
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
			goto skip_logo;
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_discovery_wait(vport);

skip_logo:
	lpfc_cleanup(vport);
	lpfc_sli_host_down(vport);

	lpfc_stop_vport_timers(vport);

	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_unreg_all_rpis(vport);
		lpfc_unreg_default_rpis(vport);
		/*
		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
		 * does the scsi_host_put() to release the vport.
		 */
		if (lpfc_mbx_unreg_vpi(vport))
			scsi_host_put(shost);
	} else
		scsi_host_put(shost);

	lpfc_free_vpi(phba, vport->vpi);
	vport->work_port_events = 0;
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1828 Vport Deleted.\n");
	scsi_host_put(shost);
	return VPORT_OK;
}

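/**
 * lpfc_create_vport_work_array - Snapshot the HBA's vports for iteration
 * @phba: Pointer to the HBA context object.
 *
 * Allocates a zeroed array of phba->max_vports + 1 vport pointers and
 * fills it with every vport on phba->port_list whose shost reference
 * could be taken.  Returns the array, which must be released with
 * lpfc_destroy_vport_work_array(), or NULL if the allocation fails.
 **/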
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
	struct lpfc_vport *port_iterator;
	struct lpfc_vport **vports;
	int index = 0;
	vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
			 GFP_KERNEL);
	if (vports == NULL)
		return NULL;
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			if (!(port_iterator->load_flag & FC_UNLOADING))
				lpfc_printf_vlog(port_iterator, KERN_ERR,
					 LOG_VPORT,
					 "1801 Create vport work array FAILED: "
					 "cannot do scsi_host_get\n");
			continue;
		}
		vports[index++] = port_iterator;
	}
	spin_unlock_irq(&phba->hbalock);
	return vports;
}

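/**
 * lpfc_destroy_vport_work_array - Release a vport work array
 * @phba: Pointer to the HBA context object.
 * @vports: Array returned by lpfc_create_vport_work_array(); may be NULL.
 *
 * Drops the shost reference taken for each vport in the array and then
 * frees the array itself.
 **/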
void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
	int i;
	if (vports == NULL)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		scsi_host_put(lpfc_shost_from_vport(vports[i]));
	kfree(vports);
}


/**
 * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
 * @vport: Pointer to vport object.
 *
 * This function resets the statistical data for the vport. This function
 * is called with the host_lock held.
 **/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->lat_data)
			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
				sizeof(struct lpfc_scsicmd_bkt));
	}
}


/**
 * lpfc_alloc_bucket - Allocate data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function allocates the data buffer required for all the FC
 * nodes of the vport to collect statistical data.
 **/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;

		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_ATOMIC);

			if (!ndlp->lat_data)
				lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
					"0287 lpfc_alloc_bucket failed to "
					"allocate statistical data buffer DID "
					"0x%x\n", ndlp->nlp_DID);
		}
	}
}

/**
 * lpfc_free_bucket - Free data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function frees the statistical data buffers of all the FC
 * nodes of the vport.
 **/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;
	}
}