drivers/scsi/bfa/bfad_bsg.c
1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18 #include <linux/uaccess.h>
19 #include "bfad_drv.h"
20 #include "bfad_im.h"
21 #include "bfad_bsg.h"
22
23 BFA_TRC_FILE(LDRV, BSG);
24
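/*
 * The handlers below implement the driver's vendor-specific IOC commands.
 * Each one receives the driver instance and a command buffer that is cast
 * to the matching bfa_bsg_*_s structure, fills in iocmd->status and
 * normally returns 0 (a negative errno is used only for malformed
 * requests), so failures are reported through iocmd->status rather than
 * the return value.  The common shape is:
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	iocmd->status = <call into the BFA/FCS layer>;
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *	return 0;
 *
 * Handlers that issue asynchronous firmware requests additionally wait on
 * a completion (see bfad_iocmd_port_enable() below).  They are dispatched
 * from the driver's BSG vendor-command path, which is not part of this
 * excerpt.
 */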
25 int
26 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
27 {
28 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
29 int rc = 0;
30 unsigned long flags;
31
32 spin_lock_irqsave(&bfad->bfad_lock, flags);
33 /* If IOC is not in disabled state - return */
34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 iocmd->status = BFA_STATUS_OK;
37 return rc;
38 }
39
40 init_completion(&bfad->enable_comp);
41 bfa_iocfc_enable(&bfad->bfa);
42 iocmd->status = BFA_STATUS_OK;
43 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
44 wait_for_completion(&bfad->enable_comp);
45
46 return rc;
47 }
48
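/*
 * Disable the IOC.  The disable_active flag, set and tested under
 * bfad_lock, guards against a second disable being issued while one is
 * already in flight: a concurrent caller gets -EBUSY instead of blocking
 * on the same completion.
 */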
49 int
50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
51 {
52 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
53 int rc = 0;
54 unsigned long flags;
55
56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 iocmd->status = BFA_STATUS_OK;
60 return rc;
61 }
62
63 if (bfad->disable_active) {
64 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
65 return -EBUSY;
66 }
67
68 bfad->disable_active = BFA_TRUE;
69 init_completion(&bfad->disable_comp);
70 bfa_iocfc_disable(&bfad->bfa);
71 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
72
73 wait_for_completion(&bfad->disable_comp);
74 bfad->disable_active = BFA_FALSE;
75 iocmd->status = BFA_STATUS_OK;
76
77 return rc;
78 }
79
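/*
 * Report basic identification for the IOC: WWNs, MACs, serial number,
 * SCSI host number and the driver's name strings.  The adapter hardware
 * path is derived from the PCI name (typically "domain:bus:dev.fn") by
 * truncating it at the second ':'.
 */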
80 static int
81 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
82 {
83 int i;
84 struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
85 struct bfad_im_port_s *im_port;
86 struct bfa_port_attr_s pattr;
87 unsigned long flags;
88
89 spin_lock_irqsave(&bfad->bfad_lock, flags);
90 bfa_fcport_get_attr(&bfad->bfa, &pattr);
91 iocmd->nwwn = pattr.nwwn;
92 iocmd->pwwn = pattr.pwwn;
93 iocmd->ioc_type = bfa_get_type(&bfad->bfa);
94 iocmd->mac = bfa_get_mac(&bfad->bfa);
95 iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
96 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
97 iocmd->factorynwwn = pattr.factorynwwn;
98 iocmd->factorypwwn = pattr.factorypwwn;
99 iocmd->bfad_num = bfad->inst_no;
100 im_port = bfad->pport.im_port;
101 iocmd->host = im_port->shost->host_no;
102 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
103
104 strcpy(iocmd->name, bfad->adapter_name);
105 strcpy(iocmd->port_name, bfad->port_name);
106 strcpy(iocmd->hwpath, bfad->pci_name);
107
108 /* set adapter hw path */
109 strcpy(iocmd->adapter_hwpath, bfad->pci_name);
110 for (i = 0; i < BFA_STRING_32 - 1 && iocmd->adapter_hwpath[i] != ':'; i++)
111 ;
112 for (; i < BFA_STRING_32 - 1 && iocmd->adapter_hwpath[++i] != ':'; )
113 ;
114 iocmd->adapter_hwpath[i] = '\0';
115 iocmd->status = BFA_STATUS_OK;
116 return 0;
117 }
118
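/*
 * Return the IOC attribute block augmented with driver-level information:
 * driver name and version plus the firmware and option-ROM versions
 * reported by the adapter.  The PCI attributes cached in bfad are copied
 * over the ones returned by bfa_ioc_get_attr(), so the freshly read chip
 * revision is saved into the cache first to keep it from being lost.
 */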
119 static int
120 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
121 {
122 struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
123 unsigned long flags;
124
125 spin_lock_irqsave(&bfad->bfad_lock, flags);
126 bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
127 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
128
129 /* fill in driver attr info */
130 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
131 strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
132 BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
133 strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
134 iocmd->ioc_attr.adapter_attr.fw_ver);
135 strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
136 iocmd->ioc_attr.adapter_attr.optrom_ver);
137
138 /* copy chip rev info first, otherwise it will be overwritten */
139 memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
140 sizeof(bfad->pci_attr.chip_rev));
141 memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
142 sizeof(struct bfa_ioc_pci_attr_s));
143
144 iocmd->status = BFA_STATUS_OK;
145 return 0;
146 }
147
148 int
149 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
150 {
151 struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
152
153 bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
154 iocmd->status = BFA_STATUS_OK;
155 return 0;
156 }
157
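/*
 * Handlers that return variable-length data (firmware stats, port stats,
 * rport lists, flash contents, ...) share one payload convention: the BSG
 * payload starts with the fixed bfa_bsg_*_s header and the bulk data is
 * written immediately after it.  bfad_chk_iocmd_sz() verifies up front
 * that the payload supplied by user space is large enough for both;
 * otherwise the handler reports BFA_STATUS_VERSION_FAIL.
 */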
158 int
159 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
160 unsigned int payload_len)
161 {
162 struct bfa_bsg_ioc_fwstats_s *iocmd =
163 (struct bfa_bsg_ioc_fwstats_s *)cmd;
164 void *iocmd_bufptr;
165 unsigned long flags;
166
167 if (bfad_chk_iocmd_sz(payload_len,
168 sizeof(struct bfa_bsg_ioc_fwstats_s),
169 sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
170 iocmd->status = BFA_STATUS_VERSION_FAIL;
171 goto out;
172 }
173
174 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
175 spin_lock_irqsave(&bfad->bfad_lock, flags);
176 iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
177 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
178
179 if (iocmd->status != BFA_STATUS_OK) {
180 bfa_trc(bfad, iocmd->status);
181 goto out;
182 }
183 out:
184 bfa_trc(bfad, 0x6666);
185 return 0;
186 }
187
188 int
189 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
190 {
191 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
192 unsigned long flags;
193
194 if (v_cmd == IOCMD_IOC_RESET_STATS) {
195 bfa_ioc_clear_stats(&bfad->bfa);
196 iocmd->status = BFA_STATUS_OK;
197 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
198 spin_lock_irqsave(&bfad->bfad_lock, flags);
199 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
200 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
201 }
202
203 return 0;
204 }
205
206 int
207 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
208 {
209 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
210
211 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
212 strcpy(bfad->adapter_name, iocmd->name);
213 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
214 strcpy(bfad->port_name, iocmd->name);
215
216 iocmd->status = BFA_STATUS_OK;
217 return 0;
218 }
219
220 int
221 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
222 {
223 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
224
225 iocmd->status = BFA_STATUS_OK;
226 bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
227
228 return 0;
229 }
230
231 int
232 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
233 {
234 struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
235 unsigned long flags;
236
237 spin_lock_irqsave(&bfad->bfad_lock, flags);
238 iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
239 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
240
241 return 0;
242 }
243
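/*
 * Asynchronous requests use a stack-allocated bfad_hal_comp: the request
 * is issued under bfad_lock with bfad_hcb_comp as its callback, the lock
 * is dropped, and the handler sleeps on fcomp.comp until the callback
 * fires.  The status from the issuing call is only used to decide whether
 * to wait; the final result is taken from fcomp.status.
 */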
244 int
245 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
246 {
247 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
248 struct bfad_hal_comp fcomp;
249 unsigned long flags;
250
251 init_completion(&fcomp.comp);
252 spin_lock_irqsave(&bfad->bfad_lock, flags);
253 iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
254 bfad_hcb_comp, &fcomp);
255 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
256 if (iocmd->status != BFA_STATUS_OK) {
257 bfa_trc(bfad, iocmd->status);
258 return 0;
259 }
260 wait_for_completion(&fcomp.comp);
261 iocmd->status = fcomp.status;
262 return 0;
263 }
264
265 int
266 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
267 {
268 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
269 struct bfad_hal_comp fcomp;
270 unsigned long flags;
271
272 init_completion(&fcomp.comp);
273 spin_lock_irqsave(&bfad->bfad_lock, flags);
274 iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
275 bfad_hcb_comp, &fcomp);
276 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
277
278 if (iocmd->status != BFA_STATUS_OK) {
279 bfa_trc(bfad, iocmd->status);
280 return 0;
281 }
282 wait_for_completion(&fcomp.comp);
283 iocmd->status = fcomp.status;
284 return 0;
285 }
286
287 static int
288 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
289 {
290 struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
291 struct bfa_lport_attr_s port_attr;
292 unsigned long flags;
293
294 spin_lock_irqsave(&bfad->bfad_lock, flags);
295 bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
296 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
297 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
298
299 if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
300 iocmd->attr.pid = port_attr.pid;
301 else
302 iocmd->attr.pid = 0;
303
304 iocmd->attr.port_type = port_attr.port_type;
305 iocmd->attr.loopback = port_attr.loopback;
306 iocmd->attr.authfail = port_attr.authfail;
307 strncpy(iocmd->attr.port_symname.symname,
308 port_attr.port_cfg.sym_name.symname,
309 sizeof(port_attr.port_cfg.sym_name.symname));
310
311 iocmd->status = BFA_STATUS_OK;
312 return 0;
313 }
314
315 int
316 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
317 unsigned int payload_len)
318 {
319 struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
320 struct bfad_hal_comp fcomp;
321 void *iocmd_bufptr;
322 unsigned long flags;
323
324 if (bfad_chk_iocmd_sz(payload_len,
325 sizeof(struct bfa_bsg_port_stats_s),
326 sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
327 iocmd->status = BFA_STATUS_VERSION_FAIL;
328 return 0;
329 }
330
331 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
332
333 init_completion(&fcomp.comp);
334 spin_lock_irqsave(&bfad->bfad_lock, flags);
335 iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
336 iocmd_bufptr, bfad_hcb_comp, &fcomp);
337 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
338 if (iocmd->status != BFA_STATUS_OK) {
339 bfa_trc(bfad, iocmd->status);
340 goto out;
341 }
342
343 wait_for_completion(&fcomp.comp);
344 iocmd->status = fcomp.status;
345 out:
346 return 0;
347 }
348
349 int
350 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
351 {
352 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
353 struct bfad_hal_comp fcomp;
354 unsigned long flags;
355
356 init_completion(&fcomp.comp);
357 spin_lock_irqsave(&bfad->bfad_lock, flags);
358 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
359 bfad_hcb_comp, &fcomp);
360 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
361 if (iocmd->status != BFA_STATUS_OK) {
362 bfa_trc(bfad, iocmd->status);
363 return 0;
364 }
365 wait_for_completion(&fcomp.comp);
366 iocmd->status = fcomp.status;
367 return 0;
368 }
369
370 int
371 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
372 {
373 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
374 unsigned long flags;
375
376 spin_lock_irqsave(&bfad->bfad_lock, flags);
377 if (v_cmd == IOCMD_PORT_CFG_TOPO)
378 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
379 else if (v_cmd == IOCMD_PORT_CFG_SPEED)
380 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
381 else if (v_cmd == IOCMD_PORT_CFG_ALPA)
382 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
383 else if (v_cmd == IOCMD_PORT_CLR_ALPA)
384 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
385 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
386
387 return 0;
388 }
389
390 int
391 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
392 {
393 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
394 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
395 unsigned long flags;
396
397 spin_lock_irqsave(&bfad->bfad_lock, flags);
398 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
399 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
400
401 return 0;
402 }
403
404 int
405 bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
406 {
407 struct bfa_bsg_bbcr_enable_s *iocmd =
408 (struct bfa_bsg_bbcr_enable_s *)pcmd;
409 unsigned long flags;
410 int rc;
411
412 spin_lock_irqsave(&bfad->bfad_lock, flags);
413 if (cmd == IOCMD_PORT_BBCR_ENABLE)
414 rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
415 else if (cmd == IOCMD_PORT_BBCR_DISABLE)
416 rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
417 else {
418 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
419 return -EINVAL;
420 }
421 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
422
423 iocmd->status = rc;
424 return 0;
425 }
426
427 int
428 bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
429 {
430 struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
431 unsigned long flags;
432
433 spin_lock_irqsave(&bfad->bfad_lock, flags);
434 iocmd->status =
435 bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
436 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
437
438 return 0;
439 }
440
441
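/*
 * Logical-port (lport) queries.  The port is located with
 * bfa_fcs_lookup_port() using the caller-supplied vf_id and port WWN;
 * if no match is found the handler reports BFA_STATUS_UNKNOWN_LWWN and
 * still returns 0.
 */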
442 static int
443 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
444 {
445 struct bfa_fcs_lport_s *fcs_port;
446 struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
447 unsigned long flags;
448
449 spin_lock_irqsave(&bfad->bfad_lock, flags);
450 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
451 iocmd->vf_id, iocmd->pwwn);
452 if (fcs_port == NULL) {
453 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
454 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
455 goto out;
456 }
457
458 bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
459 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
460 iocmd->status = BFA_STATUS_OK;
461 out:
462 return 0;
463 }
464
465 int
466 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
467 {
468 struct bfa_fcs_lport_s *fcs_port;
469 struct bfa_bsg_lport_stats_s *iocmd =
470 (struct bfa_bsg_lport_stats_s *)cmd;
471 unsigned long flags;
472
473 spin_lock_irqsave(&bfad->bfad_lock, flags);
474 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
475 iocmd->vf_id, iocmd->pwwn);
476 if (fcs_port == NULL) {
477 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
478 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
479 goto out;
480 }
481
482 bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
483 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
484 iocmd->status = BFA_STATUS_OK;
485 out:
486 return 0;
487 }
488
489 int
490 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
491 {
492 struct bfa_fcs_lport_s *fcs_port;
493 struct bfa_bsg_reset_stats_s *iocmd =
494 (struct bfa_bsg_reset_stats_s *)cmd;
495 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
496 struct list_head *qe, *qen;
497 struct bfa_itnim_s *itnim;
498 unsigned long flags;
499
500 spin_lock_irqsave(&bfad->bfad_lock, flags);
501 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
502 iocmd->vf_id, iocmd->vpwwn);
503 if (fcs_port == NULL) {
504 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
505 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
506 goto out;
507 }
508
509 bfa_fcs_lport_clear_stats(fcs_port);
510 /* clear IO stats from all active itnims */
511 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
512 itnim = (struct bfa_itnim_s *) qe;
513 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
514 continue;
515 bfa_itnim_clear_stats(itnim);
516 }
517 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
518 iocmd->status = BFA_STATUS_OK;
519 out:
520 return 0;
521 }
522
523 int
524 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
525 {
526 struct bfa_fcs_lport_s *fcs_port;
527 struct bfa_bsg_lport_iostats_s *iocmd =
528 (struct bfa_bsg_lport_iostats_s *)cmd;
529 unsigned long flags;
530
531 spin_lock_irqsave(&bfad->bfad_lock, flags);
532 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
533 iocmd->vf_id, iocmd->pwwn);
534 if (fcs_port == NULL) {
535 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
536 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
537 goto out;
538 }
539
540 bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
541 fcs_port->lp_tag);
542 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
543 iocmd->status = BFA_STATUS_OK;
544 out:
545 return 0;
546 }
547
548 int
549 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
550 unsigned int payload_len)
551 {
552 struct bfa_bsg_lport_get_rports_s *iocmd =
553 (struct bfa_bsg_lport_get_rports_s *)cmd;
554 struct bfa_fcs_lport_s *fcs_port;
555 unsigned long flags;
556 void *iocmd_bufptr;
557
558 if (iocmd->nrports == 0)
559 return -EINVAL;
560
561 if (bfad_chk_iocmd_sz(payload_len,
562 sizeof(struct bfa_bsg_lport_get_rports_s),
563 sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
564 != BFA_STATUS_OK) {
565 iocmd->status = BFA_STATUS_VERSION_FAIL;
566 return 0;
567 }
568
569 iocmd_bufptr = (char *)iocmd +
570 sizeof(struct bfa_bsg_lport_get_rports_s);
571 spin_lock_irqsave(&bfad->bfad_lock, flags);
572 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
573 iocmd->vf_id, iocmd->pwwn);
574 if (fcs_port == NULL) {
575 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
576 bfa_trc(bfad, 0);
577 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
578 goto out;
579 }
580
581 bfa_fcs_lport_get_rport_quals(fcs_port,
582 (struct bfa_rport_qualifier_s *)iocmd_bufptr,
583 &iocmd->nrports);
584 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
585 iocmd->status = BFA_STATUS_OK;
586 out:
587 return 0;
588 }
589
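/*
 * Remote-port (rport) queries.  After resolving the local port, the
 * rport is looked up by its WWN; bfad_iocmd_rport_get_attr() can also
 * qualify the lookup with the rport PID when the caller supplies one.
 * A failed lookup is reported as BFA_STATUS_UNKNOWN_RWWN.
 */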
590 int
591 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
592 {
593 struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
594 struct bfa_fcs_lport_s *fcs_port;
595 struct bfa_fcs_rport_s *fcs_rport;
596 unsigned long flags;
597
598 spin_lock_irqsave(&bfad->bfad_lock, flags);
599 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
600 iocmd->vf_id, iocmd->pwwn);
601 if (fcs_port == NULL) {
602 bfa_trc(bfad, 0);
603 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
604 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
605 goto out;
606 }
607
608 if (iocmd->pid)
609 fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
610 iocmd->rpwwn, iocmd->pid);
611 else
612 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
613 if (fcs_rport == NULL) {
614 bfa_trc(bfad, 0);
615 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
616 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
617 goto out;
618 }
619
620 bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
621 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
622 iocmd->status = BFA_STATUS_OK;
623 out:
624 return 0;
625 }
626
627 static int
628 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
629 {
630 struct bfa_bsg_rport_scsi_addr_s *iocmd =
631 (struct bfa_bsg_rport_scsi_addr_s *)cmd;
632 struct bfa_fcs_lport_s *fcs_port;
633 struct bfa_fcs_itnim_s *fcs_itnim;
634 struct bfad_itnim_s *drv_itnim;
635 unsigned long flags;
636
637 spin_lock_irqsave(&bfad->bfad_lock, flags);
638 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
639 iocmd->vf_id, iocmd->pwwn);
640 if (fcs_port == NULL) {
641 bfa_trc(bfad, 0);
642 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
643 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
644 goto out;
645 }
646
647 fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
648 if (fcs_itnim == NULL) {
649 bfa_trc(bfad, 0);
650 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
651 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
652 goto out;
653 }
654
655 drv_itnim = fcs_itnim->itnim_drv;
656
657 if (drv_itnim && drv_itnim->im_port)
658 iocmd->host = drv_itnim->im_port->shost->host_no;
659 else {
660 bfa_trc(bfad, 0);
661 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
662 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
663 goto out;
664 }
665
666 iocmd->target = drv_itnim->scsi_tgt_id;
667 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
668
669 iocmd->bus = 0;
670 iocmd->lun = 0;
671 iocmd->status = BFA_STATUS_OK;
672 out:
673 return 0;
674 }
675
676 int
677 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
678 {
679 struct bfa_bsg_rport_stats_s *iocmd =
680 (struct bfa_bsg_rport_stats_s *)cmd;
681 struct bfa_fcs_lport_s *fcs_port;
682 struct bfa_fcs_rport_s *fcs_rport;
683 unsigned long flags;
684
685 spin_lock_irqsave(&bfad->bfad_lock, flags);
686 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
687 iocmd->vf_id, iocmd->pwwn);
688 if (fcs_port == NULL) {
689 bfa_trc(bfad, 0);
690 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
691 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
692 goto out;
693 }
694
695 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
696 if (fcs_rport == NULL) {
697 bfa_trc(bfad, 0);
698 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
699 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
700 goto out;
701 }
702
703 memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
704 sizeof(struct bfa_rport_stats_s));
705 if (bfa_fcs_rport_get_halrport(fcs_rport)) {
706 memcpy((void *)&iocmd->stats.hal_stats,
707 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
708 sizeof(struct bfa_rport_hal_stats_s));
709 }
710
711 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
712 iocmd->status = BFA_STATUS_OK;
713 out:
714 return 0;
715 }
716
717 int
718 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
719 {
720 struct bfa_bsg_rport_reset_stats_s *iocmd =
721 (struct bfa_bsg_rport_reset_stats_s *)cmd;
722 struct bfa_fcs_lport_s *fcs_port;
723 struct bfa_fcs_rport_s *fcs_rport;
724 struct bfa_rport_s *rport;
725 unsigned long flags;
726
727 spin_lock_irqsave(&bfad->bfad_lock, flags);
728 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
729 iocmd->vf_id, iocmd->pwwn);
730 if (fcs_port == NULL) {
731 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
732 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
733 goto out;
734 }
735
736 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
737 if (fcs_rport == NULL) {
738 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
739 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
740 goto out;
741 }
742
743 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
744 rport = bfa_fcs_rport_get_halrport(fcs_rport);
745 if (rport)
746 memset(&rport->stats, 0, sizeof(rport->stats));
747 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
748 iocmd->status = BFA_STATUS_OK;
749 out:
750 return 0;
751 }
752
753 int
754 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
755 {
756 struct bfa_bsg_rport_set_speed_s *iocmd =
757 (struct bfa_bsg_rport_set_speed_s *)cmd;
758 struct bfa_fcs_lport_s *fcs_port;
759 struct bfa_fcs_rport_s *fcs_rport;
760 unsigned long flags;
761
762 spin_lock_irqsave(&bfad->bfad_lock, flags);
763 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
764 iocmd->vf_id, iocmd->pwwn);
765 if (fcs_port == NULL) {
766 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
767 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
768 goto out;
769 }
770
771 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
772 if (fcs_rport == NULL) {
773 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
774 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
775 goto out;
776 }
777
778 fcs_rport->rpf.assigned_speed = iocmd->speed;
779 /* Set this speed in f/w only if the RPSC speed is not available */
780 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
781 if (fcs_rport->bfa_rport)
782 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
783 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
784 iocmd->status = BFA_STATUS_OK;
785 out:
786 return 0;
787 }
788
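/*
 * Virtual-port (vport) queries.  The vport is located with
 * bfa_fcs_vport_lookup() by vf_id and vport WWN; an unknown WWN is
 * reported as BFA_STATUS_UNKNOWN_VWWN.  The stats variant merges the FCS
 * vport counters and the underlying lport counters into the single
 * bfa_vport_stats_s returned to the caller.
 */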
789 int
790 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
791 {
792 struct bfa_fcs_vport_s *fcs_vport;
793 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
794 unsigned long flags;
795
796 spin_lock_irqsave(&bfad->bfad_lock, flags);
797 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
798 iocmd->vf_id, iocmd->vpwwn);
799 if (fcs_vport == NULL) {
800 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
801 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
802 goto out;
803 }
804
805 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
806 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
807 iocmd->status = BFA_STATUS_OK;
808 out:
809 return 0;
810 }
811
812 int
813 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
814 {
815 struct bfa_fcs_vport_s *fcs_vport;
816 struct bfa_bsg_vport_stats_s *iocmd =
817 (struct bfa_bsg_vport_stats_s *)cmd;
818 unsigned long flags;
819
820 spin_lock_irqsave(&bfad->bfad_lock, flags);
821 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
822 iocmd->vf_id, iocmd->vpwwn);
823 if (fcs_vport == NULL) {
824 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
825 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
826 goto out;
827 }
828
829 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
830 sizeof(struct bfa_vport_stats_s));
831 memcpy((void *)&iocmd->vport_stats.port_stats,
832 (void *)&fcs_vport->lport.stats,
833 sizeof(struct bfa_lport_stats_s));
834 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
835 iocmd->status = BFA_STATUS_OK;
836 out:
837 return 0;
838 }
839
840 int
841 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
842 {
843 struct bfa_fcs_vport_s *fcs_vport;
844 struct bfa_bsg_reset_stats_s *iocmd =
845 (struct bfa_bsg_reset_stats_s *)cmd;
846 unsigned long flags;
847
848 spin_lock_irqsave(&bfad->bfad_lock, flags);
849 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
850 iocmd->vf_id, iocmd->vpwwn);
851 if (fcs_vport == NULL) {
852 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
853 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
854 goto out;
855 }
856
857 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
858 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
859 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
860 iocmd->status = BFA_STATUS_OK;
861 out:
862 return 0;
863 }
864
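/*
 * Return the WWNs of the logical ports on a fabric (VF).  The caller
 * passes the capacity of its buffer in iocmd->nports; the WWN array is
 * written just past the fixed header and nports is written back with the
 * count filled in by bfa_fcs_vf_get_ports().
 */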
865 static int
866 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
867 unsigned int payload_len)
868 {
869 struct bfa_bsg_fabric_get_lports_s *iocmd =
870 (struct bfa_bsg_fabric_get_lports_s *)cmd;
871 bfa_fcs_vf_t *fcs_vf;
872 uint32_t nports = iocmd->nports;
873 unsigned long flags;
874 void *iocmd_bufptr;
875
876 if (nports == 0) {
877 iocmd->status = BFA_STATUS_EINVAL;
878 goto out;
879 }
880
881 if (bfad_chk_iocmd_sz(payload_len,
882 sizeof(struct bfa_bsg_fabric_get_lports_s),
883 sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) {
884 iocmd->status = BFA_STATUS_VERSION_FAIL;
885 goto out;
886 }
887
888 iocmd_bufptr = (char *)iocmd +
889 sizeof(struct bfa_bsg_fabric_get_lports_s);
890
891 spin_lock_irqsave(&bfad->bfad_lock, flags);
892 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
893 if (fcs_vf == NULL) {
894 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
895 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
896 goto out;
897 }
898 bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
899 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
900
901 iocmd->nports = nports;
902 iocmd->status = BFA_STATUS_OK;
903 out:
904 return 0;
905 }
906
907 int
908 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
909 {
910 struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
911 unsigned long flags;
912
913 spin_lock_irqsave(&bfad->bfad_lock, flags);
914 iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
915 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
916
917 return 0;
918 }
919
920 int
921 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
922 {
923 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
924 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
925 unsigned long flags;
926
927 spin_lock_irqsave(&bfad->bfad_lock, flags);
928
929 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
930 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
931 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
932 else {
933 if (cmd == IOCMD_RATELIM_ENABLE)
934 fcport->cfg.ratelimit = BFA_TRUE;
935 else if (cmd == IOCMD_RATELIM_DISABLE)
936 fcport->cfg.ratelimit = BFA_FALSE;
937
938 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
939 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
940
941 iocmd->status = BFA_STATUS_OK;
942 }
943
944 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
945
946 return 0;
947 }
948
949 int
950 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
951 {
952 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
953 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
954 unsigned long flags;
955
956 spin_lock_irqsave(&bfad->bfad_lock, flags);
957
958 /* Auto and speeds greater than the supported speed are invalid */
959 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
960 (iocmd->speed > fcport->speed_sup)) {
961 iocmd->status = BFA_STATUS_UNSUPP_SPEED;
962 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
963 return 0;
964 }
965
966 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
967 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
968 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
969 else {
970 fcport->cfg.trl_def_speed = iocmd->speed;
971 iocmd->status = BFA_STATUS_OK;
972 }
973 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
974
975 return 0;
976 }
977
978 int
979 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
980 {
981 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
982 unsigned long flags;
983
984 spin_lock_irqsave(&bfad->bfad_lock, flags);
985 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
986 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
987 iocmd->status = BFA_STATUS_OK;
988 return 0;
989 }
990
991 int
992 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
993 {
994 struct bfa_bsg_fcpim_modstats_s *iocmd =
995 (struct bfa_bsg_fcpim_modstats_s *)cmd;
996 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
997 struct list_head *qe, *qen;
998 struct bfa_itnim_s *itnim;
999 unsigned long flags;
1000
1001 spin_lock_irqsave(&bfad->bfad_lock, flags);
1002 /* accumulate IO stats from itnim */
1003 memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
1004 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1005 itnim = (struct bfa_itnim_s *) qe;
1006 bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
1007 }
1008 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1009 iocmd->status = BFA_STATUS_OK;
1010 return 0;
1011 }
1012
1013 int
1014 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
1015 {
1016 struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
1017 (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
1018 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1019 struct list_head *qe, *qen;
1020 struct bfa_itnim_s *itnim;
1021 unsigned long flags;
1022
1023 spin_lock_irqsave(&bfad->bfad_lock, flags);
1024 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1025 itnim = (struct bfa_itnim_s *) qe;
1026 bfa_itnim_clear_stats(itnim);
1027 }
1028 memset(&fcpim->del_itn_stats, 0,
1029 sizeof(struct bfa_fcpim_del_itn_stats_s));
1030 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1031 iocmd->status = BFA_STATUS_OK;
1032 return 0;
1033 }
1034
1035 int
1036 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
1037 {
1038 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
1039 (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
1040 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1041 unsigned long flags;
1042
1043 spin_lock_irqsave(&bfad->bfad_lock, flags);
1044 memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
1045 sizeof(struct bfa_fcpim_del_itn_stats_s));
1046 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1047
1048 iocmd->status = BFA_STATUS_OK;
1049 return 0;
1050 }
1051
1052 static int
1053 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1054 {
1055 struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1056 struct bfa_fcs_lport_s *fcs_port;
1057 unsigned long flags;
1058
1059 spin_lock_irqsave(&bfad->bfad_lock, flags);
1060 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1061 iocmd->vf_id, iocmd->lpwwn);
1062 if (!fcs_port)
1063 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1064 else
1065 iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1066 iocmd->rpwwn, &iocmd->attr);
1067 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1068 return 0;
1069 }
1070
1071 static int
1072 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1073 {
1074 struct bfa_bsg_itnim_iostats_s *iocmd =
1075 (struct bfa_bsg_itnim_iostats_s *)cmd;
1076 struct bfa_fcs_lport_s *fcs_port;
1077 struct bfa_fcs_itnim_s *itnim;
1078 unsigned long flags;
1079
1080 spin_lock_irqsave(&bfad->bfad_lock, flags);
1081 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1082 iocmd->vf_id, iocmd->lpwwn);
1083 if (!fcs_port) {
1084 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1085 bfa_trc(bfad, 0);
1086 } else {
1087 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1088 if (itnim == NULL)
1089 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1090 else {
1091 iocmd->status = BFA_STATUS_OK;
1092 if (bfa_fcs_itnim_get_halitn(itnim))
1093 memcpy((void *)&iocmd->iostats, (void *)
1094 &(bfa_fcs_itnim_get_halitn(itnim)->stats),
1095 sizeof(struct bfa_itnim_iostats_s));
1096 }
1097 }
1098 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1099 return 0;
1100 }
1101
1102 static int
1103 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1104 {
1105 struct bfa_bsg_rport_reset_stats_s *iocmd =
1106 (struct bfa_bsg_rport_reset_stats_s *)cmd;
1107 struct bfa_fcs_lport_s *fcs_port;
1108 struct bfa_fcs_itnim_s *itnim;
1109 unsigned long flags;
1110
1111 spin_lock_irqsave(&bfad->bfad_lock, flags);
1112 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1113 iocmd->vf_id, iocmd->pwwn);
1114 if (!fcs_port)
1115 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1116 else {
1117 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1118 if (itnim == NULL)
1119 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1120 else {
1121 iocmd->status = BFA_STATUS_OK;
1122 bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1123 if (bfa_fcs_itnim_get_halitn(itnim))
	bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1124 }
1125 }
1126 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1127
1128 return 0;
1129 }
1130
1131 static int
1132 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1133 {
1134 struct bfa_bsg_itnim_itnstats_s *iocmd =
1135 (struct bfa_bsg_itnim_itnstats_s *)cmd;
1136 struct bfa_fcs_lport_s *fcs_port;
1137 struct bfa_fcs_itnim_s *itnim;
1138 unsigned long flags;
1139
1140 spin_lock_irqsave(&bfad->bfad_lock, flags);
1141 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1142 iocmd->vf_id, iocmd->lpwwn);
1143 if (!fcs_port) {
1144 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1145 bfa_trc(bfad, 0);
1146 } else {
1147 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1148 if (itnim == NULL)
1149 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1150 else {
1151 iocmd->status = BFA_STATUS_OK;
1152 bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1153 &iocmd->itnstats);
1154 }
1155 }
1156 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1157 return 0;
1158 }
1159
1160 int
1161 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1162 {
1163 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1164 unsigned long flags;
1165
1166 spin_lock_irqsave(&bfad->bfad_lock, flags);
1167 iocmd->status = bfa_fcport_enable(&bfad->bfa);
1168 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1169
1170 return 0;
1171 }
1172
1173 int
1174 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1175 {
1176 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1177 unsigned long flags;
1178
1179 spin_lock_irqsave(&bfad->bfad_lock, flags);
1180 iocmd->status = bfa_fcport_disable(&bfad->bfa);
1181 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1182
1183 return 0;
1184 }
1185
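/*
 * PCI-function configuration handlers.  These go through the adapter
 * block (ablk) module to query the current per-function configuration,
 * create or delete a PCI function, and adjust its minimum/maximum
 * bandwidth, all using the asynchronous completion pattern described
 * above.
 */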
1186 int
1187 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1188 {
1189 struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1190 struct bfad_hal_comp fcomp;
1191 unsigned long flags;
1192
1193 init_completion(&fcomp.comp);
1194 spin_lock_irqsave(&bfad->bfad_lock, flags);
1195 iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1196 &iocmd->pcifn_cfg,
1197 bfad_hcb_comp, &fcomp);
1198 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1199 if (iocmd->status != BFA_STATUS_OK)
1200 goto out;
1201
1202 wait_for_completion(&fcomp.comp);
1203 iocmd->status = fcomp.status;
1204 out:
1205 return 0;
1206 }
1207
1208 int
1209 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1210 {
1211 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1212 struct bfad_hal_comp fcomp;
1213 unsigned long flags;
1214
1215 init_completion(&fcomp.comp);
1216 spin_lock_irqsave(&bfad->bfad_lock, flags);
1217 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1218 &iocmd->pcifn_id, iocmd->port,
1219 iocmd->pcifn_class, iocmd->bw_min,
1220 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1221 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1222 if (iocmd->status != BFA_STATUS_OK)
1223 goto out;
1224
1225 wait_for_completion(&fcomp.comp);
1226 iocmd->status = fcomp.status;
1227 out:
1228 return 0;
1229 }
1230
1231 int
1232 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1233 {
1234 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1235 struct bfad_hal_comp fcomp;
1236 unsigned long flags;
1237
1238 init_completion(&fcomp.comp);
1239 spin_lock_irqsave(&bfad->bfad_lock, flags);
1240 iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1241 iocmd->pcifn_id,
1242 bfad_hcb_comp, &fcomp);
1243 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1244 if (iocmd->status != BFA_STATUS_OK)
1245 goto out;
1246
1247 wait_for_completion(&fcomp.comp);
1248 iocmd->status = fcomp.status;
1249 out:
1250 return 0;
1251 }
1252
1253 int
1254 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1255 {
1256 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1257 struct bfad_hal_comp fcomp;
1258 unsigned long flags;
1259
1260 init_completion(&fcomp.comp);
1261 spin_lock_irqsave(&bfad->bfad_lock, flags);
1262 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1263 iocmd->pcifn_id, iocmd->bw_min,
1264 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1265 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1266 bfa_trc(bfad, iocmd->status);
1267 if (iocmd->status != BFA_STATUS_OK)
1268 goto out;
1269
1270 wait_for_completion(&fcomp.comp);
1271 iocmd->status = fcomp.status;
1272 bfa_trc(bfad, iocmd->status);
1273 out:
1274 return 0;
1275 }
1276
1277 int
1278 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1279 {
1280 struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1281 (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1282 struct bfad_hal_comp fcomp;
1283 unsigned long flags = 0;
1284
1285 init_completion(&fcomp.comp);
1286 spin_lock_irqsave(&bfad->bfad_lock, flags);
1287 iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1288 iocmd->cfg.mode, iocmd->cfg.max_pf,
1289 iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1290 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1291 if (iocmd->status != BFA_STATUS_OK)
1292 goto out;
1293
1294 wait_for_completion(&fcomp.comp);
1295 iocmd->status = fcomp.status;
1296 out:
1297 return 0;
1298 }
1299
1300 int
1301 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1302 {
1303 struct bfa_bsg_port_cfg_mode_s *iocmd =
1304 (struct bfa_bsg_port_cfg_mode_s *)cmd;
1305 struct bfad_hal_comp fcomp;
1306 unsigned long flags = 0;
1307
1308 init_completion(&fcomp.comp);
1309 spin_lock_irqsave(&bfad->bfad_lock, flags);
1310 iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1311 iocmd->instance, iocmd->cfg.mode,
1312 iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1313 bfad_hcb_comp, &fcomp);
1314 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1315 if (iocmd->status != BFA_STATUS_OK)
1316 goto out;
1317
1318 wait_for_completion(&fcomp.comp);
1319 iocmd->status = fcomp.status;
1320 out:
1321 return 0;
1322 }
1323
1324 int
1325 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1326 {
1327 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1328 struct bfad_hal_comp fcomp;
1329 unsigned long flags;
1330
1331 init_completion(&fcomp.comp);
1332 spin_lock_irqsave(&bfad->bfad_lock, flags);
1333 if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1334 iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1335 bfad_hcb_comp, &fcomp);
1336 else
1337 iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1338 bfad_hcb_comp, &fcomp);
1339 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1340
1341 if (iocmd->status != BFA_STATUS_OK)
1342 goto out;
1343
1344 wait_for_completion(&fcomp.comp);
1345 iocmd->status = fcomp.status;
1346 out:
1347 return 0;
1348 }
1349
1350 int
1351 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1352 {
1353 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1354 struct bfad_hal_comp fcomp;
1355 unsigned long flags;
1356
1357 init_completion(&fcomp.comp);
1358 iocmd->status = BFA_STATUS_OK;
1359 spin_lock_irqsave(&bfad->bfad_lock, flags);
1360 iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1361 bfad_hcb_comp, &fcomp);
1362 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1363
1364 if (iocmd->status != BFA_STATUS_OK)
1365 goto out;
1366
1367 wait_for_completion(&fcomp.comp);
1368 iocmd->status = fcomp.status;
1369 out:
1370 return 0;
1371 }
1372
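/*
 * CEE (Converged Enhanced Ethernet) attribute and statistics handlers.
 * These additionally take bfad_mutex around the issue-and-wait sequence,
 * so concurrent CEE requests are serialized against each other.
 */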
1373 int
1374 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1375 {
1376 struct bfa_bsg_cee_attr_s *iocmd =
1377 (struct bfa_bsg_cee_attr_s *)cmd;
1378 void *iocmd_bufptr;
1379 struct bfad_hal_comp cee_comp;
1380 unsigned long flags;
1381
1382 if (bfad_chk_iocmd_sz(payload_len,
1383 sizeof(struct bfa_bsg_cee_attr_s),
1384 sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1385 iocmd->status = BFA_STATUS_VERSION_FAIL;
1386 return 0;
1387 }
1388
1389 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1390
1391 cee_comp.status = 0;
1392 init_completion(&cee_comp.comp);
1393 mutex_lock(&bfad_mutex);
1394 spin_lock_irqsave(&bfad->bfad_lock, flags);
1395 iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1396 bfad_hcb_comp, &cee_comp);
1397 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1398 if (iocmd->status != BFA_STATUS_OK) {
1399 mutex_unlock(&bfad_mutex);
1400 bfa_trc(bfad, 0x5555);
1401 goto out;
1402 }
1403 wait_for_completion(&cee_comp.comp);
1404 mutex_unlock(&bfad_mutex);
1405 out:
1406 return 0;
1407 }
1408
1409 int
1410 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1411 unsigned int payload_len)
1412 {
1413 struct bfa_bsg_cee_stats_s *iocmd =
1414 (struct bfa_bsg_cee_stats_s *)cmd;
1415 void *iocmd_bufptr;
1416 struct bfad_hal_comp cee_comp;
1417 unsigned long flags;
1418
1419 if (bfad_chk_iocmd_sz(payload_len,
1420 sizeof(struct bfa_bsg_cee_stats_s),
1421 sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1422 iocmd->status = BFA_STATUS_VERSION_FAIL;
1423 return 0;
1424 }
1425
1426 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1427
1428 cee_comp.status = 0;
1429 init_completion(&cee_comp.comp);
1430 mutex_lock(&bfad_mutex);
1431 spin_lock_irqsave(&bfad->bfad_lock, flags);
1432 iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1433 bfad_hcb_comp, &cee_comp);
1434 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1435 if (iocmd->status != BFA_STATUS_OK) {
1436 mutex_unlock(&bfad_mutex);
1437 bfa_trc(bfad, 0x5555);
1438 goto out;
1439 }
1440 wait_for_completion(&cee_comp.comp);
1441 mutex_unlock(&bfad_mutex);
1442 out:
1443 return 0;
1444 }
1445
1446 int
1447 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1448 {
1449 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1450 unsigned long flags;
1451
1452 spin_lock_irqsave(&bfad->bfad_lock, flags);
1453 iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1454 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1455 if (iocmd->status != BFA_STATUS_OK)
1456 bfa_trc(bfad, 0x5555);
1457 return 0;
1458 }
1459
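/*
 * SFP media and speed queries.  Note the inverted wait condition: the
 * handler sleeps on the completion only while the SFP module is still
 * being queried (BFA_STATUS_SFP_NOT_READY); any other status, including
 * BFA_STATUS_OK, is returned to the caller immediately.
 */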
1460 int
1461 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1462 {
1463 struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1464 struct bfad_hal_comp fcomp;
1465 unsigned long flags;
1466
1467 init_completion(&fcomp.comp);
1468 spin_lock_irqsave(&bfad->bfad_lock, flags);
1469 iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1470 bfad_hcb_comp, &fcomp);
1471 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1472 bfa_trc(bfad, iocmd->status);
1473 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1474 goto out;
1475
1476 wait_for_completion(&fcomp.comp);
1477 iocmd->status = fcomp.status;
1478 out:
1479 return 0;
1480 }
1481
1482 int
1483 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1484 {
1485 struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1486 struct bfad_hal_comp fcomp;
1487 unsigned long flags;
1488
1489 init_completion(&fcomp.comp);
1490 spin_lock_irqsave(&bfad->bfad_lock, flags);
1491 iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1492 bfad_hcb_comp, &fcomp);
1493 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1494 bfa_trc(bfad, iocmd->status);
1495 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1496 goto out;
1497 wait_for_completion(&fcomp.comp);
1498 iocmd->status = fcomp.status;
1499 out:
1500 return 0;
1501 }
1502
1503 int
1504 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1505 {
1506 struct bfa_bsg_flash_attr_s *iocmd =
1507 (struct bfa_bsg_flash_attr_s *)cmd;
1508 struct bfad_hal_comp fcomp;
1509 unsigned long flags;
1510
1511 init_completion(&fcomp.comp);
1512 spin_lock_irqsave(&bfad->bfad_lock, flags);
1513 iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1514 bfad_hcb_comp, &fcomp);
1515 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1516 if (iocmd->status != BFA_STATUS_OK)
1517 goto out;
1518 wait_for_completion(&fcomp.comp);
1519 iocmd->status = fcomp.status;
1520 out:
1521 return 0;
1522 }
1523
1524 int
1525 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1526 {
1527 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1528 struct bfad_hal_comp fcomp;
1529 unsigned long flags;
1530
1531 init_completion(&fcomp.comp);
1532 spin_lock_irqsave(&bfad->bfad_lock, flags);
1533 iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1534 iocmd->instance, bfad_hcb_comp, &fcomp);
1535 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1536 if (iocmd->status != BFA_STATUS_OK)
1537 goto out;
1538 wait_for_completion(&fcomp.comp);
1539 iocmd->status = fcomp.status;
1540 out:
1541 return 0;
1542 }
1543
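/*
 * Flash partition read/update.  The transfer length comes from
 * iocmd->bufsz and is validated against the actual BSG payload length
 * before bfa_flash_update_part()/bfa_flash_read_part() is called with a
 * buffer pointer just past the fixed header.
 */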
1544 int
1545 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1546 unsigned int payload_len)
1547 {
1548 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1549 void *iocmd_bufptr;
1550 struct bfad_hal_comp fcomp;
1551 unsigned long flags;
1552
1553 if (bfad_chk_iocmd_sz(payload_len,
1554 sizeof(struct bfa_bsg_flash_s),
1555 iocmd->bufsz) != BFA_STATUS_OK) {
1556 iocmd->status = BFA_STATUS_VERSION_FAIL;
1557 return 0;
1558 }
1559
1560 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1561
1562 init_completion(&fcomp.comp);
1563 spin_lock_irqsave(&bfad->bfad_lock, flags);
1564 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1565 iocmd->type, iocmd->instance, iocmd_bufptr,
1566 iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1567 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1568 if (iocmd->status != BFA_STATUS_OK)
1569 goto out;
1570 wait_for_completion(&fcomp.comp);
1571 iocmd->status = fcomp.status;
1572 out:
1573 return 0;
1574 }
1575
1576 int
1577 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1578 unsigned int payload_len)
1579 {
1580 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1581 struct bfad_hal_comp fcomp;
1582 void *iocmd_bufptr;
1583 unsigned long flags;
1584
1585 if (bfad_chk_iocmd_sz(payload_len,
1586 sizeof(struct bfa_bsg_flash_s),
1587 iocmd->bufsz) != BFA_STATUS_OK) {
1588 iocmd->status = BFA_STATUS_VERSION_FAIL;
1589 return 0;
1590 }
1591
1592 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1593
1594 init_completion(&fcomp.comp);
1595 spin_lock_irqsave(&bfad->bfad_lock, flags);
1596 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1597 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1598 bfad_hcb_comp, &fcomp);
1599 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1600 if (iocmd->status != BFA_STATUS_OK)
1601 goto out;
1602 wait_for_completion(&fcomp.comp);
1603 iocmd->status = fcomp.status;
1604 out:
1605 return 0;
1606 }
1607
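/*
 * Diagnostics handlers: temperature sensor query, memory test, loopback,
 * firmware ping, queue test, SFP show, LED test, port beacon, loopback
 * status and D-Port test control.  Most issue an asynchronous diag
 * request and wait for its completion as above; the LED, beacon and
 * loopback-status handlers complete synchronously.
 */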
1608 int
1609 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1610 {
1611 struct bfa_bsg_diag_get_temp_s *iocmd =
1612 (struct bfa_bsg_diag_get_temp_s *)cmd;
1613 struct bfad_hal_comp fcomp;
1614 unsigned long flags;
1615
1616 init_completion(&fcomp.comp);
1617 spin_lock_irqsave(&bfad->bfad_lock, flags);
1618 iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1619 &iocmd->result, bfad_hcb_comp, &fcomp);
1620 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1621 bfa_trc(bfad, iocmd->status);
1622 if (iocmd->status != BFA_STATUS_OK)
1623 goto out;
1624 wait_for_completion(&fcomp.comp);
1625 iocmd->status = fcomp.status;
1626 out:
1627 return 0;
1628 }
1629
1630 int
1631 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1632 {
1633 struct bfa_bsg_diag_memtest_s *iocmd =
1634 (struct bfa_bsg_diag_memtest_s *)cmd;
1635 struct bfad_hal_comp fcomp;
1636 unsigned long flags;
1637
1638 init_completion(&fcomp.comp);
1639 spin_lock_irqsave(&bfad->bfad_lock, flags);
1640 iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1641 &iocmd->memtest, iocmd->pat,
1642 &iocmd->result, bfad_hcb_comp, &fcomp);
1643 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1644 bfa_trc(bfad, iocmd->status);
1645 if (iocmd->status != BFA_STATUS_OK)
1646 goto out;
1647 wait_for_completion(&fcomp.comp);
1648 iocmd->status = fcomp.status;
1649 out:
1650 return 0;
1651 }
1652
1653 int
1654 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1655 {
1656 struct bfa_bsg_diag_loopback_s *iocmd =
1657 (struct bfa_bsg_diag_loopback_s *)cmd;
1658 struct bfad_hal_comp fcomp;
1659 unsigned long flags;
1660
1661 init_completion(&fcomp.comp);
1662 spin_lock_irqsave(&bfad->bfad_lock, flags);
1663 iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1664 iocmd->speed, iocmd->lpcnt, iocmd->pat,
1665 &iocmd->result, bfad_hcb_comp, &fcomp);
1666 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1667 bfa_trc(bfad, iocmd->status);
1668 if (iocmd->status != BFA_STATUS_OK)
1669 goto out;
1670 wait_for_completion(&fcomp.comp);
1671 iocmd->status = fcomp.status;
1672 out:
1673 return 0;
1674 }
1675
1676 int
1677 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1678 {
1679 struct bfa_bsg_diag_fwping_s *iocmd =
1680 (struct bfa_bsg_diag_fwping_s *)cmd;
1681 struct bfad_hal_comp fcomp;
1682 unsigned long flags;
1683
1684 init_completion(&fcomp.comp);
1685 spin_lock_irqsave(&bfad->bfad_lock, flags);
1686 iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1687 iocmd->pattern, &iocmd->result,
1688 bfad_hcb_comp, &fcomp);
1689 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1690 bfa_trc(bfad, iocmd->status);
1691 if (iocmd->status != BFA_STATUS_OK)
1692 goto out;
1693 bfa_trc(bfad, 0x77771);
1694 wait_for_completion(&fcomp.comp);
1695 iocmd->status = fcomp.status;
1696 out:
1697 return 0;
1698 }
1699
1700 int
1701 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1702 {
1703 struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1704 struct bfad_hal_comp fcomp;
1705 unsigned long flags;
1706
1707 init_completion(&fcomp.comp);
1708 spin_lock_irqsave(&bfad->bfad_lock, flags);
1709 iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1710 iocmd->queue, &iocmd->result,
1711 bfad_hcb_comp, &fcomp);
1712 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1713 if (iocmd->status != BFA_STATUS_OK)
1714 goto out;
1715 wait_for_completion(&fcomp.comp);
1716 iocmd->status = fcomp.status;
1717 out:
1718 return 0;
1719 }
1720
1721 int
1722 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1723 {
1724 struct bfa_bsg_sfp_show_s *iocmd =
1725 (struct bfa_bsg_sfp_show_s *)cmd;
1726 struct bfad_hal_comp fcomp;
1727 unsigned long flags;
1728
1729 init_completion(&fcomp.comp);
1730 spin_lock_irqsave(&bfad->bfad_lock, flags);
1731 iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1732 bfad_hcb_comp, &fcomp);
1733 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1734 bfa_trc(bfad, iocmd->status);
1735 if (iocmd->status != BFA_STATUS_OK)
1736 goto out;
1737 wait_for_completion(&fcomp.comp);
1738 iocmd->status = fcomp.status;
1739 bfa_trc(bfad, iocmd->status);
1740 out:
1741 return 0;
1742 }
1743
1744 int
1745 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1746 {
1747 struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1748 unsigned long flags;
1749
1750 spin_lock_irqsave(&bfad->bfad_lock, flags);
1751 iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1752 &iocmd->ledtest);
1753 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1754 return 0;
1755 }
1756
1757 int
1758 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1759 {
1760 struct bfa_bsg_diag_beacon_s *iocmd =
1761 (struct bfa_bsg_diag_beacon_s *)cmd;
1762 unsigned long flags;
1763
1764 spin_lock_irqsave(&bfad->bfad_lock, flags);
1765 iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1766 iocmd->beacon, iocmd->link_e2e_beacon,
1767 iocmd->second);
1768 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1769 return 0;
1770 }
1771
1772 int
1773 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1774 {
1775 struct bfa_bsg_diag_lb_stat_s *iocmd =
1776 (struct bfa_bsg_diag_lb_stat_s *)cmd;
1777 unsigned long flags;
1778
1779 spin_lock_irqsave(&bfad->bfad_lock, flags);
1780 iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1781 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1782 bfa_trc(bfad, iocmd->status);
1783
1784 return 0;
1785 }
1786
1787 int
1788 bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
1789 {
1790 struct bfa_bsg_dport_enable_s *iocmd =
1791 (struct bfa_bsg_dport_enable_s *)pcmd;
1792 unsigned long flags;
1793 struct bfad_hal_comp fcomp;
1794
1795 init_completion(&fcomp.comp);
1796 spin_lock_irqsave(&bfad->bfad_lock, flags);
1797 iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
1798 iocmd->pat, bfad_hcb_comp, &fcomp);
1799 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1800 if (iocmd->status != BFA_STATUS_OK)
1801 bfa_trc(bfad, iocmd->status);
1802 else {
1803 wait_for_completion(&fcomp.comp);
1804 iocmd->status = fcomp.status;
1805 }
1806 return 0;
1807 }
1808
1809 int
1810 bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
1811 {
1812 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1813 unsigned long flags;
1814 struct bfad_hal_comp fcomp;
1815
1816 init_completion(&fcomp.comp);
1817 spin_lock_irqsave(&bfad->bfad_lock, flags);
1818 iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1819 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1820 if (iocmd->status != BFA_STATUS_OK)
1821 bfa_trc(bfad, iocmd->status);
1822 else {
1823 wait_for_completion(&fcomp.comp);
1824 iocmd->status = fcomp.status;
1825 }
1826 return 0;
1827 }
1828
1829 int
1830 bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
1831 {
1832 struct bfa_bsg_dport_enable_s *iocmd =
1833 (struct bfa_bsg_dport_enable_s *)pcmd;
1834 unsigned long flags;
1835 struct bfad_hal_comp fcomp;
1836
1837 init_completion(&fcomp.comp);
1838 spin_lock_irqsave(&bfad->bfad_lock, flags);
1839 iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
1840 iocmd->pat, bfad_hcb_comp,
1841 &fcomp);
1842 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1843
1844 if (iocmd->status != BFA_STATUS_OK) {
1845 bfa_trc(bfad, iocmd->status);
1846 } else {
1847 wait_for_completion(&fcomp.comp);
1848 iocmd->status = fcomp.status;
1849 }
1850
1851 return 0;
1852 }
1853
1854 int
1855 bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
1856 {
1857 struct bfa_bsg_diag_dport_show_s *iocmd =
1858 (struct bfa_bsg_diag_dport_show_s *)pcmd;
1859 unsigned long flags;
1860
1861 spin_lock_irqsave(&bfad->bfad_lock, flags);
1862 iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
1863 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1864
1865 return 0;
1866 }
1867
1868
1869 int
1870 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1871 {
1872 struct bfa_bsg_phy_attr_s *iocmd =
1873 (struct bfa_bsg_phy_attr_s *)cmd;
1874 struct bfad_hal_comp fcomp;
1875 unsigned long flags;
1876
1877 init_completion(&fcomp.comp);
1878 spin_lock_irqsave(&bfad->bfad_lock, flags);
1879 iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1880 &iocmd->attr, bfad_hcb_comp, &fcomp);
1881 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1882 if (iocmd->status != BFA_STATUS_OK)
1883 goto out;
1884 wait_for_completion(&fcomp.comp);
1885 iocmd->status = fcomp.status;
1886 out:
1887 return 0;
1888 }
1889
1890 int
1891 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1892 {
1893 struct bfa_bsg_phy_stats_s *iocmd =
1894 (struct bfa_bsg_phy_stats_s *)cmd;
1895 struct bfad_hal_comp fcomp;
1896 unsigned long flags;
1897
1898 init_completion(&fcomp.comp);
1899 spin_lock_irqsave(&bfad->bfad_lock, flags);
1900 iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1901 &iocmd->stats, bfad_hcb_comp, &fcomp);
1902 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1903 if (iocmd->status != BFA_STATUS_OK)
1904 goto out;
1905 wait_for_completion(&fcomp.comp);
1906 iocmd->status = fcomp.status;
1907 out:
1908 return 0;
1909 }
1910
1911 int
1912 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1913 {
1914 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1915 struct bfad_hal_comp fcomp;
1916 void *iocmd_bufptr;
1917 unsigned long flags;
1918
1919 if (bfad_chk_iocmd_sz(payload_len,
1920 sizeof(struct bfa_bsg_phy_s),
1921 iocmd->bufsz) != BFA_STATUS_OK) {
1922 iocmd->status = BFA_STATUS_VERSION_FAIL;
1923 return 0;
1924 }
1925
1926 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1927 init_completion(&fcomp.comp);
1928 spin_lock_irqsave(&bfad->bfad_lock, flags);
1929 iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1930 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1931 0, bfad_hcb_comp, &fcomp);
1932 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1933 if (iocmd->status != BFA_STATUS_OK)
1934 goto out;
1935 wait_for_completion(&fcomp.comp);
1936 iocmd->status = fcomp.status;
1939 out:
1940 return 0;
1941 }
1942
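/* Report vHBA attributes (WWNs, plog/IO-profile state, path TOV) from driver state. */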
1943 int
1944 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1945 {
1946 struct bfa_bsg_vhba_attr_s *iocmd =
1947 (struct bfa_bsg_vhba_attr_s *)cmd;
1948 struct bfa_vhba_attr_s *attr = &iocmd->attr;
1949 unsigned long flags;
1950
1951 spin_lock_irqsave(&bfad->bfad_lock, flags);
1952 attr->pwwn = bfad->bfa.ioc.attr->pwwn;
1953 attr->nwwn = bfad->bfa.ioc.attr->nwwn;
1954 attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1955 attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1956 attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
1957 iocmd->status = BFA_STATUS_OK;
1958 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1959 return 0;
1960 }
1961
1962 int
1963 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1964 {
1965 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1966 void *iocmd_bufptr;
1967 struct bfad_hal_comp fcomp;
1968 unsigned long flags;
1969
1970 if (bfad_chk_iocmd_sz(payload_len,
1971 sizeof(struct bfa_bsg_phy_s),
1972 iocmd->bufsz) != BFA_STATUS_OK) {
1973 iocmd->status = BFA_STATUS_VERSION_FAIL;
1974 return 0;
1975 }
1976
1977 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1978 init_completion(&fcomp.comp);
1979 spin_lock_irqsave(&bfad->bfad_lock, flags);
1980 iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1981 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1982 0, bfad_hcb_comp, &fcomp);
1983 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1984 if (iocmd->status != BFA_STATUS_OK)
1985 goto out;
1986 wait_for_completion(&fcomp.comp);
1987 iocmd->status = fcomp.status;
1988 out:
1989 return 0;
1990 }
1991
1992 int
1993 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
1994 {
1995 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1996 void *iocmd_bufptr;
1997
1998 if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
1999 bfa_trc(bfad, sizeof(struct bfa_plog_s));
2000 iocmd->status = BFA_STATUS_EINVAL;
2001 goto out;
2002 }
2003
2004 iocmd->status = BFA_STATUS_OK;
2005 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2006 memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
2007 out:
2008 return 0;
2009 }
2010
2011 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
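/*
 * Return one chunk of the saved firmware core dump. The caller provides a
 * buffer of at least BFA_DEBUG_FW_CORE_CHUNK_SZ bytes plus a running offset;
 * the updated offset is written back so the dump can be read iteratively.
 */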
2012 int
2013 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
2014 unsigned int payload_len)
2015 {
2016 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2017 void *iocmd_bufptr;
2018 unsigned long flags;
2019 u32 offset;
2020
2021 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
2022 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
2023 iocmd->status = BFA_STATUS_VERSION_FAIL;
2024 return 0;
2025 }
2026
2027 if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
2028 !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
2029 !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
2030 bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
2031 iocmd->status = BFA_STATUS_EINVAL;
2032 goto out;
2033 }
2034
2035 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2036 spin_lock_irqsave(&bfad->bfad_lock, flags);
2037 offset = iocmd->offset;
2038 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
2039 &offset, &iocmd->bufsz);
2040 iocmd->offset = offset;
2041 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2042 out:
2043 return 0;
2044 }
2045
2046 int
2047 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2048 {
2049 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2050 unsigned long flags;
2051
2052 if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
2053 spin_lock_irqsave(&bfad->bfad_lock, flags);
2054 bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
2055 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2056 } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
2057 bfad->plog_buf.head = bfad->plog_buf.tail = 0;
2058 else if (v_cmd == IOCMD_DEBUG_START_DTRC)
2059 bfa_trc_init(bfad->trcmod);
2060 else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
2061 bfa_trc_stop(bfad->trcmod);
2062
2063 iocmd->status = BFA_STATUS_OK;
2064 return 0;
2065 }
2066
2067 int
2068 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
2069 {
2070 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
2071
2072 if (iocmd->ctl == BFA_TRUE)
2073 bfad->plog_buf.plog_enabled = 1;
2074 else
2075 bfad->plog_buf.plog_enabled = 0;
2076
2077 iocmd->status = BFA_STATUS_OK;
2078 return 0;
2079 }
2080
2081 int
2082 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2083 {
2084 struct bfa_bsg_fcpim_profile_s *iocmd =
2085 (struct bfa_bsg_fcpim_profile_s *)cmd;
2086 struct timeval tv;
2087 unsigned long flags;
2088
2089 do_gettimeofday(&tv);
2090 spin_lock_irqsave(&bfad->bfad_lock, flags);
2091 if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2092 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
2093 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2094 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2095 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2096
2097 return 0;
2098 }
2099
2100 static int
2101 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2102 {
2103 struct bfa_bsg_itnim_ioprofile_s *iocmd =
2104 (struct bfa_bsg_itnim_ioprofile_s *)cmd;
2105 struct bfa_fcs_lport_s *fcs_port;
2106 struct bfa_fcs_itnim_s *itnim;
2107 unsigned long flags;
2108
2109 spin_lock_irqsave(&bfad->bfad_lock, flags);
2110 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2111 iocmd->vf_id, iocmd->lpwwn);
2112 if (!fcs_port)
2113 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2114 else {
2115 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2116 if (itnim == NULL)
2117 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2118 else
2119 iocmd->status = bfa_itnim_get_ioprofile(
2120 bfa_fcs_itnim_get_halitn(itnim),
2121 &iocmd->ioprofile);
2122 }
2123 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2124 return 0;
2125 }
2126
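/*
 * Fetch FC port statistics. The request is queued through a
 * bfa_cb_pending_q_s entry so the stats are written directly into
 * iocmd->stats when the firmware responds, signalled via bfad_hcb_comp().
 */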
2127 int
2128 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2129 {
2130 struct bfa_bsg_fcport_stats_s *iocmd =
2131 (struct bfa_bsg_fcport_stats_s *)cmd;
2132 struct bfad_hal_comp fcomp;
2133 unsigned long flags;
2134 struct bfa_cb_pending_q_s cb_qe;
2135
2136 init_completion(&fcomp.comp);
2137 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2138 &fcomp, &iocmd->stats);
2139 spin_lock_irqsave(&bfad->bfad_lock, flags);
2140 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2141 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2142 if (iocmd->status != BFA_STATUS_OK) {
2143 bfa_trc(bfad, iocmd->status);
2144 goto out;
2145 }
2146 wait_for_completion(&fcomp.comp);
2147 iocmd->status = fcomp.status;
2148 out:
2149 return 0;
2150 }
2151
2152 int
2153 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2154 {
2155 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2156 struct bfad_hal_comp fcomp;
2157 unsigned long flags;
2158 struct bfa_cb_pending_q_s cb_qe;
2159
2160 init_completion(&fcomp.comp);
2161 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2162
2163 spin_lock_irqsave(&bfad->bfad_lock, flags);
2164 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2165 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2166 if (iocmd->status != BFA_STATUS_OK) {
2167 bfa_trc(bfad, iocmd->status);
2168 goto out;
2169 }
2170 wait_for_completion(&fcomp.comp);
2171 iocmd->status = fcomp.status;
2172 out:
2173 return 0;
2174 }
2175
2176 int
2177 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2178 {
2179 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2180 struct bfad_hal_comp fcomp;
2181 unsigned long flags;
2182
2183 init_completion(&fcomp.comp);
2184 spin_lock_irqsave(&bfad->bfad_lock, flags);
2185 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2186 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2187 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2188 bfad_hcb_comp, &fcomp);
2189 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2190 if (iocmd->status != BFA_STATUS_OK)
2191 goto out;
2192 wait_for_completion(&fcomp.comp);
2193 iocmd->status = fcomp.status;
2194 out:
2195 return 0;
2196 }
2197
2198 int
2199 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2200 {
2201 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2202 struct bfad_hal_comp fcomp;
2203 unsigned long flags;
2204
2205 init_completion(&fcomp.comp);
2206 spin_lock_irqsave(&bfad->bfad_lock, flags);
2207 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2208 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2209 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2210 bfad_hcb_comp, &fcomp);
2211 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2212 if (iocmd->status != BFA_STATUS_OK)
2213 goto out;
2214 wait_for_completion(&fcomp.comp);
2215 iocmd->status = fcomp.status;
2216 out:
2217 return 0;
2218 }
2219
2220 int
2221 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2222 {
2223 struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2224 struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2225 struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2226 unsigned long flags;
2227
2228 spin_lock_irqsave(&bfad->bfad_lock, flags);
2229 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2230 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2231 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2232 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2233 iocmd->status = BFA_STATUS_OK;
2234 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2235
2236 return 0;
2237 }
2238
2239 int
2240 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2241 {
2242 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2243 struct bfad_hal_comp fcomp;
2244 unsigned long flags;
2245
2246 init_completion(&fcomp.comp);
2247 spin_lock_irqsave(&bfad->bfad_lock, flags);
2248 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2249 BFA_FLASH_PART_PXECFG,
2250 bfad->bfa.ioc.port_id, &iocmd->cfg,
2251 sizeof(struct bfa_ethboot_cfg_s), 0,
2252 bfad_hcb_comp, &fcomp);
2253 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2254 if (iocmd->status != BFA_STATUS_OK)
2255 goto out;
2256 wait_for_completion(&fcomp.comp);
2257 iocmd->status = fcomp.status;
2258 out:
2259 return 0;
2260 }
2261
2262 int
2263 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2264 {
2265 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2266 struct bfad_hal_comp fcomp;
2267 unsigned long flags;
2268
2269 init_completion(&fcomp.comp);
2270 spin_lock_irqsave(&bfad->bfad_lock, flags);
2271 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2272 BFA_FLASH_PART_PXECFG,
2273 bfad->bfa.ioc.port_id, &iocmd->cfg,
2274 sizeof(struct bfa_ethboot_cfg_s), 0,
2275 bfad_hcb_comp, &fcomp);
2276 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2277 if (iocmd->status != BFA_STATUS_OK)
2278 goto out;
2279 wait_for_completion(&fcomp.comp);
2280 iocmd->status = fcomp.status;
2281 out:
2282 return 0;
2283 }
2284
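/*
 * Enable or disable FC trunking. The change is rejected while the port is
 * in D-Port or loop topology; otherwise the port is disabled, the trunk
 * configuration is updated, and the port is brought back up if appropriate.
 */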
2285 int
2286 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2287 {
2288 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2289 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2290 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2291 unsigned long flags;
2292
2293 spin_lock_irqsave(&bfad->bfad_lock, flags);
2294
	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}
2297
2298 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2299 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2300 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2301 else {
2302 if (v_cmd == IOCMD_TRUNK_ENABLE) {
2303 trunk->attr.state = BFA_TRUNK_OFFLINE;
2304 bfa_fcport_disable(&bfad->bfa);
2305 fcport->cfg.trunked = BFA_TRUE;
2306 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2307 trunk->attr.state = BFA_TRUNK_DISABLED;
2308 bfa_fcport_disable(&bfad->bfa);
2309 fcport->cfg.trunked = BFA_FALSE;
2310 }
2311
2312 if (!bfa_fcport_is_disabled(&bfad->bfa))
2313 bfa_fcport_enable(&bfad->bfa);
2314
2315 iocmd->status = BFA_STATUS_OK;
2316 }
2317
2318 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2319
2320 return 0;
2321 }
2322
2323 int
2324 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2325 {
2326 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2327 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2328 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2329 unsigned long flags;
2330
2331 spin_lock_irqsave(&bfad->bfad_lock, flags);
2332 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2333 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2334 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2335 else {
2336 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2337 sizeof(struct bfa_trunk_attr_s));
2338 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2339 iocmd->status = BFA_STATUS_OK;
2340 }
2341 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2342
2343 return 0;
2344 }
2345
2346 int
2347 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2348 {
2349 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2350 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2351 unsigned long flags;
2352
2353 spin_lock_irqsave(&bfad->bfad_lock, flags);
2354 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2355 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2356 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2357 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2358 else {
2359 if (v_cmd == IOCMD_QOS_ENABLE)
2360 fcport->cfg.qos_enabled = BFA_TRUE;
2361 else if (v_cmd == IOCMD_QOS_DISABLE) {
2362 fcport->cfg.qos_enabled = BFA_FALSE;
2363 fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2364 fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2365 fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2366 }
2367 }
2368 }
2369 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2370
2371 return 0;
2372 }
2373
2374 int
2375 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2376 {
2377 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2378 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2379 unsigned long flags;
2380
2381 spin_lock_irqsave(&bfad->bfad_lock, flags);
2382 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2383 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2384 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2385 else {
2386 iocmd->attr.state = fcport->qos_attr.state;
2387 iocmd->attr.total_bb_cr =
2388 be32_to_cpu(fcport->qos_attr.total_bb_cr);
2389 iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2390 iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2391 iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2392 iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2393 iocmd->status = BFA_STATUS_OK;
2394 }
2395 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2396
2397 return 0;
2398 }
2399
2400 int
2401 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2402 {
2403 struct bfa_bsg_qos_vc_attr_s *iocmd =
2404 (struct bfa_bsg_qos_vc_attr_s *)cmd;
2405 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2406 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2407 unsigned long flags;
2408 u32 i = 0;
2409
2410 spin_lock_irqsave(&bfad->bfad_lock, flags);
2411 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2412 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
2413 iocmd->attr.elp_opmode_flags =
2414 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2415
2416 /* Individual VC info */
2417 while (i < iocmd->attr.total_vc_count) {
2418 iocmd->attr.vc_info[i].vc_credit =
2419 bfa_vc_attr->vc_info[i].vc_credit;
2420 iocmd->attr.vc_info[i].borrow_credit =
2421 bfa_vc_attr->vc_info[i].borrow_credit;
2422 iocmd->attr.vc_info[i].priority =
2423 bfa_vc_attr->vc_info[i].priority;
2424 i++;
2425 }
2426 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2427
2428 iocmd->status = BFA_STATUS_OK;
2429 return 0;
2430 }
2431
2432 int
2433 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2434 {
2435 struct bfa_bsg_fcport_stats_s *iocmd =
2436 (struct bfa_bsg_fcport_stats_s *)cmd;
2437 struct bfad_hal_comp fcomp;
2438 unsigned long flags;
2439 struct bfa_cb_pending_q_s cb_qe;
2440 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2441
2442 init_completion(&fcomp.comp);
2443 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2444 &fcomp, &iocmd->stats);
2445
2446 spin_lock_irqsave(&bfad->bfad_lock, flags);
2447 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2448 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2449 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2450 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2451 else
2452 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2453 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2454 if (iocmd->status != BFA_STATUS_OK) {
2455 bfa_trc(bfad, iocmd->status);
2456 goto out;
2457 }
2458 wait_for_completion(&fcomp.comp);
2459 iocmd->status = fcomp.status;
2460 out:
2461 return 0;
2462 }
2463
2464 int
2465 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2466 {
2467 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2468 struct bfad_hal_comp fcomp;
2469 unsigned long flags;
2470 struct bfa_cb_pending_q_s cb_qe;
2471 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2472
2473 init_completion(&fcomp.comp);
2474 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2475 &fcomp, NULL);
2476
2477 spin_lock_irqsave(&bfad->bfad_lock, flags);
2478 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2479 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2480 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2481 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2482 else
2483 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2484 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2485 if (iocmd->status != BFA_STATUS_OK) {
2486 bfa_trc(bfad, iocmd->status);
2487 goto out;
2488 }
2489 wait_for_completion(&fcomp.comp);
2490 iocmd->status = fcomp.status;
2491 out:
2492 return 0;
2493 }
2494
2495 int
2496 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2497 {
2498 struct bfa_bsg_vf_stats_s *iocmd =
2499 (struct bfa_bsg_vf_stats_s *)cmd;
2500 struct bfa_fcs_fabric_s *fcs_vf;
2501 unsigned long flags;
2502
2503 spin_lock_irqsave(&bfad->bfad_lock, flags);
2504 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2505 if (fcs_vf == NULL) {
2506 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2507 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2508 goto out;
2509 }
2510 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2511 sizeof(struct bfa_vf_stats_s));
2512 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2513 iocmd->status = BFA_STATUS_OK;
2514 out:
2515 return 0;
2516 }
2517
2518 int
2519 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2520 {
2521 struct bfa_bsg_vf_reset_stats_s *iocmd =
2522 (struct bfa_bsg_vf_reset_stats_s *)cmd;
2523 struct bfa_fcs_fabric_s *fcs_vf;
2524 unsigned long flags;
2525
2526 spin_lock_irqsave(&bfad->bfad_lock, flags);
2527 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2528 if (fcs_vf == NULL) {
2529 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2530 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2531 goto out;
2532 }
2533 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2534 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2535 iocmd->status = BFA_STATUS_OK;
2536 out:
2537 return 0;
2538 }
2539
2540 /* Function to reset the LUN SCAN mode */
2541 static void
2542 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2543 {
2544 struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2545 struct bfad_vport_s *vport = NULL;
2546
2547 /* Set the scsi device LUN SCAN flags for base port */
2548 bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2549
2550 /* Set the scsi device LUN SCAN flags for the vports */
2551 list_for_each_entry(vport, &bfad->vport_list, list_entry)
2552 bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
2553 }
2554
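/*
 * Enable, disable or clear LUN masking. Enabling switches the SCSI devices
 * to sequential LUN scan and disabling restores REPORT_LUNS based scanning,
 * both via bfad_iocmd_lunmask_reset_lunscan_mode().
 */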
2555 int
2556 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2557 {
2558 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2559 unsigned long flags;
2560
2561 spin_lock_irqsave(&bfad->bfad_lock, flags);
2562 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2563 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2564 /* Set the LUN Scanning mode to be Sequential scan */
2565 if (iocmd->status == BFA_STATUS_OK)
2566 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2567 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2568 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2569 /* Set the LUN Scanning mode to default REPORT_LUNS scan */
2570 if (iocmd->status == BFA_STATUS_OK)
2571 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2572 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2573 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2574 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2575 return 0;
2576 }
2577
2578 int
2579 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2580 {
2581 struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2582 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2583 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2584 unsigned long flags;
2585
2586 spin_lock_irqsave(&bfad->bfad_lock, flags);
2587 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2588 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2589 return 0;
2590 }
2591
2592 int
2593 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2594 {
2595 struct bfa_bsg_fcpim_lunmask_s *iocmd =
2596 (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2597 unsigned long flags;
2598
2599 spin_lock_irqsave(&bfad->bfad_lock, flags);
2600 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2601 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2602 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2603 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2604 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2605 iocmd->vf_id, &iocmd->pwwn,
2606 iocmd->rpwwn, iocmd->lun);
2607 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2608 return 0;
2609 }
2610
2611 int
2612 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2613 {
2614 struct bfa_bsg_fcpim_throttle_s *iocmd =
2615 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2616 unsigned long flags;
2617
2618 spin_lock_irqsave(&bfad->bfad_lock, flags);
2619 iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2620 (void *)&iocmd->throttle);
2621 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2622
2623 return 0;
2624 }
2625
2626 int
2627 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2628 {
2629 struct bfa_bsg_fcpim_throttle_s *iocmd =
2630 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2631 unsigned long flags;
2632
2633 spin_lock_irqsave(&bfad->bfad_lock, flags);
2634 iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2635 iocmd->throttle.cfg_value);
2636 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2637
2638 return 0;
2639 }
2640
2641 int
2642 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2643 {
2644 struct bfa_bsg_tfru_s *iocmd =
2645 (struct bfa_bsg_tfru_s *)cmd;
2646 struct bfad_hal_comp fcomp;
2647 unsigned long flags = 0;
2648
2649 init_completion(&fcomp.comp);
2650 spin_lock_irqsave(&bfad->bfad_lock, flags);
2651 iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2652 &iocmd->data, iocmd->len, iocmd->offset,
2653 bfad_hcb_comp, &fcomp);
2654 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2655 if (iocmd->status == BFA_STATUS_OK) {
2656 wait_for_completion(&fcomp.comp);
2657 iocmd->status = fcomp.status;
2658 }
2659
2660 return 0;
2661 }
2662
2663 int
2664 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2665 {
2666 struct bfa_bsg_tfru_s *iocmd =
2667 (struct bfa_bsg_tfru_s *)cmd;
2668 struct bfad_hal_comp fcomp;
2669 unsigned long flags = 0;
2670
2671 init_completion(&fcomp.comp);
2672 spin_lock_irqsave(&bfad->bfad_lock, flags);
2673 iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2674 &iocmd->data, iocmd->len, iocmd->offset,
2675 bfad_hcb_comp, &fcomp);
2676 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2677 if (iocmd->status == BFA_STATUS_OK) {
2678 wait_for_completion(&fcomp.comp);
2679 iocmd->status = fcomp.status;
2680 }
2681
2682 return 0;
2683 }
2684
2685 int
2686 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2687 {
2688 struct bfa_bsg_fruvpd_s *iocmd =
2689 (struct bfa_bsg_fruvpd_s *)cmd;
2690 struct bfad_hal_comp fcomp;
2691 unsigned long flags = 0;
2692
2693 init_completion(&fcomp.comp);
2694 spin_lock_irqsave(&bfad->bfad_lock, flags);
2695 iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2696 &iocmd->data, iocmd->len, iocmd->offset,
2697 bfad_hcb_comp, &fcomp);
2698 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2699 if (iocmd->status == BFA_STATUS_OK) {
2700 wait_for_completion(&fcomp.comp);
2701 iocmd->status = fcomp.status;
2702 }
2703
2704 return 0;
2705 }
2706
2707 int
2708 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2709 {
2710 struct bfa_bsg_fruvpd_s *iocmd =
2711 (struct bfa_bsg_fruvpd_s *)cmd;
2712 struct bfad_hal_comp fcomp;
2713 unsigned long flags = 0;
2714
2715 init_completion(&fcomp.comp);
2716 spin_lock_irqsave(&bfad->bfad_lock, flags);
2717 iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2718 &iocmd->data, iocmd->len, iocmd->offset,
2719 bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
2720 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2721 if (iocmd->status == BFA_STATUS_OK) {
2722 wait_for_completion(&fcomp.comp);
2723 iocmd->status = fcomp.status;
2724 }
2725
2726 return 0;
2727 }
2728
2729 int
2730 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2731 {
2732 struct bfa_bsg_fruvpd_max_size_s *iocmd =
2733 (struct bfa_bsg_fruvpd_max_size_s *)cmd;
2734 unsigned long flags = 0;
2735
2736 spin_lock_irqsave(&bfad->bfad_lock, flags);
2737 iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2738 &iocmd->max_size);
2739 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2740
2741 return 0;
2742 }
2743
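/*
 * Central dispatcher for vendor-specific BSG requests: decode the IOCMD
 * code and hand the linearized payload to the matching bfad_iocmd_*
 * handler. Unknown commands return -EINVAL.
 */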
2744 static int
2745 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2746 unsigned int payload_len)
2747 {
2748 int rc = -EINVAL;
2749
2750 switch (cmd) {
2751 case IOCMD_IOC_ENABLE:
2752 rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2753 break;
2754 case IOCMD_IOC_DISABLE:
2755 rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2756 break;
2757 case IOCMD_IOC_GET_INFO:
2758 rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2759 break;
2760 case IOCMD_IOC_GET_ATTR:
2761 rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2762 break;
2763 case IOCMD_IOC_GET_STATS:
2764 rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2765 break;
2766 case IOCMD_IOC_GET_FWSTATS:
2767 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2768 break;
2769 case IOCMD_IOC_RESET_STATS:
2770 case IOCMD_IOC_RESET_FWSTATS:
2771 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2772 break;
2773 case IOCMD_IOC_SET_ADAPTER_NAME:
2774 case IOCMD_IOC_SET_PORT_NAME:
2775 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2776 break;
2777 case IOCMD_IOCFC_GET_ATTR:
2778 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2779 break;
2780 case IOCMD_IOCFC_SET_INTR:
2781 rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2782 break;
2783 case IOCMD_PORT_ENABLE:
2784 rc = bfad_iocmd_port_enable(bfad, iocmd);
2785 break;
2786 case IOCMD_PORT_DISABLE:
2787 rc = bfad_iocmd_port_disable(bfad, iocmd);
2788 break;
2789 case IOCMD_PORT_GET_ATTR:
2790 rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2791 break;
2792 case IOCMD_PORT_GET_STATS:
2793 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2794 break;
2795 case IOCMD_PORT_RESET_STATS:
2796 rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2797 break;
2798 case IOCMD_PORT_CFG_TOPO:
2799 case IOCMD_PORT_CFG_SPEED:
2800 case IOCMD_PORT_CFG_ALPA:
2801 case IOCMD_PORT_CLR_ALPA:
2802 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2803 break;
2804 case IOCMD_PORT_CFG_MAXFRSZ:
2805 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2806 break;
2807 case IOCMD_PORT_BBCR_ENABLE:
2808 case IOCMD_PORT_BBCR_DISABLE:
2809 rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
2810 break;
2811 case IOCMD_PORT_BBCR_GET_ATTR:
2812 rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
2813 break;
2814 case IOCMD_LPORT_GET_ATTR:
2815 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2816 break;
2817 case IOCMD_LPORT_GET_STATS:
2818 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2819 break;
2820 case IOCMD_LPORT_RESET_STATS:
2821 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2822 break;
2823 case IOCMD_LPORT_GET_IOSTATS:
2824 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2825 break;
2826 case IOCMD_LPORT_GET_RPORTS:
2827 rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2828 break;
2829 case IOCMD_RPORT_GET_ATTR:
2830 rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2831 break;
2832 case IOCMD_RPORT_GET_ADDR:
2833 rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2834 break;
2835 case IOCMD_RPORT_GET_STATS:
2836 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2837 break;
2838 case IOCMD_RPORT_RESET_STATS:
2839 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2840 break;
2841 case IOCMD_RPORT_SET_SPEED:
2842 rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2843 break;
2844 case IOCMD_VPORT_GET_ATTR:
2845 rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2846 break;
2847 case IOCMD_VPORT_GET_STATS:
2848 rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2849 break;
2850 case IOCMD_VPORT_RESET_STATS:
2851 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2852 break;
2853 case IOCMD_FABRIC_GET_LPORTS:
2854 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2855 break;
2856 case IOCMD_RATELIM_ENABLE:
2857 case IOCMD_RATELIM_DISABLE:
2858 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2859 break;
2860 case IOCMD_RATELIM_DEF_SPEED:
2861 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2862 break;
2863 case IOCMD_FCPIM_FAILOVER:
2864 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2865 break;
2866 case IOCMD_FCPIM_MODSTATS:
2867 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2868 break;
2869 case IOCMD_FCPIM_MODSTATSCLR:
2870 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2871 break;
2872 case IOCMD_FCPIM_DEL_ITN_STATS:
2873 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2874 break;
2875 case IOCMD_ITNIM_GET_ATTR:
2876 rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2877 break;
2878 case IOCMD_ITNIM_GET_IOSTATS:
2879 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2880 break;
2881 case IOCMD_ITNIM_RESET_STATS:
2882 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2883 break;
2884 case IOCMD_ITNIM_GET_ITNSTATS:
2885 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2886 break;
2887 case IOCMD_FCPORT_ENABLE:
2888 rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2889 break;
2890 case IOCMD_FCPORT_DISABLE:
2891 rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2892 break;
2893 case IOCMD_IOC_PCIFN_CFG:
2894 rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2895 break;
2896 case IOCMD_PCIFN_CREATE:
2897 rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2898 break;
2899 case IOCMD_PCIFN_DELETE:
2900 rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2901 break;
2902 case IOCMD_PCIFN_BW:
2903 rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2904 break;
2905 case IOCMD_ADAPTER_CFG_MODE:
2906 rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2907 break;
2908 case IOCMD_PORT_CFG_MODE:
2909 rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2910 break;
2911 case IOCMD_FLASH_ENABLE_OPTROM:
2912 case IOCMD_FLASH_DISABLE_OPTROM:
2913 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2914 break;
2915 case IOCMD_FAA_QUERY:
2916 rc = bfad_iocmd_faa_query(bfad, iocmd);
2917 break;
2918 case IOCMD_CEE_GET_ATTR:
2919 rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2920 break;
2921 case IOCMD_CEE_GET_STATS:
2922 rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2923 break;
2924 case IOCMD_CEE_RESET_STATS:
2925 rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2926 break;
2927 case IOCMD_SFP_MEDIA:
2928 rc = bfad_iocmd_sfp_media(bfad, iocmd);
2929 break;
2930 case IOCMD_SFP_SPEED:
2931 rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2932 break;
2933 case IOCMD_FLASH_GET_ATTR:
2934 rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2935 break;
2936 case IOCMD_FLASH_ERASE_PART:
2937 rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2938 break;
2939 case IOCMD_FLASH_UPDATE_PART:
2940 rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2941 break;
2942 case IOCMD_FLASH_READ_PART:
2943 rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2944 break;
2945 case IOCMD_DIAG_TEMP:
2946 rc = bfad_iocmd_diag_temp(bfad, iocmd);
2947 break;
2948 case IOCMD_DIAG_MEMTEST:
2949 rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2950 break;
2951 case IOCMD_DIAG_LOOPBACK:
2952 rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2953 break;
2954 case IOCMD_DIAG_FWPING:
2955 rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2956 break;
2957 case IOCMD_DIAG_QUEUETEST:
2958 rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2959 break;
2960 case IOCMD_DIAG_SFP:
2961 rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2962 break;
2963 case IOCMD_DIAG_LED:
2964 rc = bfad_iocmd_diag_led(bfad, iocmd);
2965 break;
2966 case IOCMD_DIAG_BEACON_LPORT:
2967 rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
2968 break;
2969 case IOCMD_DIAG_LB_STAT:
2970 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2971 break;
2972 case IOCMD_DIAG_DPORT_ENABLE:
2973 rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
2974 break;
2975 case IOCMD_DIAG_DPORT_DISABLE:
2976 rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
2977 break;
2978 case IOCMD_DIAG_DPORT_SHOW:
2979 rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
2980 break;
2981 case IOCMD_DIAG_DPORT_START:
2982 rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
2983 break;
2984 case IOCMD_PHY_GET_ATTR:
2985 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
2986 break;
2987 case IOCMD_PHY_GET_STATS:
2988 rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
2989 break;
2990 case IOCMD_PHY_UPDATE_FW:
2991 rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
2992 break;
2993 case IOCMD_PHY_READ_FW:
2994 rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
2995 break;
2996 case IOCMD_VHBA_QUERY:
2997 rc = bfad_iocmd_vhba_query(bfad, iocmd);
2998 break;
2999 case IOCMD_DEBUG_PORTLOG:
3000 rc = bfad_iocmd_porglog_get(bfad, iocmd);
3001 break;
3002 case IOCMD_DEBUG_FW_CORE:
3003 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
3004 break;
3005 case IOCMD_DEBUG_FW_STATE_CLR:
3006 case IOCMD_DEBUG_PORTLOG_CLR:
3007 case IOCMD_DEBUG_START_DTRC:
3008 case IOCMD_DEBUG_STOP_DTRC:
3009 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
3010 break;
3011 case IOCMD_DEBUG_PORTLOG_CTL:
3012 rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
3013 break;
3014 case IOCMD_FCPIM_PROFILE_ON:
3015 case IOCMD_FCPIM_PROFILE_OFF:
3016 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
3017 break;
3018 case IOCMD_ITNIM_GET_IOPROFILE:
3019 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
3020 break;
3021 case IOCMD_FCPORT_GET_STATS:
3022 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
3023 break;
3024 case IOCMD_FCPORT_RESET_STATS:
3025 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
3026 break;
3027 case IOCMD_BOOT_CFG:
3028 rc = bfad_iocmd_boot_cfg(bfad, iocmd);
3029 break;
3030 case IOCMD_BOOT_QUERY:
3031 rc = bfad_iocmd_boot_query(bfad, iocmd);
3032 break;
3033 case IOCMD_PREBOOT_QUERY:
3034 rc = bfad_iocmd_preboot_query(bfad, iocmd);
3035 break;
3036 case IOCMD_ETHBOOT_CFG:
3037 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
3038 break;
3039 case IOCMD_ETHBOOT_QUERY:
3040 rc = bfad_iocmd_ethboot_query(bfad, iocmd);
3041 break;
3042 case IOCMD_TRUNK_ENABLE:
3043 case IOCMD_TRUNK_DISABLE:
3044 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
3045 break;
3046 case IOCMD_TRUNK_GET_ATTR:
3047 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
3048 break;
3049 case IOCMD_QOS_ENABLE:
3050 case IOCMD_QOS_DISABLE:
3051 rc = bfad_iocmd_qos(bfad, iocmd, cmd);
3052 break;
3053 case IOCMD_QOS_GET_ATTR:
3054 rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
3055 break;
3056 case IOCMD_QOS_GET_VC_ATTR:
3057 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
3058 break;
3059 case IOCMD_QOS_GET_STATS:
3060 rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
3061 break;
3062 case IOCMD_QOS_RESET_STATS:
3063 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
3064 break;
3065 case IOCMD_QOS_SET_BW:
3066 rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3067 break;
3068 case IOCMD_VF_GET_STATS:
3069 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
3070 break;
3071 case IOCMD_VF_RESET_STATS:
3072 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
3073 break;
3074 case IOCMD_FCPIM_LUNMASK_ENABLE:
3075 case IOCMD_FCPIM_LUNMASK_DISABLE:
3076 case IOCMD_FCPIM_LUNMASK_CLEAR:
3077 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
3078 break;
3079 case IOCMD_FCPIM_LUNMASK_QUERY:
3080 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
3081 break;
3082 case IOCMD_FCPIM_LUNMASK_ADD:
3083 case IOCMD_FCPIM_LUNMASK_DELETE:
3084 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
3085 break;
3086 case IOCMD_FCPIM_THROTTLE_QUERY:
3087 rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3088 break;
3089 case IOCMD_FCPIM_THROTTLE_SET:
3090 rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3091 break;
3092 /* TFRU */
3093 case IOCMD_TFRU_READ:
3094 rc = bfad_iocmd_tfru_read(bfad, iocmd);
3095 break;
3096 case IOCMD_TFRU_WRITE:
3097 rc = bfad_iocmd_tfru_write(bfad, iocmd);
3098 break;
3099 /* FRU */
3100 case IOCMD_FRUVPD_READ:
3101 rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3102 break;
3103 case IOCMD_FRUVPD_UPDATE:
3104 rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3105 break;
3106 case IOCMD_FRUVPD_GET_MAX_SIZE:
3107 rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3108 break;
3109 default:
3110 rc = -EINVAL;
3111 break;
3112 }
3113 return rc;
3114 }
3115
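/*
 * Vendor BSG entry point: copy the request sg_list into a linear kernel
 * buffer, run it through bfad_iocmd_handler(), copy the in-place response
 * back to the reply sg_list and complete the job.
 */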
3116 static int
3117 bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
3118 {
3119 uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
3120 struct bfad_im_port_s *im_port =
3121 (struct bfad_im_port_s *) job->shost->hostdata[0];
3122 struct bfad_s *bfad = im_port->bfad;
3123 struct request_queue *request_q = job->req->q;
3124 void *payload_kbuf;
3125 int rc = -EINVAL;
3126
3127 	/*
3128 	 * Raise the BSG request_queue segment limit to 256 so payloads
3129 	 * larger than 512 * 1024 bytes can be supported.
3130 	 */
3131 blk_queue_max_segments(request_q, 256);
3132
3133 /* Allocate a temp buffer to hold the passed in user space command */
3134 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3135 if (!payload_kbuf) {
3136 rc = -ENOMEM;
3137 goto out;
3138 }
3139
3140 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
3141 sg_copy_to_buffer(job->request_payload.sg_list,
3142 job->request_payload.sg_cnt, payload_kbuf,
3143 job->request_payload.payload_len);
3144
3145 /* Invoke IOCMD handler - to handle all the vendor command requests */
3146 rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
3147 job->request_payload.payload_len);
3148 if (rc != BFA_STATUS_OK)
3149 goto error;
3150
3151 /* Copy the response data to the job->reply_payload sg_list */
3152 sg_copy_from_buffer(job->reply_payload.sg_list,
3153 job->reply_payload.sg_cnt,
3154 payload_kbuf,
3155 job->reply_payload.payload_len);
3156
3157 /* free the command buffer */
3158 kfree(payload_kbuf);
3159
3160 /* Fill the BSG job reply data */
3161 job->reply_len = job->reply_payload.payload_len;
3162 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3163 job->reply->result = rc;
3164
3165 job->job_done(job);
3166 return rc;
3167 error:
3168 /* free the command buffer */
3169 kfree(payload_kbuf);
3170 out:
3171 job->reply->result = rc;
3172 job->reply_len = sizeof(uint32_t);
3173 job->reply->reply_payload_rcv_len = 0;
3174 return rc;
3175 }
3176
3177 /* FC passthru call backs */
3178 u64
3179 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
3180 {
3181 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3182 struct bfa_sge_s *sge;
3183 u64 addr;
3184
3185 sge = drv_fcxp->req_sge + sgeid;
3186 addr = (u64)(size_t) sge->sg_addr;
3187 return addr;
3188 }
3189
3190 u32
3191 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
3192 {
3193 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3194 struct bfa_sge_s *sge;
3195
3196 sge = drv_fcxp->req_sge + sgeid;
3197 return sge->sg_len;
3198 }
3199
3200 u64
3201 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
3202 {
3203 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3204 struct bfa_sge_s *sge;
3205 u64 addr;
3206
3207 sge = drv_fcxp->rsp_sge + sgeid;
3208 addr = (u64)(size_t) sge->sg_addr;
3209 return addr;
3210 }
3211
3212 u32
3213 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
3214 {
3215 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3216 struct bfa_sge_s *sge;
3217
3218 sge = drv_fcxp->rsp_sge + sgeid;
3219 return sge->sg_len;
3220 }
3221
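/*
 * Completion callback for a passthrough FCXP: record the request status and
 * response length, then wake the waiter in bfad_im_bsg_els_ct_request().
 */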
3222 void
3223 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
3224 bfa_status_t req_status, u32 rsp_len, u32 resid_len,
3225 struct fchs_s *rsp_fchs)
3226 {
3227 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3228
3229 drv_fcxp->req_status = req_status;
3230 drv_fcxp->rsp_len = rsp_len;
3231
3232 /* bfa_fcxp will be automatically freed by BFA */
3233 drv_fcxp->bfa_fcxp = NULL;
3234 complete(&drv_fcxp->comp);
3235 }
3236
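/*
 * Build the DMA view of a linear passthrough buffer: allocate a single
 * DMA-coherent region of payload_len bytes, copy the data into it and
 * describe it with a one-entry bfa_sge_s table that follows the
 * bfad_buf_info in the same allocation.
 */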
3237 struct bfad_buf_info *
3238 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3239 uint32_t payload_len, uint32_t *num_sgles)
3240 {
3241 struct bfad_buf_info *buf_base, *buf_info;
3242 struct bfa_sge_s *sg_table;
3243 int sge_num = 1;
3244
3245 buf_base = kzalloc((sizeof(struct bfad_buf_info) +
3246 sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
3247 if (!buf_base)
3248 return NULL;
3249
3250 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
3251 (sizeof(struct bfad_buf_info) * sge_num));
3252
3253 /* Allocate dma coherent memory */
3254 buf_info = buf_base;
3255 buf_info->size = payload_len;
3256 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
3257 &buf_info->phys, GFP_KERNEL);
3258 if (!buf_info->virt)
3259 goto out_free_mem;
3260
3261 /* copy the linear bsg buffer to buf_info */
3262 memset(buf_info->virt, 0, buf_info->size);
3263 memcpy(buf_info->virt, payload_kbuf, buf_info->size);
3264
3265 /*
3266 * Setup SG table
3267 */
3268 sg_table->sg_len = buf_info->size;
3269 sg_table->sg_addr = (void *)(size_t) buf_info->phys;
3270
3271 *num_sgles = sge_num;
3272
3273 return buf_base;
3274
3275 out_free_mem:
3276 kfree(buf_base);
3277 return NULL;
3278 }
3279
3280 void
3281 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3282 uint32_t num_sgles)
3283 {
3284 int i;
3285 struct bfad_buf_info *buf_info = buf_base;
3286
3287 if (buf_base) {
3288 for (i = 0; i < num_sgles; buf_info++, i++) {
3289 if (buf_info->virt != NULL)
3290 dma_free_coherent(&bfad->pcidev->dev,
3291 buf_info->size, buf_info->virt,
3292 buf_info->phys);
3293 }
3294 kfree(buf_base);
3295 }
3296 }
3297
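/*
 * Allocate a bfa_fcxp for the passthrough request and send it under
 * bfad_lock; bfad_send_fcpt_cb() runs when the exchange completes.
 * Returns BFA_STATUS_ENOMEM if no fcxp is available.
 */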
3298 int
3299 bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
3300 bfa_bsg_fcpt_t *bsg_fcpt)
3301 {
3302 struct bfa_fcxp_s *hal_fcxp;
3303 struct bfad_s *bfad = drv_fcxp->port->bfad;
3304 unsigned long flags;
3305 uint8_t lp_tag;
3306
3307 spin_lock_irqsave(&bfad->bfad_lock, flags);
3308
3309 /* Allocate bfa_fcxp structure */
3310 hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
3311 drv_fcxp->num_req_sgles,
3312 drv_fcxp->num_rsp_sgles,
3313 bfad_fcxp_get_req_sgaddr_cb,
3314 bfad_fcxp_get_req_sglen_cb,
3315 bfad_fcxp_get_rsp_sgaddr_cb,
3316 bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
3317 if (!hal_fcxp) {
3318 bfa_trc(bfad, 0);
3319 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3320 return BFA_STATUS_ENOMEM;
3321 }
3322
3323 drv_fcxp->bfa_fcxp = hal_fcxp;
3324
3325 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
3326
3327 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
3328 bsg_fcpt->cts, bsg_fcpt->cos,
3329 job->request_payload.payload_len,
3330 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
3331 job->reply_payload.payload_len, bsg_fcpt->tsecs);
3332
3333 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3334
3335 return BFA_STATUS_OK;
3336 }
3337
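/*
 * ELS/CT passthrough: copy in the user-supplied bfa_bsg_fcpt_t, resolve the
 * local port (and the remote port for RPT commands), map the request and
 * response payloads for DMA, send the FCXP and wait for its completion,
 * then return the response data and status to user space.
 */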
3338 int
3339 bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
3340 {
3341 struct bfa_bsg_data *bsg_data;
3342 struct bfad_im_port_s *im_port =
3343 (struct bfad_im_port_s *) job->shost->hostdata[0];
3344 struct bfad_s *bfad = im_port->bfad;
3345 bfa_bsg_fcpt_t *bsg_fcpt;
3346 struct bfad_fcxp *drv_fcxp;
3347 struct bfa_fcs_lport_s *fcs_port;
3348 struct bfa_fcs_rport_s *fcs_rport;
3349 uint32_t command_type = job->request->msgcode;
3350 unsigned long flags;
3351 struct bfad_buf_info *rsp_buf_info;
3352 void *req_kbuf = NULL, *rsp_kbuf = NULL;
3353 int rc = -EINVAL;
3354
3355 	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
3356 job->reply->reply_payload_rcv_len = 0;
3357
3358 /* Get the payload passed in from userspace */
3359 bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
3360 sizeof(struct fc_bsg_request));
3361 if (bsg_data == NULL)
3362 goto out;
3363
3364 /*
3365 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
3366 * buffer of size bsg_data->payload_len
3367 */
3368 bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3369 if (!bsg_fcpt) {
3370 rc = -ENOMEM;
3371 goto out;
3372 }
3373
3374 if (copy_from_user((uint8_t *)bsg_fcpt,
3375 (void *)(unsigned long)bsg_data->payload,
3376 bsg_data->payload_len)) {
3377 kfree(bsg_fcpt);
3378 rc = -EIO;
3379 goto out;
3380 }
3381
3382 drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3383 if (drv_fcxp == NULL) {
3384 kfree(bsg_fcpt);
3385 rc = -ENOMEM;
3386 goto out;
3387 }
3388
3389 spin_lock_irqsave(&bfad->bfad_lock, flags);
3390 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3391 bsg_fcpt->lpwwn);
3392 if (fcs_port == NULL) {
3393 bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3394 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3395 goto out_free_mem;
3396 }
3397
3398 /* Check if the port is online before sending FC Passthru cmd */
3399 if (!bfa_fcs_lport_is_online(fcs_port)) {
3400 bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3401 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3402 goto out_free_mem;
3403 }
3404
3405 drv_fcxp->port = fcs_port->bfad_port;
3406
3407 	if (drv_fcxp->port->bfad == NULL)
3408 drv_fcxp->port->bfad = bfad;
3409
3410 /* Fetch the bfa_rport - if nexus needed */
3411 if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3412 command_type == FC_BSG_HST_CT) {
3413 /* BSG HST commands: no nexus needed */
3414 drv_fcxp->bfa_rport = NULL;
3415
3416 } else if (command_type == FC_BSG_RPT_ELS ||
3417 command_type == FC_BSG_RPT_CT) {
3418 /* BSG RPT commands: nexus needed */
3419 fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3420 bsg_fcpt->dpwwn);
3421 if (fcs_rport == NULL) {
3422 bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3423 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3424 goto out_free_mem;
3425 }
3426
3427 drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3428
3429 } else { /* Unknown BSG msgcode; return -EINVAL */
3430 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3431 goto out_free_mem;
3432 }
3433
3434 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3435
3436 /* allocate memory for req / rsp buffers */
3437 req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3438 if (!req_kbuf) {
3439 printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3440 bfad->pci_name);
3441 rc = -ENOMEM;
3442 goto out_free_mem;
3443 }
3444
3445 rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3446 if (!rsp_kbuf) {
3447 printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3448 bfad->pci_name);
3449 rc = -ENOMEM;
3450 goto out_free_mem;
3451 }
3452
3453 /* map req sg - copy the sg_list passed in to the linear buffer */
3454 sg_copy_to_buffer(job->request_payload.sg_list,
3455 job->request_payload.sg_cnt, req_kbuf,
3456 job->request_payload.payload_len);
3457
3458 drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3459 job->request_payload.payload_len,
3460 &drv_fcxp->num_req_sgles);
3461 if (!drv_fcxp->reqbuf_info) {
3462 printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3463 bfad->pci_name);
3464 rc = -ENOMEM;
3465 goto out_free_mem;
3466 }
3467
3468 drv_fcxp->req_sge = (struct bfa_sge_s *)
3469 (((uint8_t *)drv_fcxp->reqbuf_info) +
3470 (sizeof(struct bfad_buf_info) *
3471 drv_fcxp->num_req_sgles));
3472
3473 /* map rsp sg */
3474 drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3475 job->reply_payload.payload_len,
3476 &drv_fcxp->num_rsp_sgles);
3477 if (!drv_fcxp->rspbuf_info) {
3478 printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3479 bfad->pci_name);
3480 rc = -ENOMEM;
3481 goto out_free_mem;
3482 }
3483
3484 rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3485 drv_fcxp->rsp_sge = (struct bfa_sge_s *)
3486 (((uint8_t *)drv_fcxp->rspbuf_info) +
3487 (sizeof(struct bfad_buf_info) *
3488 drv_fcxp->num_rsp_sgles));
3489
3490 /* fcxp send */
3491 init_completion(&drv_fcxp->comp);
3492 rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3493 if (rc == BFA_STATUS_OK) {
3494 wait_for_completion(&drv_fcxp->comp);
3495 bsg_fcpt->status = drv_fcxp->req_status;
3496 } else {
3497 bsg_fcpt->status = rc;
3498 goto out_free_mem;
3499 }
3500
3501 /* fill the job->reply data */
3502 if (drv_fcxp->req_status == BFA_STATUS_OK) {
3503 job->reply_len = drv_fcxp->rsp_len;
3504 job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3505 job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3506 } else {
3507 job->reply->reply_payload_rcv_len =
3508 sizeof(struct fc_bsg_ctels_reply);
3509 job->reply_len = sizeof(uint32_t);
3510 job->reply->reply_data.ctels_reply.status =
3511 FC_CTELS_STATUS_REJECT;
3512 }
3513
3514 /* Copy the response data to the reply_payload sg list */
3515 sg_copy_from_buffer(job->reply_payload.sg_list,
3516 job->reply_payload.sg_cnt,
3517 (uint8_t *)rsp_buf_info->virt,
3518 job->reply_payload.payload_len);
3519
3520 out_free_mem:
3521 bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3522 drv_fcxp->num_rsp_sgles);
3523 bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3524 drv_fcxp->num_req_sgles);
3525 kfree(req_kbuf);
3526 kfree(rsp_kbuf);
3527
3528 	/* Copy the updated bsg_fcpt (including status) back to user space */
3529 if (copy_to_user((void *)(unsigned long)bsg_data->payload,
3530 (void *)bsg_fcpt, bsg_data->payload_len))
3531 rc = -EIO;
3532
3533 kfree(bsg_fcpt);
3534 kfree(drv_fcxp);
3535 out:
3536 job->reply->result = rc;
3537
3538 if (rc == BFA_STATUS_OK)
3539 job->job_done(job);
3540
3541 return rc;
3542 }
3543
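/* Top-level fc_bsg entry point: route vendor and ELS/CT passthrough requests. */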
3544 int
3545 bfad_im_bsg_request(struct fc_bsg_job *job)
3546 {
3547 uint32_t rc = BFA_STATUS_OK;
3548
3549 switch (job->request->msgcode) {
3550 case FC_BSG_HST_VENDOR:
3551 /* Process BSG HST Vendor requests */
3552 rc = bfad_im_bsg_vendor_request(job);
3553 break;
3554 case FC_BSG_HST_ELS_NOLOGIN:
3555 case FC_BSG_RPT_ELS:
3556 case FC_BSG_HST_CT:
3557 case FC_BSG_RPT_CT:
3558 /* Process BSG ELS/CT commands */
3559 rc = bfad_im_bsg_els_ct_request(job);
3560 break;
3561 default:
3562 job->reply->result = rc = -EINVAL;
3563 job->reply->reply_payload_rcv_len = 0;
3564 break;
3565 }
3566
3567 return rc;
3568 }
3569
3570 int
3571 bfad_im_bsg_timeout(struct fc_bsg_job *job)
3572 {
3573 	/* Don't complete the BSG job request - return -EAGAIN
3574 	 * to reset the bsg job timeout; for ELS/CT passthrough we
3575 	 * already have a timer tracking the request.
3576 	 */
3577 return -EAGAIN;
3578 }