1 /*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include <linux/delay.h>
36 #include <linux/jiffies.h>
37 #include <linux/string.h>
38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_transport_fc.h>
40
41 #include "csio_hw.h"
42 #include "csio_lnode.h"
43 #include "csio_rnode.h"
44 #include "csio_mb.h"
45 #include "csio_wr.h"
46
47 #define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
48
49 /* MB Command/Response Helpers */
50 /*
51 * csio_mb_fw_retval - FW return value from a mailbox response.
52 * @mbp: Mailbox structure
53 *
54 */
55 enum fw_retval
56 csio_mb_fw_retval(struct csio_mb *mbp)
57 {
58 struct fw_cmd_hdr *hdr;
59
60 hdr = (struct fw_cmd_hdr *)(mbp->mb);
61
62 return FW_CMD_RETVAL_G(ntohl(hdr->lo));
63 }
64
65 /*
66 * csio_mb_hello - FW HELLO command helper
67 * @hw: The HW structure
68 * @mbp: Mailbox structure
69 * @m_mbox: Master mailbox number, if any.
70 * @a_mbox: Mailbox number for async notifications.
71 * @master: Device mastership.
72 * @cbfn: Callback, if any.
73 *
74 */
75 void
76 csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
77 uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
78 void (*cbfn) (struct csio_hw *, struct csio_mb *))
79 {
80 struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
81
82 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
83
84 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
85 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
86 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
87 cmdp->err_to_clearinit = htonl(
88 FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
89 FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
90 FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
91 m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
92 FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
93 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
94 FW_HELLO_CMD_CLEARINIT);
95
96 }
97
98 /*
99 * csio_mb_process_hello_rsp - FW HELLO response processing helper
100 * @hw: The HW structure
101 * @mbp: Mailbox structure
102 * @retval: Mailbox return value from Firmware
103 * @state: Device state derived from the response.
104 * @mpfn: Master pfn
105 *
106 */
107 void
108 csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
109 enum fw_retval *retval, enum csio_dev_state *state,
110 uint8_t *mpfn)
111 {
112 struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
113 uint32_t value;
114
115 *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
116
117 if (*retval == FW_SUCCESS) {
118 hw->fwrev = ntohl(rsp->fwrev);
119
120 value = ntohl(rsp->err_to_clearinit);
121 *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
122
123 if (value & FW_HELLO_CMD_INIT)
124 *state = CSIO_DEV_STATE_INIT;
125 else if (value & FW_HELLO_CMD_ERR)
126 *state = CSIO_DEV_STATE_ERR;
127 else
128 *state = CSIO_DEV_STATE_UNINIT;
129 }
130 }
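/*
 * Editorial note: a minimal, illustrative sketch (not driver code) of how
 * the HELLO helpers above are typically combined in polled mode. The
 * mailbox "mbp" is assumed to be allocated and sized by the caller, "tmo"
 * is an assumed timeout of at least CSIO_MB_POLL_FREQ, and the mastership
 * and mailbox-number arguments are placeholders; only helpers defined in
 * this file are used.
 *
 *	enum fw_retval retval;
 *	enum csio_dev_state state;
 *	uint8_t mpfn;
 *
 *	csio_mb_hello(hw, mbp, tmo, hw->pfn, hw->pfn,
 *		      CSIO_MASTER_MUST, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_process_hello_rsp(hw, mbp, &retval, &state, &mpfn);
 *	if (retval != FW_SUCCESS)
 *		return;
 */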
131
132 /*
133 * csio_mb_bye - FW BYE command helper
134 * @hw: The HW structure
135 * @mbp: Mailbox structure
136 * @cbfn: Callback, if any.
137 *
138 */
139 void
140 csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
141 void (*cbfn) (struct csio_hw *, struct csio_mb *))
142 {
143 struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
144
145 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
146
147 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
148 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
149 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
150
151 }
152
153 /*
154 * csio_mb_reset - FW RESET command helper
155 * @hw: The HW structure
156 * @mbp: Mailbox structure
157 * @reset: Type of reset.
158 * @cbfn: Callback, if any.
159 *
160 */
161 void
162 csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
163 int reset, int halt,
164 void (*cbfn) (struct csio_hw *, struct csio_mb *))
165 {
166 struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
167
168 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
169
170 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
171 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
172 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
173 cmdp->val = htonl(reset);
174 cmdp->halt_pkd = htonl(halt);
175
176 }
177
178 /*
179 * csio_mb_params - FW PARAMS command helper
180 * @hw: The HW structure
181 * @mbp: Mailbox structure
182 * @tmo: Command timeout.
183 * @pf: PF number.
184 * @vf: VF number.
185 * @nparams: Number of parameters
186 * @params: Parameter mnemonic array.
187 * @val: Parameter value array.
188 * @wr: Write/Read PARAMS.
189 * @cbfn: Callback, if any.
190 *
191 */
192 void
193 csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
194 unsigned int pf, unsigned int vf, unsigned int nparams,
195 const u32 *params, u32 *val, bool wr,
196 void (*cbfn)(struct csio_hw *, struct csio_mb *))
197 {
198 uint32_t i;
199 uint32_t temp_params = 0, temp_val = 0;
200 struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
201 __be32 *p = &cmdp->param[0].mnem;
202
203 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
204
205 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) |
206 FW_CMD_REQUEST_F |
207 (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) |
208 FW_PARAMS_CMD_PFN(pf) |
209 FW_PARAMS_CMD_VFN(vf));
210 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
211
212 /* Write Params */
213 if (wr) {
214 while (nparams--) {
215 temp_params = *params++;
216 temp_val = *val++;
217
218 *p++ = htonl(temp_params);
219 *p++ = htonl(temp_val);
220 }
221 } else {
222 for (i = 0; i < nparams; i++, p += 2) {
223 temp_params = *params++;
224 *p = htonl(temp_params);
225 }
226 }
227
228 }
229
230 /*
231 * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
232 * @hw: The HW structure
233 * @mbp: Mailbox structure
234 * @retval: Mailbox return value from Firmware
235 * @nparams: Number of parameters
236 * @val: Parameter value array.
237 *
238 */
239 void
240 csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
241 enum fw_retval *retval, unsigned int nparams,
242 u32 *val)
243 {
244 struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
245 uint32_t i;
246 __be32 *p = &rsp->param[0].val;
247
248 *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
249
250 if (*retval == FW_SUCCESS)
251 for (i = 0; i < nparams; i++, p += 2)
252 *val++ = ntohl(*p);
253 }
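/*
 * Editorial note: an illustrative sketch of a polled PARAMS read built
 * only from the helpers in this file. The mnemonic value in param[0] is a
 * placeholder; real callers compose it from the FW_PARAMS_* macros in the
 * firmware interface headers. "mbp" and "tmo" are assumed to be set up by
 * the caller.
 *
 *	enum fw_retval retval;
 *	u32 param[1] = { 0 };	// hypothetical mnemonic
 *	u32 val[1];
 *
 *	csio_mb_params(hw, mbp, tmo, hw->pfn, 0, 1, param, val,
 *		       false, NULL);
 *	if (csio_mb_issue(hw, mbp) == 0)
 *		csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, val);
 */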
254
255 /*
256 * csio_mb_ldst - FW LDST command
257 * @hw: The HW structure
258 * @mbp: Mailbox structure
259 * @tmo: timeout
260 * @reg: register
261 *
262 */
263 void
264 csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
265 {
266 struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
267 CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
268
269 /*
270 * Construct and send the Firmware LDST Command to retrieve the
271 * specified PCI-E Configuration Space register.
272 */
273 ldst_cmd->op_to_addrspace =
274 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
275 FW_CMD_REQUEST_F |
276 FW_CMD_READ_F |
277 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
278 ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
279 ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
280 ldst_cmd->u.pcie.ctrl_to_fn =
281 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
282 ldst_cmd->u.pcie.r = (uint8_t)reg;
283 }
284
285 /*
286 *
287 * csio_mb_caps_config - FW Read/Write Capabilities command helper
288 * @hw: The HW structure
289 * @mbp: Mailbox structure
290 * @wr: Write if 1, Read if 0
291 * @init: Turn on initiator mode.
292 * @tgt: Turn on target mode.
293 * @cofld: If 1, Control Offload for FCoE
294 * @cbfn: Callback, if any.
295 *
296 * This helper assumes that cmdp has MB payload from a previous CAPS
297 * read command.
298 */
299 void
300 csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
301 bool wr, bool init, bool tgt, bool cofld,
302 void (*cbfn) (struct csio_hw *, struct csio_mb *))
303 {
304 struct fw_caps_config_cmd *cmdp =
305 (struct fw_caps_config_cmd *)(mbp->mb);
306
307 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
308
309 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
310 FW_CMD_REQUEST_F |
311 (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F));
312 cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
313
314 /* Read config */
315 if (!wr)
316 return;
317
318 /* Write config */
319 cmdp->fcoecaps = 0;
320
321 if (cofld)
322 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
323 if (init)
324 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
325 if (tgt)
326 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
327 }
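/*
 * Editorial note: an illustrative sketch of the read-modify-write flow the
 * header above describes. The read pass clears the payload; the write pass
 * reuses it (CSIO_INIT_MBP is told not to zero the command) and only the
 * FCoE capability bits are rewritten. "tmo" and the error handling are
 * assumptions of the sketch.
 *
 *	// Read current capabilities first.
 *	csio_mb_caps_config(hw, mbp, tmo, false, 0, 0, 0, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *
 *	// Write back, enabling FCoE initiator mode on top of the read payload.
 *	csio_mb_caps_config(hw, mbp, tmo, true, 1, 0, 0, NULL);
 *	csio_mb_issue(hw, mbp);
 */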
328
329 #define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
330 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
331
332 /*
333 * csio_mb_port- FW PORT command helper
334 * @hw: The HW structure
335 * @mbp: Mailbox structure
336 * @tmo: Command timeout
337 * @portid: Port ID to get/set info
338 * @wr: Write/Read PORT information.
339 * @fc: Flow control
340 * @caps: Port capabilities to set.
341 * @cbfn: Callback, if any.
342 *
343 */
344 void
345 csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
346 uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
347 void (*cbfn) (struct csio_hw *, struct csio_mb *))
348 {
349 struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
350 unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
351
352 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
353
354 cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
355 FW_CMD_REQUEST_F |
356 (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) |
357 FW_PORT_CMD_PORTID(portid));
358 if (!wr) {
359 cmdp->action_to_len16 = htonl(
360 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
361 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
362 return;
363 }
364
365 /* Set port */
366 cmdp->action_to_len16 = htonl(
367 FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
369
370 if (fc & PAUSE_RX)
371 lfc |= FW_PORT_CAP_FC_RX;
372 if (fc & PAUSE_TX)
373 lfc |= FW_PORT_CAP_FC_TX;
374
375 if (!(caps & FW_PORT_CAP_ANEG))
376 cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
377 else
378 cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
379 lfc | mdi);
380 }
381
382 /*
383 * csio_mb_process_read_port_rsp - FW PORT command response processing helper
384 * @hw: The HW structure
385 * @mbp: Mailbox structure
386 * @retval: Mailbox return value from Firmware
387 * @caps: port capabilities
388 *
389 */
390 void
391 csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
392 enum fw_retval *retval, uint16_t *caps)
393 {
394 struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
395
396 *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
397
398 if (*retval == FW_SUCCESS)
399 *caps = ntohs(rsp->u.info.pcap);
400 }
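/*
 * Editorial note: an illustrative sketch of reading the port capabilities
 * with the PORT helpers above in polled mode (cbfn == NULL). "portid" and
 * "tmo" are assumptions of the sketch.
 *
 *	enum fw_retval retval;
 *	uint16_t caps;
 *
 *	csio_mb_port(hw, mbp, tmo, portid, false, 0, 0, NULL);
 *	if (csio_mb_issue(hw, mbp) == 0)
 *		csio_mb_process_read_port_rsp(hw, mbp, &retval, &caps);
 */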
401
402 /*
403 * csio_mb_initialize - FW INITIALIZE command helper
404 * @hw: The HW structure
405 * @mbp: Mailbox structure
406 * @tmo: Command timeout
407 * @cbfn: Callback, if any.
408 *
409 */
410 void
411 csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
412 void (*cbfn) (struct csio_hw *, struct csio_mb *))
413 {
414 struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
415
416 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
417
418 cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) |
419 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
420 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
421
422 }
423
424 /*
425 * csio_mb_iq_alloc - Initializes the mailbox to allocate an
426 * Ingress DMA queue in the firmware.
427 *
428 * @hw: The hw structure
429 * @mbp: Mailbox structure to initialize
430 * @priv: Private object
431 * @mb_tmo: Mailbox time-out period (in ms).
432 * @iq_params: Ingress queue params needed for allocation.
433 * @cbfn: The call-back function
434 *
435 *
436 */
437 static void
438 csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
439 uint32_t mb_tmo, struct csio_iq_params *iq_params,
440 void (*cbfn) (struct csio_hw *, struct csio_mb *))
441 {
442 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
443
444 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
445
446 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
447 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
448 FW_IQ_CMD_PFN(iq_params->pfn) |
449 FW_IQ_CMD_VFN(iq_params->vfn));
450
451 cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
452 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
453
454 cmdp->type_to_iqandstindex = htonl(
455 FW_IQ_CMD_VIID(iq_params->viid) |
456 FW_IQ_CMD_TYPE(iq_params->type) |
457 FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
458
459 cmdp->fl0size = htons(iq_params->fl0size);
460 cmdp->fl1size = htons(iq_params->fl1size);
461
462 } /* csio_mb_iq_alloc */
463
464 /*
465 * csio_mb_iq_write - Initializes the mailbox for writing into an
466 * Ingress DMA Queue.
467 *
468 * @hw: The HW structure
469 * @mbp: Mailbox structure to initialize
470 * @priv: Private object
471 * @mb_tmo: Mailbox time-out period (in ms).
472 * @cascaded_req: TRUE - if this request is cascaded with iq-alloc request.
473 * @iq_params: Ingress queue params needed for writing.
474 * @cbfn: The call-back function
475 *
476 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
477 * because this IQ write request can be cascaded with a previous
478 * IQ alloc request, and we don't want to overwrite the bits set by
479 * that request. This logic will work even in a non-cascaded case, since the
480 * cmdp structure is zeroed out by CSIO_INIT_MBP.
481 */
482 static void
483 csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
484 uint32_t mb_tmo, bool cascaded_req,
485 struct csio_iq_params *iq_params,
486 void (*cbfn) (struct csio_hw *, struct csio_mb *))
487 {
488 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
489
490 uint32_t iq_start_stop = (iq_params->iq_start) ?
491 FW_IQ_CMD_IQSTART(1) :
492 FW_IQ_CMD_IQSTOP(1);
493
494 /*
495 * If this IQ write is cascaded with IQ alloc request, do not
496 * re-initialize with 0's.
497 *
498 */
499 if (!cascaded_req)
500 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
501
502 cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) |
503 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
504 FW_IQ_CMD_PFN(iq_params->pfn) |
505 FW_IQ_CMD_VFN(iq_params->vfn));
506 cmdp->alloc_to_len16 |= htonl(iq_start_stop |
507 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
508 cmdp->iqid |= htons(iq_params->iqid);
509 cmdp->fl0id |= htons(iq_params->fl0id);
510 cmdp->fl1id |= htons(iq_params->fl1id);
511 cmdp->type_to_iqandstindex |= htonl(
512 FW_IQ_CMD_IQANDST(iq_params->iqandst) |
513 FW_IQ_CMD_IQANUS(iq_params->iqanus) |
514 FW_IQ_CMD_IQANUD(iq_params->iqanud) |
515 FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
516 cmdp->iqdroprss_to_iqesize |= htons(
517 FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
518 FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
519 FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
520 FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
521 FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
522 FW_IQ_CMD_IQESIZE(iq_params->iqesize));
523
524 cmdp->iqsize |= htons(iq_params->iqsize);
525 cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
526
527 if (iq_params->type == 0) {
528 cmdp->iqns_to_fl0congen |= htonl(
529 FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
530 FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
531 }
532
533 if (iq_params->fl0size && iq_params->fl0addr &&
534 (iq_params->fl0id != 0xFFFF)) {
535
536 cmdp->iqns_to_fl0congen |= htonl(
537 FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
538 FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
539 FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
540 FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
541 cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
542 FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
543 FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
544 FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
545 FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
546 FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
547 cmdp->fl0size |= htons(iq_params->fl0size);
548 cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
549 }
550 } /* csio_mb_iq_write */
551
552 /*
553 * csio_mb_iq_alloc_write - Initializes the mailbox for allocating and
554 * writing into an Ingress DMA Queue.
555 *
556 * @hw: The HW structure
557 * @mbp: Mailbox structure to initialize
558 * @priv: Private data.
559 * @mb_tmo: Mailbox time-out period (in ms).
560 * @iq_params: Ingress queue params needed for allocation & writing.
561 * @cbfn: The call-back function
562 *
563 *
564 */
565 void
566 csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
567 uint32_t mb_tmo, struct csio_iq_params *iq_params,
568 void (*cbfn) (struct csio_hw *, struct csio_mb *))
569 {
570 csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
571 csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
572 } /* csio_mb_iq_alloc_write */
573
574 /*
575 * csio_mb_iq_alloc_write_rsp - Processes the response to an ingress
576 * DMA queue allocate/write mailbox.
577 *
578 * @hw: The HW structure.
579 * @mbp: Mailbox structure to initialize.
580 * @retval: Firmware return value.
581 * @iq_params: Ingress queue parameters, after allocation and write.
582 *
583 */
584 void
585 csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
586 enum fw_retval *ret_val,
587 struct csio_iq_params *iq_params)
588 {
589 struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
590
591 *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
592 if (*ret_val == FW_SUCCESS) {
593 iq_params->physiqid = ntohs(rsp->physiqid);
594 iq_params->iqid = ntohs(rsp->iqid);
595 iq_params->fl0id = ntohs(rsp->fl0id);
596 iq_params->fl1id = ntohs(rsp->fl1id);
597 } else {
598 iq_params->physiqid = iq_params->iqid =
599 iq_params->fl0id = iq_params->fl1id = 0;
600 }
601 } /* csio_mb_iq_alloc_write_rsp */
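/*
 * Editorial note: an illustrative sketch of allocating and starting an
 * ingress queue with the combined helper above, then collecting the
 * firmware-assigned ids from the response. "iq_params", "priv" and
 * "mb_tmo" are assumed to be prepared by the queue-management code.
 *
 *	enum fw_retval retval;
 *
 *	csio_mb_iq_alloc_write(hw, mbp, priv, mb_tmo, &iq_params, NULL);
 *	if (csio_mb_issue(hw, mbp) == 0) {
 *		csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iq_params);
 *		if (retval != FW_SUCCESS)
 *			return;
 *	}
 */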
602
603 /*
604 * csio_mb_iq_free - Initializes the mailbox for freeing a
605 * specified Ingress DMA Queue.
606 *
607 * @hw: The HW structure
608 * @mbp: Mailbox structure to initialize
609 * @priv: Private data
610 * @mb_tmo: Mailbox time-out period (in ms).
611 * @iq_params: Parameters of ingress queue, that is to be freed.
612 * @cbfn: The call-back function
613 *
614 *
615 */
616 void
617 csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
618 uint32_t mb_tmo, struct csio_iq_params *iq_params,
619 void (*cbfn) (struct csio_hw *, struct csio_mb *))
620 {
621 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
622
623 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
624
625 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
626 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
627 FW_IQ_CMD_PFN(iq_params->pfn) |
628 FW_IQ_CMD_VFN(iq_params->vfn));
629 cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
630 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
631 cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
632
633 cmdp->iqid = htons(iq_params->iqid);
634 cmdp->fl0id = htons(iq_params->fl0id);
635 cmdp->fl1id = htons(iq_params->fl1id);
636
637 } /* csio_mb_iq_free */
638
639 /*
640 * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
641 * an offload-egress queue.
642 *
643 * @hw: The HW structure
644 * @mbp: Mailbox structure to initialize
645 * @priv: Private data
646 * @mb_tmo: Mailbox time-out period (in ms).
647 * @eq_ofld_params: (Offload) Egress queue parameters.
648 * @cbfn: The call-back function
649 *
650 *
651 */
652 static void
653 csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
654 uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
655 void (*cbfn) (struct csio_hw *, struct csio_mb *))
656 {
657 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
658
659 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
660 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
661 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
662 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
663 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
664 cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
665 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
666
667 } /* csio_mb_eq_ofld_alloc */
668
669 /*
670 * csio_mb_eq_ofld_write - Initializes the mailbox for writing
671 * an allocated offload-egress queue.
672 *
673 * @hw: The HW structure
674 * @mbp: Mailbox structure to initialize
675 * @priv: Private data
676 * @mb_tmo: Mailbox time-out period (in ms).
677 * @cascaded_req: TRUE - if this request is cascaded with Eq-alloc request.
678 * @eq_ofld_params: (Offload) Egress queue parameters.
679 * @cbfn: The call-back function
680 *
681 *
682 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
683 * because this EQ write request can be cascaded with a previous
684 * EQ alloc request, and we don't want to overwrite the bits set by
685 * that request. This logic will work even in a non-cascaded case, since the
686 * cmdp structure is zeroed out by CSIO_INIT_MBP.
687 */
688 static void
689 csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
690 uint32_t mb_tmo, bool cascaded_req,
691 struct csio_eq_params *eq_ofld_params,
692 void (*cbfn) (struct csio_hw *, struct csio_mb *))
693 {
694 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
695
696 uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
697 FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
698
699 /*
700 * If this EQ write is cascaded with EQ alloc request, do not
701 * re-initialize with 0's.
702 *
703 */
704 if (!cascaded_req)
705 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
706
707 cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
708 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
709 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
710 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
711 cmdp->alloc_to_len16 |= htonl(eq_start_stop |
712 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
713
714 cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
715
716 cmdp->fetchszm_to_iqid |= htonl(
717 FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
718 FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
719 FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
720 FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
721
722 cmdp->dcaen_to_eqsize |= htonl(
723 FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
724 FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
725 FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
726 FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
727 FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
728 FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
729 FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
730
731 cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
732
733 } /* csio_mb_eq_ofld_write */
734
735 /*
736 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
737 * writing into an Egress DMA Queue.
738 *
739 * @hw: The HW structure
740 * @mbp: Mailbox structure to initialize
741 * @priv: Private data.
742 * @mb_tmo: Mailbox time-out period (in ms).
743 * @eq_ofld_params: (Offload) Egress queue parameters.
744 * @cbfn: The call-back function
745 *
746 *
747 */
748 void
749 csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
750 void *priv, uint32_t mb_tmo,
751 struct csio_eq_params *eq_ofld_params,
752 void (*cbfn) (struct csio_hw *, struct csio_mb *))
753 {
754 csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
755 csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
756 eq_ofld_params, cbfn);
757 } /* csio_mb_eq_ofld_alloc_write */
758
759 /*
760 * csio_mb_eq_ofld_alloc_write_rsp - Processes the response to an egress
761 * DMA queue allocate/write mailbox.
762 *
763 * @hw: The HW structure.
764 * @mbp: Mailbox structure to initialize.
765 * @retval: Firmware return value.
766 * @eq_ofld_params: (Offload) Egress queue parameters.
767 *
768 */
769 void
770 csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
771 struct csio_mb *mbp, enum fw_retval *ret_val,
772 struct csio_eq_params *eq_ofld_params)
773 {
774 struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
775
776 *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
777
778 if (*ret_val == FW_SUCCESS) {
779 eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
780 ntohl(rsp->eqid_pkd));
781 eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
782 ntohl(rsp->physeqid_pkd));
783 } else
784 eq_ofld_params->eqid = 0;
785
786 } /* csio_mb_eq_ofld_alloc_write_rsp */
787
788 /*
789 * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
790 * specified Egress DMA Queue.
791 *
792 * @hw: The HW structure
793 * @mbp: Mailbox structure to initialize
794 * @priv: Private data area.
795 * @mb_tmo: Mailbox time-out period (in ms).
796 * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed.
797 * @cbfn: The call-back function
798 *
799 *
800 */
801 void
802 csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
803 uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
804 void (*cbfn) (struct csio_hw *, struct csio_mb *))
805 {
806 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
807
808 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
809
810 cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
811 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
812 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
813 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
814 cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
815 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
816 cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
817
818 } /* csio_mb_eq_ofld_free */
819
820 /*
821 * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
822 * condition.
823 *
824 * @ln: The Lnode structure
825 * @mbp: Mailbox structure to initialize
826 * @mb_tmo: Mailbox time-out period (in ms).
827 * @cbfn: The call back function.
828 *
829 *
830 */
831 void
832 csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
833 uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
834 uint8_t cos, bool link_status, uint32_t fcfi,
835 void (*cbfn) (struct csio_hw *, struct csio_mb *))
836 {
837 struct fw_fcoe_link_cmd *cmdp =
838 (struct fw_fcoe_link_cmd *)(mbp->mb);
839
840 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
841
842 cmdp->op_to_portid = htonl((
843 FW_CMD_OP_V(FW_FCOE_LINK_CMD) |
844 FW_CMD_REQUEST_F |
845 FW_CMD_WRITE_F |
846 FW_FCOE_LINK_CMD_PORTID(port_id)));
847 cmdp->sub_opcode_fcfi = htonl(
848 FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
849 FW_FCOE_LINK_CMD_FCFI(fcfi));
850 cmdp->lstatus = link_status;
851 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
852
853 } /* csio_write_fcoe_link_cond_init_mb */
854
855 /*
856 * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
857 * resource information (FW_FCOE_RES_INFO_CMD).
858 *
859 * @hw: The HW structure
860 * @mbp: Mailbox structure to initialize
861 * @mb_tmo: Mailbox time-out period (in ms).
862 * @cbfn: The call-back function
863 *
864 *
865 */
866 void
867 csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
868 uint32_t mb_tmo,
869 void (*cbfn) (struct csio_hw *, struct csio_mb *))
870 {
871 struct fw_fcoe_res_info_cmd *cmdp =
872 (struct fw_fcoe_res_info_cmd *)(mbp->mb);
873
874 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
875
876 cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) |
877 FW_CMD_REQUEST_F |
878 FW_CMD_READ_F));
879
880 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
881
882 } /* csio_fcoe_read_res_info_init_mb */
883
884 /*
885 * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
886 * in the firmware (FW_FCOE_VNP_CMD).
887 *
888 * @ln: The Lnode structure.
889 * @mbp: Mailbox structure to initialize.
890 * @mb_tmo: Mailbox time-out period (in ms).
891 * @fcfi: FCF Index.
892 * @vnpi: vnpi
893 * @iqid: iqid
894 * @vnport_wwnn: vnport WWNN
895 * @vnport_wwpn: vnport WWPN
896 * @cbfn: The call-back function.
897 *
898 *
899 */
900 void
901 csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
902 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
903 uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
904 void (*cbfn) (struct csio_hw *, struct csio_mb *))
905 {
906 struct fw_fcoe_vnp_cmd *cmdp =
907 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
908
909 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
910
911 cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
912 FW_CMD_REQUEST_F |
913 FW_CMD_EXEC_F |
914 FW_FCOE_VNP_CMD_FCFI(fcfi)));
915
916 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
917 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
918
919 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
920
921 cmdp->iqid = htons(iqid);
922
923 if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
924 cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
925
926 if (vnport_wwnn)
927 memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
928 if (vnport_wwpn)
929 memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
930
931 } /* csio_fcoe_vnp_alloc_init_mb */
932
933 /*
934 * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
935 * @ln: The Lnode structure.
936 * @mbp: Mailbox structure to initialize.
937 * @mb_tmo: Mailbox time-out period (in ms).
938 * @fcfi: FCF Index.
939 * @vnpi: vnpi
940 * @cbfn: The call-back handler.
941 */
942 void
943 csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
944 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
945 void (*cbfn) (struct csio_hw *, struct csio_mb *))
946 {
947 struct fw_fcoe_vnp_cmd *cmdp =
948 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
949
950 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
951 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
952 FW_CMD_REQUEST_F |
953 FW_CMD_READ_F |
954 FW_FCOE_VNP_CMD_FCFI(fcfi));
955 cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
956 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
957 }
958
959 /*
960 * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
961 * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
962 *
963 * @ln: The Lnode structure.
964 * @mbp: Mailbox structure to initialize.
965 * @mb_tmo: Mailbox time-out period (in ms).
966 * @fcfi: FCF flow id
967 * @vnpi: VNP flow id
968 * @cbfn: The call-back function.
969 * Return: None
970 */
971 void
972 csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
973 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
974 void (*cbfn) (struct csio_hw *, struct csio_mb *))
975 {
976 struct fw_fcoe_vnp_cmd *cmdp =
977 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
978
979 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
980
981 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
982 FW_CMD_REQUEST_F |
983 FW_CMD_EXEC_F |
984 FW_FCOE_VNP_CMD_FCFI(fcfi));
985 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
986 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
987 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
988 }
989
990 /*
991 * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
992 * FCF records.
993 *
994 * @ln: The Lnode structure
995 * @mbp: Mailbox structure to initialize
996 * @mb_tmo: Mailbox time-out period (in ms).
997 * @fcf_params: FC-Forwarder parameters.
998 * @cbfn: The call-back function
999 *
1000 *
1001 */
1002 void
1003 csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
1004 uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
1005 void (*cbfn) (struct csio_hw *, struct csio_mb *))
1006 {
1007 struct fw_fcoe_fcf_cmd *cmdp =
1008 (struct fw_fcoe_fcf_cmd *)(mbp->mb);
1009
1010 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
1011
1012 cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) |
1013 FW_CMD_REQUEST_F |
1014 FW_CMD_READ_F |
1015 FW_FCOE_FCF_CMD_FCFI(fcfi));
1016 cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
1017
1018 } /* csio_fcoe_read_fcf_init_mb */
1019
1020 void
1021 csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
1022 uint32_t mb_tmo,
1023 struct fw_fcoe_port_cmd_params *portparams,
1024 void (*cbfn)(struct csio_hw *,
1025 struct csio_mb *))
1026 {
1027 struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
1028
1029 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
1030 mbp->mb_size = 64;
1031
1032 cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) |
1033 FW_CMD_REQUEST_F | FW_CMD_READ_F);
1034 cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16));
1035
1036 cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
1037 FW_FCOE_STATS_CMD_PORT(portparams->portid);
1038
1039 cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
1040 FW_FCOE_STATS_CMD_PORT_VALID;
1041
1042 } /* csio_fcoe_read_portparams_init_mb */
1043
1044 void
1045 csio_mb_process_portparams_rsp(struct csio_hw *hw,
1046 struct csio_mb *mbp,
1047 enum fw_retval *retval,
1048 struct fw_fcoe_port_cmd_params *portparams,
1049 struct fw_fcoe_port_stats *portstats)
1050 {
1051 struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
1052 struct fw_fcoe_port_stats stats;
1053 uint8_t *src;
1054 uint8_t *dst;
1055
1056 *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16));
1057
1058 memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
1059
1060 if (*retval == FW_SUCCESS) {
1061 dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
1062 src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
1063 memcpy(dst, src, (portparams->nstats * 8));
1064 if (portparams->idx == 1) {
1065 /* Get the first 6 flits from the Mailbox */
1066 portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
1067 portstats->tx_bcast_frames = stats.tx_bcast_frames;
1068 portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
1069 portstats->tx_mcast_frames = stats.tx_mcast_frames;
1070 portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
1071 portstats->tx_ucast_frames = stats.tx_ucast_frames;
1072 }
1073 if (portparams->idx == 7) {
1074 /* Get the second 6 flits from the Mailbox */
1075 portstats->tx_drop_frames = stats.tx_drop_frames;
1076 portstats->tx_offload_bytes = stats.tx_offload_bytes;
1077 portstats->tx_offload_frames = stats.tx_offload_frames;
1078 #if 0
1079 portstats->rx_pf_bytes = stats.rx_pf_bytes;
1080 portstats->rx_pf_frames = stats.rx_pf_frames;
1081 #endif
1082 portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
1083 portstats->rx_bcast_frames = stats.rx_bcast_frames;
1084 portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
1085 }
1086 if (portparams->idx == 13) {
1087 /* Get the last 4 flits from the Mailbox */
1088 portstats->rx_mcast_frames = stats.rx_mcast_frames;
1089 portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
1090 portstats->rx_ucast_frames = stats.rx_ucast_frames;
1091 portstats->rx_err_frames = stats.rx_err_frames;
1092 }
1093 }
1094 }
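/*
 * Editorial note: an illustrative sketch of how the stats helpers above
 * are driven. Each mailbox response carries only part of the statistics
 * block, so the caller walks it in three chunks with idx 1, 7 and 13 (as
 * the branches above expect), accumulating into one fw_fcoe_port_stats.
 * The chunk sizes and "portid"/"mb_tmo" are assumptions of the sketch.
 *
 *	struct fw_fcoe_port_cmd_params portparams;
 *	struct fw_fcoe_port_stats portstats;
 *	enum fw_retval retval;
 *	static const uint8_t idx[]    = { 1, 7, 13 };
 *	static const uint8_t nstats[] = { 6, 6, 4 };
 *	int i;
 *
 *	memset(&portstats, 0, sizeof(portstats));
 *	for (i = 0; i < 3; i++) {
 *		portparams.portid = portid;
 *		portparams.idx    = idx[i];
 *		portparams.nstats = nstats[i];
 *		csio_fcoe_read_portparams_init_mb(hw, mbp, mb_tmo,
 *						  &portparams, NULL);
 *		if (csio_mb_issue(hw, mbp))
 *			break;
 *		csio_mb_process_portparams_rsp(hw, mbp, &retval,
 *					       &portparams, &portstats);
 *	}
 */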
1095
1096 /* Entry points/APIs for MB module */
1097 /*
1098 * csio_mb_intr_enable - Enable Interrupts from mailboxes.
1099 * @hw: The HW structure
1100 *
1101 * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
1102 */
1103 void
1104 csio_mb_intr_enable(struct csio_hw *hw)
1105 {
1106 csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1107 csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1108 }
1109
1110 /*
1111 * csio_mb_intr_disable - Disable Interrupts from mailboxes.
1112 * @hw: The HW structure
1113 *
1114 * Disables the mailbox interrupt bit in the CIM HostInterruptEnable register.
1115 */
1116 void
1117 csio_mb_intr_disable(struct csio_hw *hw)
1118 {
1119 csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1120 csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1121 }
1122
1123 static void
1124 csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
1125 {
1126 struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
1127
1128 if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
1129 csio_info(hw, "FW print message:\n");
1130 csio_info(hw, "\tdebug->dprtstridx = %d\n",
1131 ntohs(dbg->u.prt.dprtstridx));
1132 csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
1133 ntohl(dbg->u.prt.dprtstrparam0));
1134 csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
1135 ntohl(dbg->u.prt.dprtstrparam1));
1136 csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
1137 ntohl(dbg->u.prt.dprtstrparam2));
1138 csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
1139 ntohl(dbg->u.prt.dprtstrparam3));
1140 } else {
1141 /* This is a FW assertion */
1142 csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
1143 dbg->u.assert.filename_0_7,
1144 ntohl(dbg->u.assert.line),
1145 ntohl(dbg->u.assert.x),
1146 ntohl(dbg->u.assert.y));
1147 }
1148 }
1149
1150 static void
1151 csio_mb_debug_cmd_handler(struct csio_hw *hw)
1152 {
1153 int i;
1154 __be64 cmd[CSIO_MB_MAX_REGS];
1155 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1156 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
1157 int size = sizeof(struct fw_debug_cmd);
1158
1159 /* Copy mailbox data */
1160 for (i = 0; i < size; i += 8)
1161 cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
1162
1163 csio_mb_dump_fw_dbg(hw, cmd);
1164
1165 /* Notify FW of mailbox by setting owner as UP */
1166 csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
1167 ctl_reg);
1168
1169 csio_rd_reg32(hw, ctl_reg);
1170 wmb();
1171 }
1172
1173 /*
1174 * csio_mb_issue - generic routine for issuing Mailbox commands.
1175 * @hw: The HW structure
1176 * @mbp: Mailbox command to issue
1177 *
1178 * Caller should hold hw lock across this call.
1179 */
1180 int
1181 csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
1182 {
1183 uint32_t owner, ctl;
1184 int i;
1185 uint32_t ii;
1186 __be64 *cmd = mbp->mb;
1187 __be64 hdr;
1188 struct csio_mbm *mbm = &hw->mbm;
1189 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1190 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
1191 int size = mbp->mb_size;
1192 int rv = -EINVAL;
1193 struct fw_cmd_hdr *fw_hdr;
1194
1195 /* Determine mode */
1196 if (mbp->mb_cbfn == NULL) {
1197 /* Need to issue/get results in the same context */
1198 if (mbp->tmo < CSIO_MB_POLL_FREQ) {
1199 csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
1200 goto error_out;
1201 }
1202 } else if (!csio_is_host_intr_enabled(hw) ||
1203 !csio_is_hw_intr_enabled(hw)) {
1204 csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
1205 *((uint8_t *)mbp->mb));
1206 goto error_out;
1207 }
1208
1209 if (mbm->mcurrent != NULL) {
1210 /* Queue mbox cmd, if another mbox cmd is active */
1211 if (mbp->mb_cbfn == NULL) {
1212 rv = -EBUSY;
1213 csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
1214 hw->pfn, *((uint8_t *)mbp->mb));
1215
1216 goto error_out;
1217 } else {
1218 list_add_tail(&mbp->list, &mbm->req_q);
1219 CSIO_INC_STATS(mbm, n_activeq);
1220
1221 return 0;
1222 }
1223 }
1224
1225 /* Now get ownership of mailbox */
1226 owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
1227
1228 if (!csio_mb_is_host_owner(owner)) {
1229
1230 for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
1231 owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
1232 /*
1233 * Mailbox unavailable. In immediate mode, fail the command.
1234 * In other modes, enqueue the request.
1235 */
1236 if (!csio_mb_is_host_owner(owner)) {
1237 if (mbp->mb_cbfn == NULL) {
1238 rv = owner ? -EBUSY : -ETIMEDOUT;
1239
1240 csio_dbg(hw,
1241 "Couldnt own Mailbox %x op:0x%x "
1242 "owner:%x\n",
1243 hw->pfn, *((uint8_t *)mbp->mb), owner);
1244 goto error_out;
1245 } else {
1246 if (mbm->mcurrent == NULL) {
1247 csio_err(hw,
1248 "Couldnt own Mailbox %x "
1249 "op:0x%x owner:%x\n",
1250 hw->pfn, *((uint8_t *)mbp->mb),
1251 owner);
1252 csio_err(hw,
1253 "No outstanding driver"
1254 " mailbox as well\n");
1255 goto error_out;
1256 }
1257 }
1258 }
1259 }
1260
1261 /* Mailbox is available, copy mailbox data into it */
1262 for (i = 0; i < size; i += 8) {
1263 csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
1264 cmd++;
1265 }
1266
1267 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1268
1269 /* Start completion timers in non-immediate modes and notify FW */
1270 if (mbp->mb_cbfn != NULL) {
1271 mbm->mcurrent = mbp;
1272 mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
1273 csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
1274 MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
1275 } else
1276 csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
1277 ctl_reg);
1278
1279 /* Flush posted writes */
1280 csio_rd_reg32(hw, ctl_reg);
1281 wmb();
1282
1283 CSIO_INC_STATS(mbm, n_req);
1284
1285 if (mbp->mb_cbfn)
1286 return 0;
1287
1288 /* Poll for completion in immediate mode */
1289 cmd = mbp->mb;
1290
1291 for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
1292 mdelay(CSIO_MB_POLL_FREQ);
1293
1294 /* Check for response */
1295 ctl = csio_rd_reg32(hw, ctl_reg);
1296 if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
1297
1298 if (!(ctl & MBMSGVALID)) {
1299 csio_wr_reg32(hw, 0, ctl_reg);
1300 continue;
1301 }
1302
1303 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1304
1305 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1306 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1307
1308 switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
1309 case FW_DEBUG_CMD:
1310 csio_mb_debug_cmd_handler(hw);
1311 continue;
1312 }
1313
1314 /* Copy response */
1315 for (i = 0; i < size; i += 8)
1316 *cmd++ = cpu_to_be64(csio_rd_reg64
1317 (hw, data_reg + i));
1318 csio_wr_reg32(hw, 0, ctl_reg);
1319
1320 if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
1321 CSIO_INC_STATS(mbm, n_err);
1322
1323 CSIO_INC_STATS(mbm, n_rsp);
1324 return 0;
1325 }
1326 }
1327
1328 CSIO_INC_STATS(mbm, n_tmo);
1329
1330 csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
1331 hw->pfn, *((uint8_t *)cmd));
1332
1333 return -ETIMEDOUT;
1334
1335 error_out:
1336 CSIO_INC_STATS(mbm, n_err);
1337 return rv;
1338 }
1339
1340 /*
1341 * csio_mb_completions - Completion handler for Mailbox commands
1342 * @hw: The HW structure
1343 * @cbfn_q: Completion queue.
1344 *
1345 */
1346 void
1347 csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
1348 {
1349 struct csio_mb *mbp;
1350 struct csio_mbm *mbm = &hw->mbm;
1351 enum fw_retval rv;
1352
1353 while (!list_empty(cbfn_q)) {
1354 mbp = list_first_entry(cbfn_q, struct csio_mb, list);
1355 list_del_init(&mbp->list);
1356
1357 rv = csio_mb_fw_retval(mbp);
1358 if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
1359 CSIO_INC_STATS(mbm, n_err);
1360 else if (rv != FW_HOSTERROR)
1361 CSIO_INC_STATS(mbm, n_rsp);
1362
1363 if (mbp->mb_cbfn)
1364 mbp->mb_cbfn(hw, mbp);
1365 }
1366 }
1367
1368 static void
1369 csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
1370 {
1371 static char *mod_str[] = {
1372 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
1373 };
1374
1375 struct csio_pport *port = &hw->pport[port_id];
1376
1377 if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
1378 csio_info(hw, "Port:%d - port module unplugged\n", port_id);
1379 else if (port->mod_type < ARRAY_SIZE(mod_str))
1380 csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
1381 mod_str[port->mod_type]);
1382 else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
1383 csio_info(hw,
1384 "Port:%d - unsupported optical port module "
1385 "inserted\n", port_id);
1386 else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
1387 csio_info(hw,
1388 "Port:%d - unknown port module inserted, forcing "
1389 "TWINAX\n", port_id);
1390 else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
1391 csio_info(hw, "Port:%d - transceiver module error\n", port_id);
1392 else
1393 csio_info(hw, "Port:%d - unknown module type %d inserted\n",
1394 port_id, port->mod_type);
1395 }
1396
1397 int
1398 csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
1399 {
1400 uint8_t opcode = *(uint8_t *)cmd;
1401 struct fw_port_cmd *pcmd;
1402 uint8_t port_id;
1403 uint32_t link_status;
1404 uint16_t action;
1405 uint8_t mod_type;
1406
1407 if (opcode == FW_PORT_CMD) {
1408 pcmd = (struct fw_port_cmd *)cmd;
1409 port_id = FW_PORT_CMD_PORTID_GET(
1410 ntohl(pcmd->op_to_portid));
1411 action = FW_PORT_CMD_ACTION_GET(
1412 ntohl(pcmd->action_to_len16));
1413 if (action != FW_PORT_ACTION_GET_PORT_INFO) {
1414 csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
1415 action);
1416 return -EINVAL;
1417 }
1418
1419 link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
1420 mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
1421
1422 hw->pport[port_id].link_status =
1423 FW_PORT_CMD_LSTATUS_GET(link_status);
1424 hw->pport[port_id].link_speed =
1425 FW_PORT_CMD_LSPEED_GET(link_status);
1426
1427 csio_info(hw, "Port:%x - LINK %s\n", port_id,
1428 FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
1429
1430 if (mod_type != hw->pport[port_id].mod_type) {
1431 hw->pport[port_id].mod_type = mod_type;
1432 csio_mb_portmod_changed(hw, port_id);
1433 }
1434 } else if (opcode == FW_DEBUG_CMD) {
1435 csio_mb_dump_fw_dbg(hw, cmd);
1436 } else {
1437 csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
1438 return -EINVAL;
1439 }
1440
1441 return 0;
1442 }
1443
1444 /*
1445 * csio_mb_isr_handler - Handle mailboxes related interrupts.
1446 * @hw: The HW structure
1447 *
1448 * Called from the ISR to handle Mailbox related interrupts.
1449 * HW Lock should be held across this call.
1450 */
1451 int
1452 csio_mb_isr_handler(struct csio_hw *hw)
1453 {
1454 struct csio_mbm *mbm = &hw->mbm;
1455 struct csio_mb *mbp = mbm->mcurrent;
1456 __be64 *cmd;
1457 uint32_t ctl, cim_cause, pl_cause;
1458 int i;
1459 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1460 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
1461 int size;
1462 __be64 hdr;
1463 struct fw_cmd_hdr *fw_hdr;
1464
1465 pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
1466 cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
1467
1468 if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
1469 CSIO_INC_STATS(hw, n_mbint_unexp);
1470 return -EINVAL;
1471 }
1472
1473 /*
1474 * The cause registers below HAVE to be cleared in the SAME
1475 * order as below: The low level cause register followed by
1476 * the upper level cause register. In other words, CIM-cause
1477 * first followed by PL-Cause next.
1478 */
1479 csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
1480 csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
1481
1482 ctl = csio_rd_reg32(hw, ctl_reg);
1483
1484 if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
1485
1486 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1487
1488 if (!(ctl & MBMSGVALID)) {
1489 csio_warn(hw,
1490 "Stray mailbox interrupt recvd,"
1491 " mailbox data not valid\n");
1492 csio_wr_reg32(hw, 0, ctl_reg);
1493 /* Flush */
1494 csio_rd_reg32(hw, ctl_reg);
1495 return -EINVAL;
1496 }
1497
1498 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1499 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1500
1501 switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
1502 case FW_DEBUG_CMD:
1503 csio_mb_debug_cmd_handler(hw);
1504 return -EINVAL;
1505 #if 0
1506 case FW_ERROR_CMD:
1507 case FW_INITIALIZE_CMD: /* When we are not master */
1508 #endif
1509 }
1510
1511 CSIO_ASSERT(mbp != NULL);
1512
1513 cmd = mbp->mb;
1514 size = mbp->mb_size;
1515 /* Get response */
1516 for (i = 0; i < size; i += 8)
1517 *cmd++ = cpu_to_be64(csio_rd_reg64
1518 (hw, data_reg + i));
1519
1520 csio_wr_reg32(hw, 0, ctl_reg);
1521 /* Flush */
1522 csio_rd_reg32(hw, ctl_reg);
1523
1524 mbm->mcurrent = NULL;
1525
1526 /* Add completion to tail of cbfn queue */
1527 list_add_tail(&mbp->list, &mbm->cbfn_q);
1528 CSIO_INC_STATS(mbm, n_cbfnq);
1529
1530 /*
1531 * Enqueue event to EventQ. Events processing happens
1532 * in Event worker thread context
1533 */
1534 if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
1535 CSIO_INC_STATS(hw, n_evt_drop);
1536
1537 return 0;
1538
1539 } else {
1540 /*
1541 * We can get here if mailbox MSIX vector is shared,
1542 * or in INTx case. Or a stray interrupt.
1543 */
1544 csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
1545 CSIO_INC_STATS(hw, n_int_stray);
1546 return -EINVAL;
1547 }
1548 }
1549
1550 /*
1551 * csio_mb_tmo_handler - Timeout handler
1552 * @hw: The HW structure
1553 *
1554 */
1555 struct csio_mb *
1556 csio_mb_tmo_handler(struct csio_hw *hw)
1557 {
1558 struct csio_mbm *mbm = &hw->mbm;
1559 struct csio_mb *mbp = mbm->mcurrent;
1560 struct fw_cmd_hdr *fw_hdr;
1561
1562 /*
1563 * Could be a race b/w the completion handler and the timer
1564 * and the completion handler won that race.
1565 */
1566 if (mbp == NULL) {
1567 CSIO_DB_ASSERT(0);
1568 return NULL;
1569 }
1570
1571 fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
1572
1573 csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
1574 FW_CMD_OP_G(ntohl(fw_hdr->hi)));
1575
1576 mbm->mcurrent = NULL;
1577 CSIO_INC_STATS(mbm, n_tmo);
1578 fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT));
1579
1580 return mbp;
1581 }
1582
1583 /*
1584 * csio_mb_cancel_all - Cancel all waiting commands.
1585 * @hw: The HW structure
1586 * @cbfn_q: The callback queue.
1587 *
1588 * Caller should hold hw lock across this call.
1589 */
1590 void
1591 csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
1592 {
1593 struct csio_mb *mbp;
1594 struct csio_mbm *mbm = &hw->mbm;
1595 struct fw_cmd_hdr *hdr;
1596 struct list_head *tmp;
1597
1598 if (mbm->mcurrent) {
1599 mbp = mbm->mcurrent;
1600
1601 /* Stop mailbox completion timer */
1602 del_timer_sync(&mbm->timer);
1603
1604 /* Add completion to tail of cbfn queue */
1605 list_add_tail(&mbp->list, cbfn_q);
1606 mbm->mcurrent = NULL;
1607 }
1608
1609 if (!list_empty(&mbm->req_q)) {
1610 list_splice_tail_init(&mbm->req_q, cbfn_q);
1611 mbm->stats.n_activeq = 0;
1612 }
1613
1614 if (!list_empty(&mbm->cbfn_q)) {
1615 list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
1616 mbm->stats.n_cbfnq = 0;
1617 }
1618
1619 if (list_empty(cbfn_q))
1620 return;
1621
1622 list_for_each(tmp, cbfn_q) {
1623 mbp = (struct csio_mb *)tmp;
1624 hdr = (struct fw_cmd_hdr *)(mbp->mb);
1625
1626 csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
1627 hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
1628
1629 CSIO_INC_STATS(mbm, n_cancel);
1630 hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR));
1631 }
1632 }
1633
1634 /*
1635 * csio_mbm_init - Initialize Mailbox module
1636 * @mbm: Mailbox module
1637 * @hw: The HW structure
1638 * @timer: Timing function for interrupting mailboxes
1639 *
1640 * Initialize timer and the request/response queues.
1641 */
1642 int
1643 csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
1644 void (*timer_fn)(uintptr_t))
1645 {
1646 struct timer_list *timer = &mbm->timer;
1647
1648 init_timer(timer);
1649 timer->function = timer_fn;
1650 timer->data = (unsigned long)hw;
1651
1652 INIT_LIST_HEAD(&mbm->req_q);
1653 INIT_LIST_HEAD(&mbm->cbfn_q);
1654 csio_set_mb_intr_idx(mbm, -1);
1655
1656 return 0;
1657 }
1658
1659 /*
1660 * csio_mbm_exit - Uninitialize mailbox module
1661 * @mbm: Mailbox module
1662 *
1663 * Stop timer.
1664 */
1665 void
1666 csio_mbm_exit(struct csio_mbm *mbm)
1667 {
1668 del_timer_sync(&mbm->timer);
1669
1670 CSIO_DB_ASSERT(mbm->mcurrent == NULL);
1671 CSIO_DB_ASSERT(list_empty(&mbm->req_q));
1672 CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
1673 }