net/mlx5: Extend mlx5_core to support ConnectX-4 Ethernet functionality
deliverable/linux.git: drivers/net/ethernet/mellanox/mlx5/core/cmd.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

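/* Allocate and initialize a command work entry. GFP_ATOMIC is used when a
 * completion callback is supplied, since asynchronous callers may not sleep.
 */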
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}

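/* Hand out the next command token, skipping zero. The token is stamped
 * into the command layout and each mailbox block as a consistency check.
 */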
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

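/* Reserve a free command-queue slot from the allocation bitmask; returns
 * the slot index, or -ENOMEM if all regular slots are in use. free_ent()
 * returns a slot to the bitmask.
 */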
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

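/* Command integrity is protected by simple XOR-8 signatures: the XOR of a
 * signed region, including its signature byte, must come out to 0xff. The
 * helpers below compute and verify these signatures over the command
 * layout and over each mailbox block of a message chain.
 */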
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

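/* In polling mode, sleep-poll the ownership bit until firmware hands the
 * descriptor back to software or the command timeout (plus slack) expires.
 */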
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_ENABLE_HCA:
		return "ENABLE_HCA";

	case MLX5_CMD_OP_DISABLE_HCA:
		return "DISABLE_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return "CREATE_XRC_SRQ";

	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
		return "DESTROY_XRC_SRQ";

	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		return "QUERY_XRC_SRQ";

	case MLX5_CMD_OP_ARM_XRC_SRQ:
		return "ARM_XRC_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETTACH_FROM_MCG:
		return "DETTACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";

	default: return "unknown command opcode";
	}
}

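/* Dump a command's descriptor and mailbox chain to the debug log, either
 * data blocks only (when MLX5_CMD_DATA is set in the debug mask) or full
 * blocks including their control fields.
 */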
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

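/* Work handler that actually posts a command to the HW: takes the pages or
 * regular semaphore, reserves a slot, fills and signs the command layout,
 * then rings the doorbell. In polling mode it also waits for completion
 * inline and invokes the completion handler itself.
 */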
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

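/* Wait for a posted command to complete. In events mode the timeout is
 * enforced here; in polling mode cmd_work_handler() already bounded the
 * wait, so the completion is awaited unconditionally. On timeout the slot
 * is deliberately not reclaimed, leaking one command resource.
 */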
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		ds = ent->ts2 - ent->ts1;
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

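/* Copy a caller buffer into a command message: the first bytes land in the
 * inline data of the descriptor, the remainder is scattered across the
 * chained mailbox blocks. mlx5_copy_from_msg() below is the gather-side
 * inverse.
 */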
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

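/* Allocate a command message large enough for @size bytes: whatever does
 * not fit in the inline area is carried in DMA-coherent mailbox blocks,
 * built back-to-front so each block can point at its successor and carry
 * its block number.
 */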
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

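/* Switching between event and polling completion modes: take every
 * semaphore slot to quiesce the command interface, flush the work queue,
 * flip the mode, then release the slots.
 */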
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

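/* Completion path, reached from the EQ handler (events mode) or from
 * cmd_work_handler (polling mode) with a bit set for each finished slot:
 * copies out inline results, verifies signatures, records stats, then
 * either runs the async callback or completes the synchronous waiter.
 */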
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

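/* Grab an input message from the size-appropriate cache when one is
 * available; fall back to a fresh allocation when the size fits neither
 * cache class or the cache list is empty.
 */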
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

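/* Illustrative sketch of a synchronous caller, not taken from this file:
 * it assumes the mlx5_enable_hca_mbox_{in,out} pair declared alongside the
 * command opcodes in include/linux/mlx5/device.h. The caller zeroes the
 * mailboxes, sets the opcode, executes, then maps the outbox status to an
 * errno with mlx5_cmd_status_to_err() (defined at the end of this file).
 *
 *	struct mlx5_enable_hca_mbox_in in;
 *	struct mlx5_enable_hca_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */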
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

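/* The HW command queue page must be 4K aligned. Try a single-page coherent
 * allocation first; if the returned buffer happens to be unaligned, redo
 * the allocation with enough slack to align the CPU and DMA addresses by
 * hand.
 */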
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

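/* Bring up the command interface: verify the command interface revision,
 * create the mailbox pool and command page, read the queue geometry from
 * the init segment, hand the queue address to firmware and start in
 * polling mode with a message cache and a single-threaded work queue.
 */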
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}