/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <asm-generic/kmap_types.h>
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/pci.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/slab.h>
39 #include <linux/delay.h>
40 #include <linux/random.h>
41 #include <linux/io-mapping.h>
42 #include <linux/mlx5/driver.h>
43 #include <linux/debugfs.h>
45 #include "mlx5_core.h"
59 LONG_LIST_SIZE
= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE
) * 8 + 16 +
60 MLX5_CMD_DATA_BLOCK_SIZE
,
61 MED_LIST_SIZE
= 16 + MLX5_CMD_DATA_BLOCK_SIZE
,
65 MLX5_CMD_DELIVERY_STAT_OK
= 0x0,
66 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR
= 0x1,
67 MLX5_CMD_DELIVERY_STAT_TOK_ERR
= 0x2,
68 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR
= 0x3,
69 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR
= 0x4,
70 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR
= 0x5,
71 MLX5_CMD_DELIVERY_STAT_FW_ERR
= 0x6,
72 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR
= 0x7,
73 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR
= 0x8,
74 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR
= 0x9,
75 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR
= 0x10,
79 MLX5_CMD_STAT_OK
= 0x0,
80 MLX5_CMD_STAT_INT_ERR
= 0x1,
81 MLX5_CMD_STAT_BAD_OP_ERR
= 0x2,
82 MLX5_CMD_STAT_BAD_PARAM_ERR
= 0x3,
83 MLX5_CMD_STAT_BAD_SYS_STATE_ERR
= 0x4,
84 MLX5_CMD_STAT_BAD_RES_ERR
= 0x5,
85 MLX5_CMD_STAT_RES_BUSY
= 0x6,
86 MLX5_CMD_STAT_LIM_ERR
= 0x8,
87 MLX5_CMD_STAT_BAD_RES_STATE_ERR
= 0x9,
88 MLX5_CMD_STAT_IX_ERR
= 0xa,
89 MLX5_CMD_STAT_NO_RES_ERR
= 0xf,
90 MLX5_CMD_STAT_BAD_INP_LEN_ERR
= 0x50,
91 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR
= 0x51,
92 MLX5_CMD_STAT_BAD_QP_STATE_ERR
= 0x10,
93 MLX5_CMD_STAT_BAD_PKT_ERR
= 0x30,
94 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR
= 0x40,
97 static struct mlx5_cmd_work_ent
*alloc_cmd(struct mlx5_cmd
*cmd
,
98 struct mlx5_cmd_msg
*in
,
99 struct mlx5_cmd_msg
*out
,
100 void *uout
, int uout_size
,
102 void *context
, int page_queue
)
104 gfp_t alloc_flags
= cbk
? GFP_ATOMIC
: GFP_KERNEL
;
105 struct mlx5_cmd_work_ent
*ent
;
107 ent
= kzalloc(sizeof(*ent
), alloc_flags
);
109 return ERR_PTR(-ENOMEM
);
114 ent
->uout_size
= uout_size
;
116 ent
->context
= context
;
118 ent
->page_queue
= page_queue
;
123 static u8
alloc_token(struct mlx5_cmd
*cmd
)
127 spin_lock(&cmd
->token_lock
);
128 token
= cmd
->token
++ % 255 + 1;
129 spin_unlock(&cmd
->token_lock
);
134 static int alloc_ent(struct mlx5_cmd
*cmd
)
139 spin_lock_irqsave(&cmd
->alloc_lock
, flags
);
140 ret
= find_first_bit(&cmd
->bitmask
, cmd
->max_reg_cmds
);
141 if (ret
< cmd
->max_reg_cmds
)
142 clear_bit(ret
, &cmd
->bitmask
);
143 spin_unlock_irqrestore(&cmd
->alloc_lock
, flags
);
145 return ret
< cmd
->max_reg_cmds
? ret
: -ENOMEM
;
148 static void free_ent(struct mlx5_cmd
*cmd
, int idx
)
152 spin_lock_irqsave(&cmd
->alloc_lock
, flags
);
153 set_bit(idx
, &cmd
->bitmask
);
154 spin_unlock_irqrestore(&cmd
->alloc_lock
, flags
);
157 static struct mlx5_cmd_layout
*get_inst(struct mlx5_cmd
*cmd
, int idx
)
159 return cmd
->cmd_buf
+ (idx
<< cmd
->log_stride
);
162 static u8
xor8_buf(void *buf
, int len
)
168 for (i
= 0; i
< len
; i
++)
174 static int verify_block_sig(struct mlx5_cmd_prot_block
*block
)
176 if (xor8_buf(block
->rsvd0
, sizeof(*block
) - sizeof(block
->data
) - 1) != 0xff)
179 if (xor8_buf(block
, sizeof(*block
)) != 0xff)
185 static void calc_block_sig(struct mlx5_cmd_prot_block
*block
, u8 token
,
188 block
->token
= token
;
190 block
->ctrl_sig
= ~xor8_buf(block
->rsvd0
, sizeof(*block
) -
191 sizeof(block
->data
) - 2);
192 block
->sig
= ~xor8_buf(block
, sizeof(*block
) - 1);
196 static void calc_chain_sig(struct mlx5_cmd_msg
*msg
, u8 token
, int csum
)
198 struct mlx5_cmd_mailbox
*next
= msg
->next
;
201 calc_block_sig(next
->buf
, token
, csum
);
206 static void set_signature(struct mlx5_cmd_work_ent
*ent
, int csum
)
208 ent
->lay
->sig
= ~xor8_buf(ent
->lay
, sizeof(*ent
->lay
));
209 calc_chain_sig(ent
->in
, ent
->token
, csum
);
210 calc_chain_sig(ent
->out
, ent
->token
, csum
);
213 static void poll_timeout(struct mlx5_cmd_work_ent
*ent
)
215 unsigned long poll_end
= jiffies
+ msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC
+ 1000);
219 own
= ent
->lay
->status_own
;
220 if (!(own
& CMD_OWNER_HW
)) {
224 usleep_range(5000, 10000);
225 } while (time_before(jiffies
, poll_end
));
227 ent
->ret
= -ETIMEDOUT
;
/* Release a work entry allocated by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
236 static int verify_signature(struct mlx5_cmd_work_ent
*ent
)
238 struct mlx5_cmd_mailbox
*next
= ent
->out
->next
;
242 sig
= xor8_buf(ent
->lay
, sizeof(*ent
->lay
));
247 err
= verify_block_sig(next
->buf
);
257 static void dump_buf(void *buf
, int size
, int data_only
, int offset
)
262 for (i
= 0; i
< size
; i
+= 16) {
263 pr_debug("%03x: %08x %08x %08x %08x\n", offset
, be32_to_cpu(p
[0]),
264 be32_to_cpu(p
[1]), be32_to_cpu(p
[2]),
273 const char *mlx5_command_str(int command
)
276 case MLX5_CMD_OP_QUERY_HCA_CAP
:
277 return "QUERY_HCA_CAP";
279 case MLX5_CMD_OP_SET_HCA_CAP
:
280 return "SET_HCA_CAP";
282 case MLX5_CMD_OP_QUERY_ADAPTER
:
283 return "QUERY_ADAPTER";
285 case MLX5_CMD_OP_INIT_HCA
:
288 case MLX5_CMD_OP_TEARDOWN_HCA
:
289 return "TEARDOWN_HCA";
291 case MLX5_CMD_OP_ENABLE_HCA
:
292 return "MLX5_CMD_OP_ENABLE_HCA";
294 case MLX5_CMD_OP_DISABLE_HCA
:
295 return "MLX5_CMD_OP_DISABLE_HCA";
297 case MLX5_CMD_OP_QUERY_PAGES
:
298 return "QUERY_PAGES";
300 case MLX5_CMD_OP_MANAGE_PAGES
:
301 return "MANAGE_PAGES";
303 case MLX5_CMD_OP_CREATE_MKEY
:
304 return "CREATE_MKEY";
306 case MLX5_CMD_OP_QUERY_MKEY
:
309 case MLX5_CMD_OP_DESTROY_MKEY
:
310 return "DESTROY_MKEY";
312 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS
:
313 return "QUERY_SPECIAL_CONTEXTS";
315 case MLX5_CMD_OP_CREATE_EQ
:
318 case MLX5_CMD_OP_DESTROY_EQ
:
321 case MLX5_CMD_OP_QUERY_EQ
:
324 case MLX5_CMD_OP_CREATE_CQ
:
327 case MLX5_CMD_OP_DESTROY_CQ
:
330 case MLX5_CMD_OP_QUERY_CQ
:
333 case MLX5_CMD_OP_MODIFY_CQ
:
336 case MLX5_CMD_OP_CREATE_QP
:
339 case MLX5_CMD_OP_DESTROY_QP
:
342 case MLX5_CMD_OP_RST2INIT_QP
:
343 return "RST2INIT_QP";
345 case MLX5_CMD_OP_INIT2RTR_QP
:
346 return "INIT2RTR_QP";
348 case MLX5_CMD_OP_RTR2RTS_QP
:
351 case MLX5_CMD_OP_RTS2RTS_QP
:
354 case MLX5_CMD_OP_SQERR2RTS_QP
:
355 return "SQERR2RTS_QP";
357 case MLX5_CMD_OP_2ERR_QP
:
360 case MLX5_CMD_OP_2RST_QP
:
363 case MLX5_CMD_OP_QUERY_QP
:
366 case MLX5_CMD_OP_MAD_IFC
:
369 case MLX5_CMD_OP_INIT2INIT_QP
:
370 return "INIT2INIT_QP";
372 case MLX5_CMD_OP_CREATE_PSV
:
375 case MLX5_CMD_OP_DESTROY_PSV
:
376 return "DESTROY_PSV";
378 case MLX5_CMD_OP_CREATE_SRQ
:
381 case MLX5_CMD_OP_DESTROY_SRQ
:
382 return "DESTROY_SRQ";
384 case MLX5_CMD_OP_QUERY_SRQ
:
387 case MLX5_CMD_OP_ARM_RQ
:
390 case MLX5_CMD_OP_RESIZE_SRQ
:
393 case MLX5_CMD_OP_ALLOC_PD
:
396 case MLX5_CMD_OP_DEALLOC_PD
:
399 case MLX5_CMD_OP_ALLOC_UAR
:
402 case MLX5_CMD_OP_DEALLOC_UAR
:
403 return "DEALLOC_UAR";
405 case MLX5_CMD_OP_ATTACH_TO_MCG
:
406 return "ATTACH_TO_MCG";
408 case MLX5_CMD_OP_DETACH_FROM_MCG
:
409 return "DETACH_FROM_MCG";
411 case MLX5_CMD_OP_ALLOC_XRCD
:
414 case MLX5_CMD_OP_DEALLOC_XRCD
:
415 return "DEALLOC_XRCD";
417 case MLX5_CMD_OP_ACCESS_REG
:
418 return "MLX5_CMD_OP_ACCESS_REG";
420 default: return "unknown command opcode";
424 static void dump_command(struct mlx5_core_dev
*dev
,
425 struct mlx5_cmd_work_ent
*ent
, int input
)
427 u16 op
= be16_to_cpu(((struct mlx5_inbox_hdr
*)(ent
->lay
->in
))->opcode
);
428 struct mlx5_cmd_msg
*msg
= input
? ent
->in
: ent
->out
;
429 struct mlx5_cmd_mailbox
*next
= msg
->next
;
434 data_only
= !!(mlx5_core_debug_mask
& (1 << MLX5_CMD_DATA
));
437 mlx5_core_dbg_mask(dev
, 1 << MLX5_CMD_DATA
,
438 "dump command data %s(0x%x) %s\n",
439 mlx5_command_str(op
), op
,
440 input
? "INPUT" : "OUTPUT");
442 mlx5_core_dbg(dev
, "dump command %s(0x%x) %s\n",
443 mlx5_command_str(op
), op
,
444 input
? "INPUT" : "OUTPUT");
448 dump_buf(ent
->lay
->in
, sizeof(ent
->lay
->in
), 1, offset
);
449 offset
+= sizeof(ent
->lay
->in
);
451 dump_buf(ent
->lay
->out
, sizeof(ent
->lay
->out
), 1, offset
);
452 offset
+= sizeof(ent
->lay
->out
);
455 dump_buf(ent
->lay
, sizeof(*ent
->lay
), 0, offset
);
456 offset
+= sizeof(*ent
->lay
);
459 while (next
&& offset
< msg
->len
) {
461 dump_len
= min_t(int, MLX5_CMD_DATA_BLOCK_SIZE
, msg
->len
- offset
);
462 dump_buf(next
->buf
, dump_len
, 1, offset
);
463 offset
+= MLX5_CMD_DATA_BLOCK_SIZE
;
465 mlx5_core_dbg(dev
, "command block:\n");
466 dump_buf(next
->buf
, sizeof(struct mlx5_cmd_prot_block
), 0, offset
);
467 offset
+= sizeof(struct mlx5_cmd_prot_block
);
476 static void cmd_work_handler(struct work_struct
*work
)
478 struct mlx5_cmd_work_ent
*ent
= container_of(work
, struct mlx5_cmd_work_ent
, work
);
479 struct mlx5_cmd
*cmd
= ent
->cmd
;
480 struct mlx5_core_dev
*dev
= container_of(cmd
, struct mlx5_core_dev
, cmd
);
481 struct mlx5_cmd_layout
*lay
;
482 struct semaphore
*sem
;
484 sem
= ent
->page_queue
? &cmd
->pages_sem
: &cmd
->sem
;
486 if (!ent
->page_queue
) {
487 ent
->idx
= alloc_ent(cmd
);
489 mlx5_core_err(dev
, "failed to allocate command entry\n");
494 ent
->idx
= cmd
->max_reg_cmds
;
497 ent
->token
= alloc_token(cmd
);
498 cmd
->ent_arr
[ent
->idx
] = ent
;
499 lay
= get_inst(cmd
, ent
->idx
);
501 memset(lay
, 0, sizeof(*lay
));
502 memcpy(lay
->in
, ent
->in
->first
.data
, sizeof(lay
->in
));
503 ent
->op
= be32_to_cpu(lay
->in
[0]) >> 16;
505 lay
->in_ptr
= cpu_to_be64(ent
->in
->next
->dma
);
506 lay
->inlen
= cpu_to_be32(ent
->in
->len
);
508 lay
->out_ptr
= cpu_to_be64(ent
->out
->next
->dma
);
509 lay
->outlen
= cpu_to_be32(ent
->out
->len
);
510 lay
->type
= MLX5_PCI_CMD_XPORT
;
511 lay
->token
= ent
->token
;
512 lay
->status_own
= CMD_OWNER_HW
;
513 set_signature(ent
, !cmd
->checksum_disabled
);
514 dump_command(dev
, ent
, 1);
515 ent
->ts1
= ktime_get_ns();
517 /* ring doorbell after the descriptor is valid */
519 iowrite32be(1 << ent
->idx
, &dev
->iseg
->cmd_dbell
);
520 mlx5_core_dbg(dev
, "write 0x%x to command doorbell\n", 1 << ent
->idx
);
522 if (cmd
->mode
== CMD_MODE_POLLING
) {
524 /* make sure we read the descriptor after ownership is SW */
526 mlx5_cmd_comp_handler(dev
, 1UL << ent
->idx
);
530 static const char *deliv_status_to_str(u8 status
)
533 case MLX5_CMD_DELIVERY_STAT_OK
:
535 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR
:
536 return "signature error";
537 case MLX5_CMD_DELIVERY_STAT_TOK_ERR
:
538 return "token error";
539 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR
:
540 return "bad block number";
541 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR
:
542 return "output pointer not aligned to block size";
543 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR
:
544 return "input pointer not aligned to block size";
545 case MLX5_CMD_DELIVERY_STAT_FW_ERR
:
546 return "firmware internal error";
547 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR
:
548 return "command input length error";
549 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR
:
550 return "command ouput length error";
551 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR
:
552 return "reserved fields not cleared";
553 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR
:
554 return "bad command descriptor type";
556 return "unknown status code";
560 static u16
msg_to_opcode(struct mlx5_cmd_msg
*in
)
562 struct mlx5_inbox_hdr
*hdr
= (struct mlx5_inbox_hdr
*)(in
->first
.data
);
564 return be16_to_cpu(hdr
->opcode
);
567 static int wait_func(struct mlx5_core_dev
*dev
, struct mlx5_cmd_work_ent
*ent
)
569 unsigned long timeout
= msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC
);
570 struct mlx5_cmd
*cmd
= &dev
->cmd
;
573 if (cmd
->mode
== CMD_MODE_POLLING
) {
574 wait_for_completion(&ent
->done
);
577 if (!wait_for_completion_timeout(&ent
->done
, timeout
))
582 if (err
== -ETIMEDOUT
) {
583 mlx5_core_warn(dev
, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
584 mlx5_command_str(msg_to_opcode(ent
->in
)),
585 msg_to_opcode(ent
->in
));
587 mlx5_core_dbg(dev
, "err %d, delivery status %s(%d)\n",
588 err
, deliv_status_to_str(ent
->status
), ent
->status
);
594 * 1. Callback functions may not sleep
595 * 2. page queue commands do not support asynchrous completion
597 static int mlx5_cmd_invoke(struct mlx5_core_dev
*dev
, struct mlx5_cmd_msg
*in
,
598 struct mlx5_cmd_msg
*out
, void *uout
, int uout_size
,
599 mlx5_cmd_cbk_t callback
,
600 void *context
, int page_queue
, u8
*status
)
602 struct mlx5_cmd
*cmd
= &dev
->cmd
;
603 struct mlx5_cmd_work_ent
*ent
;
604 struct mlx5_cmd_stats
*stats
;
609 if (callback
&& page_queue
)
612 ent
= alloc_cmd(cmd
, in
, out
, uout
, uout_size
, callback
, context
,
618 init_completion(&ent
->done
);
620 INIT_WORK(&ent
->work
, cmd_work_handler
);
622 cmd_work_handler(&ent
->work
);
623 } else if (!queue_work(cmd
->wq
, &ent
->work
)) {
624 mlx5_core_warn(dev
, "failed to queue work\n");
630 err
= wait_func(dev
, ent
);
631 if (err
== -ETIMEDOUT
)
634 ds
= ent
->ts2
- ent
->ts1
;
635 op
= be16_to_cpu(((struct mlx5_inbox_hdr
*)in
->first
.data
)->opcode
);
636 if (op
< ARRAY_SIZE(cmd
->stats
)) {
637 stats
= &cmd
->stats
[op
];
638 spin_lock_irq(&stats
->lock
);
641 spin_unlock_irq(&stats
->lock
);
643 mlx5_core_dbg_mask(dev
, 1 << MLX5_CMD_TIME
,
644 "fw exec time for %s is %lld nsec\n",
645 mlx5_command_str(op
), ds
);
646 *status
= ent
->status
;
658 static ssize_t
dbg_write(struct file
*filp
, const char __user
*buf
,
659 size_t count
, loff_t
*pos
)
661 struct mlx5_core_dev
*dev
= filp
->private_data
;
662 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
666 if (!dbg
->in_msg
|| !dbg
->out_msg
)
669 if (copy_from_user(lbuf
, buf
, sizeof(lbuf
)))
672 lbuf
[sizeof(lbuf
) - 1] = 0;
674 if (strcmp(lbuf
, "go"))
677 err
= mlx5_cmd_exec(dev
, dbg
->in_msg
, dbg
->inlen
, dbg
->out_msg
, dbg
->outlen
);
679 return err
? err
: count
;
683 static const struct file_operations fops
= {
684 .owner
= THIS_MODULE
,
689 static int mlx5_copy_to_msg(struct mlx5_cmd_msg
*to
, void *from
, int size
)
691 struct mlx5_cmd_prot_block
*block
;
692 struct mlx5_cmd_mailbox
*next
;
698 copy
= min_t(int, size
, sizeof(to
->first
.data
));
699 memcpy(to
->first
.data
, from
, copy
);
710 copy
= min_t(int, size
, MLX5_CMD_DATA_BLOCK_SIZE
);
712 memcpy(block
->data
, from
, copy
);
721 static int mlx5_copy_from_msg(void *to
, struct mlx5_cmd_msg
*from
, int size
)
723 struct mlx5_cmd_prot_block
*block
;
724 struct mlx5_cmd_mailbox
*next
;
730 copy
= min_t(int, size
, sizeof(from
->first
.data
));
731 memcpy(to
, from
->first
.data
, copy
);
742 copy
= min_t(int, size
, MLX5_CMD_DATA_BLOCK_SIZE
);
745 memcpy(to
, block
->data
, copy
);
754 static struct mlx5_cmd_mailbox
*alloc_cmd_box(struct mlx5_core_dev
*dev
,
757 struct mlx5_cmd_mailbox
*mailbox
;
759 mailbox
= kmalloc(sizeof(*mailbox
), flags
);
761 return ERR_PTR(-ENOMEM
);
763 mailbox
->buf
= pci_pool_alloc(dev
->cmd
.pool
, flags
,
766 mlx5_core_dbg(dev
, "failed allocation\n");
768 return ERR_PTR(-ENOMEM
);
770 memset(mailbox
->buf
, 0, sizeof(struct mlx5_cmd_prot_block
));
771 mailbox
->next
= NULL
;
776 static void free_cmd_box(struct mlx5_core_dev
*dev
,
777 struct mlx5_cmd_mailbox
*mailbox
)
779 pci_pool_free(dev
->cmd
.pool
, mailbox
->buf
, mailbox
->dma
);
783 static struct mlx5_cmd_msg
*mlx5_alloc_cmd_msg(struct mlx5_core_dev
*dev
,
784 gfp_t flags
, int size
)
786 struct mlx5_cmd_mailbox
*tmp
, *head
= NULL
;
787 struct mlx5_cmd_prot_block
*block
;
788 struct mlx5_cmd_msg
*msg
;
794 msg
= kzalloc(sizeof(*msg
), flags
);
796 return ERR_PTR(-ENOMEM
);
798 blen
= size
- min_t(int, sizeof(msg
->first
.data
), size
);
799 n
= (blen
+ MLX5_CMD_DATA_BLOCK_SIZE
- 1) / MLX5_CMD_DATA_BLOCK_SIZE
;
801 for (i
= 0; i
< n
; i
++) {
802 tmp
= alloc_cmd_box(dev
, flags
);
804 mlx5_core_warn(dev
, "failed allocating block\n");
811 block
->next
= cpu_to_be64(tmp
->next
? tmp
->next
->dma
: 0);
812 block
->block_num
= cpu_to_be32(n
- i
- 1);
822 free_cmd_box(dev
, head
);
830 static void mlx5_free_cmd_msg(struct mlx5_core_dev
*dev
,
831 struct mlx5_cmd_msg
*msg
)
833 struct mlx5_cmd_mailbox
*head
= msg
->next
;
834 struct mlx5_cmd_mailbox
*next
;
838 free_cmd_box(dev
, head
);
844 static ssize_t
data_write(struct file
*filp
, const char __user
*buf
,
845 size_t count
, loff_t
*pos
)
847 struct mlx5_core_dev
*dev
= filp
->private_data
;
848 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
859 ptr
= kzalloc(count
, GFP_KERNEL
);
863 if (copy_from_user(ptr
, buf
, count
)) {
879 static ssize_t
data_read(struct file
*filp
, char __user
*buf
, size_t count
,
882 struct mlx5_core_dev
*dev
= filp
->private_data
;
883 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
892 copy
= min_t(int, count
, dbg
->outlen
);
893 if (copy_to_user(buf
, dbg
->out_msg
, copy
))
901 static const struct file_operations dfops
= {
902 .owner
= THIS_MODULE
,
908 static ssize_t
outlen_read(struct file
*filp
, char __user
*buf
, size_t count
,
911 struct mlx5_core_dev
*dev
= filp
->private_data
;
912 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
919 err
= snprintf(outlen
, sizeof(outlen
), "%d", dbg
->outlen
);
923 if (copy_to_user(buf
, &outlen
, err
))
931 static ssize_t
outlen_write(struct file
*filp
, const char __user
*buf
,
932 size_t count
, loff_t
*pos
)
934 struct mlx5_core_dev
*dev
= filp
->private_data
;
935 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
941 if (*pos
!= 0 || count
> 6)
948 if (copy_from_user(outlen_str
, buf
, count
))
953 err
= sscanf(outlen_str
, "%d", &outlen
);
957 ptr
= kzalloc(outlen
, GFP_KERNEL
);
962 dbg
->outlen
= outlen
;
969 static const struct file_operations olfops
= {
970 .owner
= THIS_MODULE
,
972 .write
= outlen_write
,
976 static void set_wqname(struct mlx5_core_dev
*dev
)
978 struct mlx5_cmd
*cmd
= &dev
->cmd
;
980 snprintf(cmd
->wq_name
, sizeof(cmd
->wq_name
), "mlx5_cmd_%s",
981 dev_name(&dev
->pdev
->dev
));
984 static void clean_debug_files(struct mlx5_core_dev
*dev
)
986 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
988 if (!mlx5_debugfs_root
)
991 mlx5_cmdif_debugfs_cleanup(dev
);
992 debugfs_remove_recursive(dbg
->dbg_root
);
995 static int create_debugfs_files(struct mlx5_core_dev
*dev
)
997 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
1000 if (!mlx5_debugfs_root
)
1003 dbg
->dbg_root
= debugfs_create_dir("cmd", dev
->priv
.dbg_root
);
1007 dbg
->dbg_in
= debugfs_create_file("in", 0400, dbg
->dbg_root
,
1012 dbg
->dbg_out
= debugfs_create_file("out", 0200, dbg
->dbg_root
,
1017 dbg
->dbg_outlen
= debugfs_create_file("out_len", 0600, dbg
->dbg_root
,
1019 if (!dbg
->dbg_outlen
)
1022 dbg
->dbg_status
= debugfs_create_u8("status", 0600, dbg
->dbg_root
,
1024 if (!dbg
->dbg_status
)
1027 dbg
->dbg_run
= debugfs_create_file("run", 0200, dbg
->dbg_root
, dev
, &fops
);
1031 mlx5_cmdif_debugfs_init(dev
);
1036 clean_debug_files(dev
);
1040 void mlx5_cmd_use_events(struct mlx5_core_dev
*dev
)
1042 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1045 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1048 down(&cmd
->pages_sem
);
1050 flush_workqueue(cmd
->wq
);
1052 cmd
->mode
= CMD_MODE_EVENTS
;
1054 up(&cmd
->pages_sem
);
1055 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1059 void mlx5_cmd_use_polling(struct mlx5_core_dev
*dev
)
1061 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1064 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1067 down(&cmd
->pages_sem
);
1069 flush_workqueue(cmd
->wq
);
1070 cmd
->mode
= CMD_MODE_POLLING
;
1072 up(&cmd
->pages_sem
);
1073 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1077 static void free_msg(struct mlx5_core_dev
*dev
, struct mlx5_cmd_msg
*msg
)
1079 unsigned long flags
;
1082 spin_lock_irqsave(&msg
->cache
->lock
, flags
);
1083 list_add_tail(&msg
->list
, &msg
->cache
->head
);
1084 spin_unlock_irqrestore(&msg
->cache
->lock
, flags
);
1086 mlx5_free_cmd_msg(dev
, msg
);
1090 void mlx5_cmd_comp_handler(struct mlx5_core_dev
*dev
, unsigned long vector
)
1092 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1093 struct mlx5_cmd_work_ent
*ent
;
1094 mlx5_cmd_cbk_t callback
;
1099 struct mlx5_cmd_stats
*stats
;
1100 unsigned long flags
;
1102 for (i
= 0; i
< (1 << cmd
->log_sz
); i
++) {
1103 if (test_bit(i
, &vector
)) {
1104 struct semaphore
*sem
;
1106 ent
= cmd
->ent_arr
[i
];
1107 if (ent
->page_queue
)
1108 sem
= &cmd
->pages_sem
;
1111 ent
->ts2
= ktime_get_ns();
1112 memcpy(ent
->out
->first
.data
, ent
->lay
->out
, sizeof(ent
->lay
->out
));
1113 dump_command(dev
, ent
, 0);
1115 if (!cmd
->checksum_disabled
)
1116 ent
->ret
= verify_signature(ent
);
1119 ent
->status
= ent
->lay
->status_own
>> 1;
1120 mlx5_core_dbg(dev
, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1121 ent
->ret
, deliv_status_to_str(ent
->status
), ent
->status
);
1123 free_ent(cmd
, ent
->idx
);
1124 if (ent
->callback
) {
1125 ds
= ent
->ts2
- ent
->ts1
;
1126 if (ent
->op
< ARRAY_SIZE(cmd
->stats
)) {
1127 stats
= &cmd
->stats
[ent
->op
];
1128 spin_lock_irqsave(&stats
->lock
, flags
);
1131 spin_unlock_irqrestore(&stats
->lock
, flags
);
1134 callback
= ent
->callback
;
1135 context
= ent
->context
;
1138 err
= mlx5_copy_from_msg(ent
->uout
,
1142 mlx5_free_cmd_msg(dev
, ent
->out
);
1143 free_msg(dev
, ent
->in
);
1146 callback(err
, context
);
1148 complete(&ent
->done
);
1154 EXPORT_SYMBOL(mlx5_cmd_comp_handler
);
1156 static int status_to_err(u8 status
)
1158 return status
? -1 : 0; /* TBD more meaningful codes */
1161 static struct mlx5_cmd_msg
*alloc_msg(struct mlx5_core_dev
*dev
, int in_size
,
1164 struct mlx5_cmd_msg
*msg
= ERR_PTR(-ENOMEM
);
1165 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1166 struct cache_ent
*ent
= NULL
;
1168 if (in_size
> MED_LIST_SIZE
&& in_size
<= LONG_LIST_SIZE
)
1169 ent
= &cmd
->cache
.large
;
1170 else if (in_size
> 16 && in_size
<= MED_LIST_SIZE
)
1171 ent
= &cmd
->cache
.med
;
1174 spin_lock_irq(&ent
->lock
);
1175 if (!list_empty(&ent
->head
)) {
1176 msg
= list_entry(ent
->head
.next
, typeof(*msg
), list
);
1177 /* For cached lists, we must explicitly state what is
1181 list_del(&msg
->list
);
1183 spin_unlock_irq(&ent
->lock
);
1187 msg
= mlx5_alloc_cmd_msg(dev
, gfp
, in_size
);
1192 static int is_manage_pages(struct mlx5_inbox_hdr
*in
)
1194 return be16_to_cpu(in
->opcode
) == MLX5_CMD_OP_MANAGE_PAGES
;
1197 static int cmd_exec(struct mlx5_core_dev
*dev
, void *in
, int in_size
, void *out
,
1198 int out_size
, mlx5_cmd_cbk_t callback
, void *context
)
1200 struct mlx5_cmd_msg
*inb
;
1201 struct mlx5_cmd_msg
*outb
;
1207 pages_queue
= is_manage_pages(in
);
1208 gfp
= callback
? GFP_ATOMIC
: GFP_KERNEL
;
1210 inb
= alloc_msg(dev
, in_size
, gfp
);
1216 err
= mlx5_copy_to_msg(inb
, in
, in_size
);
1218 mlx5_core_warn(dev
, "err %d\n", err
);
1222 outb
= mlx5_alloc_cmd_msg(dev
, gfp
, out_size
);
1224 err
= PTR_ERR(outb
);
1228 err
= mlx5_cmd_invoke(dev
, inb
, outb
, out
, out_size
, callback
, context
,
1229 pages_queue
, &status
);
1233 mlx5_core_dbg(dev
, "err %d, status %d\n", err
, status
);
1235 err
= status_to_err(status
);
1239 err
= mlx5_copy_from_msg(out
, outb
, out_size
);
1243 mlx5_free_cmd_msg(dev
, outb
);
1251 int mlx5_cmd_exec(struct mlx5_core_dev
*dev
, void *in
, int in_size
, void *out
,
1254 return cmd_exec(dev
, in
, in_size
, out
, out_size
, NULL
, NULL
);
1256 EXPORT_SYMBOL(mlx5_cmd_exec
);
1258 int mlx5_cmd_exec_cb(struct mlx5_core_dev
*dev
, void *in
, int in_size
,
1259 void *out
, int out_size
, mlx5_cmd_cbk_t callback
,
1262 return cmd_exec(dev
, in
, in_size
, out
, out_size
, callback
, context
);
1264 EXPORT_SYMBOL(mlx5_cmd_exec_cb
);
1266 static void destroy_msg_cache(struct mlx5_core_dev
*dev
)
1268 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1269 struct mlx5_cmd_msg
*msg
;
1270 struct mlx5_cmd_msg
*n
;
1272 list_for_each_entry_safe(msg
, n
, &cmd
->cache
.large
.head
, list
) {
1273 list_del(&msg
->list
);
1274 mlx5_free_cmd_msg(dev
, msg
);
1277 list_for_each_entry_safe(msg
, n
, &cmd
->cache
.med
.head
, list
) {
1278 list_del(&msg
->list
);
1279 mlx5_free_cmd_msg(dev
, msg
);
1283 static int create_msg_cache(struct mlx5_core_dev
*dev
)
1285 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1286 struct mlx5_cmd_msg
*msg
;
1290 spin_lock_init(&cmd
->cache
.large
.lock
);
1291 INIT_LIST_HEAD(&cmd
->cache
.large
.head
);
1292 spin_lock_init(&cmd
->cache
.med
.lock
);
1293 INIT_LIST_HEAD(&cmd
->cache
.med
.head
);
1295 for (i
= 0; i
< NUM_LONG_LISTS
; i
++) {
1296 msg
= mlx5_alloc_cmd_msg(dev
, GFP_KERNEL
, LONG_LIST_SIZE
);
1301 msg
->cache
= &cmd
->cache
.large
;
1302 list_add_tail(&msg
->list
, &cmd
->cache
.large
.head
);
1305 for (i
= 0; i
< NUM_MED_LISTS
; i
++) {
1306 msg
= mlx5_alloc_cmd_msg(dev
, GFP_KERNEL
, MED_LIST_SIZE
);
1311 msg
->cache
= &cmd
->cache
.med
;
1312 list_add_tail(&msg
->list
, &cmd
->cache
.med
.head
);
1318 destroy_msg_cache(dev
);
1322 int mlx5_cmd_init(struct mlx5_core_dev
*dev
)
1324 int size
= sizeof(struct mlx5_cmd_prot_block
);
1325 int align
= roundup_pow_of_two(size
);
1326 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1332 cmd_if_rev
= cmdif_rev(dev
);
1333 if (cmd_if_rev
!= CMD_IF_REV
) {
1334 dev_err(&dev
->pdev
->dev
,
1335 "Driver cmdif rev(%d) differs from firmware's(%d)\n",
1336 CMD_IF_REV
, cmd_if_rev
);
1340 cmd
->pool
= pci_pool_create("mlx5_cmd", dev
->pdev
, size
, align
, 0);
1344 cmd
->cmd_buf
= (void *)__get_free_pages(GFP_ATOMIC
, 0);
1345 if (!cmd
->cmd_buf
) {
1349 cmd
->dma
= dma_map_single(&dev
->pdev
->dev
, cmd
->cmd_buf
, PAGE_SIZE
,
1351 if (dma_mapping_error(&dev
->pdev
->dev
, cmd
->dma
)) {
1356 cmd_l
= ioread32be(&dev
->iseg
->cmdq_addr_l_sz
) & 0xff;
1357 cmd
->log_sz
= cmd_l
>> 4 & 0xf;
1358 cmd
->log_stride
= cmd_l
& 0xf;
1359 if (1 << cmd
->log_sz
> MLX5_MAX_COMMANDS
) {
1360 dev_err(&dev
->pdev
->dev
, "firmware reports too many outstanding commands %d\n",
1366 if (cmd
->log_sz
+ cmd
->log_stride
> MLX5_ADAPTER_PAGE_SHIFT
) {
1367 dev_err(&dev
->pdev
->dev
, "command queue size overflow\n");
1372 cmd
->checksum_disabled
= 1;
1373 cmd
->max_reg_cmds
= (1 << cmd
->log_sz
) - 1;
1374 cmd
->bitmask
= (1 << cmd
->max_reg_cmds
) - 1;
1376 cmd
->cmdif_rev
= ioread32be(&dev
->iseg
->cmdif_rev_fw_sub
) >> 16;
1377 if (cmd
->cmdif_rev
> CMD_IF_REV
) {
1378 dev_err(&dev
->pdev
->dev
, "driver does not support command interface version. driver %d, firmware %d\n",
1379 CMD_IF_REV
, cmd
->cmdif_rev
);
1384 spin_lock_init(&cmd
->alloc_lock
);
1385 spin_lock_init(&cmd
->token_lock
);
1386 for (i
= 0; i
< ARRAY_SIZE(cmd
->stats
); i
++)
1387 spin_lock_init(&cmd
->stats
[i
].lock
);
1389 sema_init(&cmd
->sem
, cmd
->max_reg_cmds
);
1390 sema_init(&cmd
->pages_sem
, 1);
1392 cmd_h
= (u32
)((u64
)(cmd
->dma
) >> 32);
1393 cmd_l
= (u32
)(cmd
->dma
);
1394 if (cmd_l
& 0xfff) {
1395 dev_err(&dev
->pdev
->dev
, "invalid command queue address\n");
1400 iowrite32be(cmd_h
, &dev
->iseg
->cmdq_addr_h
);
1401 iowrite32be(cmd_l
, &dev
->iseg
->cmdq_addr_l_sz
);
1403 /* Make sure firmware sees the complete address before we proceed */
1406 mlx5_core_dbg(dev
, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd
->dma
));
1408 cmd
->mode
= CMD_MODE_POLLING
;
1410 err
= create_msg_cache(dev
);
1412 dev_err(&dev
->pdev
->dev
, "failed to create command cache\n");
1417 cmd
->wq
= create_singlethread_workqueue(cmd
->wq_name
);
1419 dev_err(&dev
->pdev
->dev
, "failed to create command workqueue\n");
1424 err
= create_debugfs_files(dev
);
1433 destroy_workqueue(cmd
->wq
);
1436 destroy_msg_cache(dev
);
1439 dma_unmap_single(&dev
->pdev
->dev
, cmd
->dma
, PAGE_SIZE
,
1442 free_pages((unsigned long)cmd
->cmd_buf
, 0);
1445 pci_pool_destroy(cmd
->pool
);
1449 EXPORT_SYMBOL(mlx5_cmd_init
);
1451 void mlx5_cmd_cleanup(struct mlx5_core_dev
*dev
)
1453 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1455 clean_debug_files(dev
);
1456 destroy_workqueue(cmd
->wq
);
1457 destroy_msg_cache(dev
);
1458 dma_unmap_single(&dev
->pdev
->dev
, cmd
->dma
, PAGE_SIZE
,
1460 free_pages((unsigned long)cmd
->cmd_buf
, 0);
1461 pci_pool_destroy(cmd
->pool
);
1463 EXPORT_SYMBOL(mlx5_cmd_cleanup
);
1465 static const char *cmd_status_str(u8 status
)
1468 case MLX5_CMD_STAT_OK
:
1470 case MLX5_CMD_STAT_INT_ERR
:
1471 return "internal error";
1472 case MLX5_CMD_STAT_BAD_OP_ERR
:
1473 return "bad operation";
1474 case MLX5_CMD_STAT_BAD_PARAM_ERR
:
1475 return "bad parameter";
1476 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR
:
1477 return "bad system state";
1478 case MLX5_CMD_STAT_BAD_RES_ERR
:
1479 return "bad resource";
1480 case MLX5_CMD_STAT_RES_BUSY
:
1481 return "resource busy";
1482 case MLX5_CMD_STAT_LIM_ERR
:
1483 return "limits exceeded";
1484 case MLX5_CMD_STAT_BAD_RES_STATE_ERR
:
1485 return "bad resource state";
1486 case MLX5_CMD_STAT_IX_ERR
:
1488 case MLX5_CMD_STAT_NO_RES_ERR
:
1489 return "no resources";
1490 case MLX5_CMD_STAT_BAD_INP_LEN_ERR
:
1491 return "bad input length";
1492 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR
:
1493 return "bad output length";
1494 case MLX5_CMD_STAT_BAD_QP_STATE_ERR
:
1495 return "bad QP state";
1496 case MLX5_CMD_STAT_BAD_PKT_ERR
:
1497 return "bad packet (discarded)";
1498 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR
:
1499 return "bad size too many outstanding CQEs";
1501 return "unknown status";
1505 static int cmd_status_to_err(u8 status
)
1508 case MLX5_CMD_STAT_OK
: return 0;
1509 case MLX5_CMD_STAT_INT_ERR
: return -EIO
;
1510 case MLX5_CMD_STAT_BAD_OP_ERR
: return -EINVAL
;
1511 case MLX5_CMD_STAT_BAD_PARAM_ERR
: return -EINVAL
;
1512 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR
: return -EIO
;
1513 case MLX5_CMD_STAT_BAD_RES_ERR
: return -EINVAL
;
1514 case MLX5_CMD_STAT_RES_BUSY
: return -EBUSY
;
1515 case MLX5_CMD_STAT_LIM_ERR
: return -ENOMEM
;
1516 case MLX5_CMD_STAT_BAD_RES_STATE_ERR
: return -EINVAL
;
1517 case MLX5_CMD_STAT_IX_ERR
: return -EINVAL
;
1518 case MLX5_CMD_STAT_NO_RES_ERR
: return -EAGAIN
;
1519 case MLX5_CMD_STAT_BAD_INP_LEN_ERR
: return -EIO
;
1520 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR
: return -EIO
;
1521 case MLX5_CMD_STAT_BAD_QP_STATE_ERR
: return -EINVAL
;
1522 case MLX5_CMD_STAT_BAD_PKT_ERR
: return -EINVAL
;
1523 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR
: return -EINVAL
;
1524 default: return -EIO
;
1528 /* this will be available till all the commands use set/get macros */
1529 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr
*hdr
)
1534 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1535 cmd_status_str(hdr
->status
), hdr
->status
,
1536 be32_to_cpu(hdr
->syndrome
));
1538 return cmd_status_to_err(hdr
->status
);
1541 int mlx5_cmd_status_to_err_v2(void *ptr
)
1546 status
= be32_to_cpu(*(__be32
*)ptr
) >> 24;
1550 syndrome
= be32_to_cpu(*(__be32
*)(ptr
+ 4));
1552 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1553 cmd_status_str(status
), status
, syndrome
);
1555 return cmd_status_to_err(status
);