2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <asm-generic/kmap_types.h>
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/pci.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/slab.h>
39 #include <linux/delay.h>
40 #include <linux/random.h>
41 #include <linux/io-mapping.h>
42 #include <linux/mlx5/driver.h>
43 #include <linux/debugfs.h>
45 #include "mlx5_core.h"
59 LONG_LIST_SIZE
= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE
) * 8 + 16 +
60 MLX5_CMD_DATA_BLOCK_SIZE
,
61 MED_LIST_SIZE
= 16 + MLX5_CMD_DATA_BLOCK_SIZE
,
65 MLX5_CMD_DELIVERY_STAT_OK
= 0x0,
66 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR
= 0x1,
67 MLX5_CMD_DELIVERY_STAT_TOK_ERR
= 0x2,
68 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR
= 0x3,
69 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR
= 0x4,
70 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR
= 0x5,
71 MLX5_CMD_DELIVERY_STAT_FW_ERR
= 0x6,
72 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR
= 0x7,
73 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR
= 0x8,
74 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR
= 0x9,
75 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR
= 0x10,
79 MLX5_CMD_STAT_OK
= 0x0,
80 MLX5_CMD_STAT_INT_ERR
= 0x1,
81 MLX5_CMD_STAT_BAD_OP_ERR
= 0x2,
82 MLX5_CMD_STAT_BAD_PARAM_ERR
= 0x3,
83 MLX5_CMD_STAT_BAD_SYS_STATE_ERR
= 0x4,
84 MLX5_CMD_STAT_BAD_RES_ERR
= 0x5,
85 MLX5_CMD_STAT_RES_BUSY
= 0x6,
86 MLX5_CMD_STAT_LIM_ERR
= 0x8,
87 MLX5_CMD_STAT_BAD_RES_STATE_ERR
= 0x9,
88 MLX5_CMD_STAT_IX_ERR
= 0xa,
89 MLX5_CMD_STAT_NO_RES_ERR
= 0xf,
90 MLX5_CMD_STAT_BAD_INP_LEN_ERR
= 0x50,
91 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR
= 0x51,
92 MLX5_CMD_STAT_BAD_QP_STATE_ERR
= 0x10,
93 MLX5_CMD_STAT_BAD_PKT_ERR
= 0x30,
94 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR
= 0x40,
97 static struct mlx5_cmd_work_ent
*alloc_cmd(struct mlx5_cmd
*cmd
,
98 struct mlx5_cmd_msg
*in
,
99 struct mlx5_cmd_msg
*out
,
100 void *uout
, int uout_size
,
102 void *context
, int page_queue
)
104 gfp_t alloc_flags
= cbk
? GFP_ATOMIC
: GFP_KERNEL
;
105 struct mlx5_cmd_work_ent
*ent
;
107 ent
= kzalloc(sizeof(*ent
), alloc_flags
);
109 return ERR_PTR(-ENOMEM
);
114 ent
->uout_size
= uout_size
;
116 ent
->context
= context
;
118 ent
->page_queue
= page_queue
;
123 static u8
alloc_token(struct mlx5_cmd
*cmd
)
127 spin_lock(&cmd
->token_lock
);
132 spin_unlock(&cmd
->token_lock
);
137 static int alloc_ent(struct mlx5_cmd
*cmd
)
142 spin_lock_irqsave(&cmd
->alloc_lock
, flags
);
143 ret
= find_first_bit(&cmd
->bitmask
, cmd
->max_reg_cmds
);
144 if (ret
< cmd
->max_reg_cmds
)
145 clear_bit(ret
, &cmd
->bitmask
);
146 spin_unlock_irqrestore(&cmd
->alloc_lock
, flags
);
148 return ret
< cmd
->max_reg_cmds
? ret
: -ENOMEM
;
151 static void free_ent(struct mlx5_cmd
*cmd
, int idx
)
155 spin_lock_irqsave(&cmd
->alloc_lock
, flags
);
156 set_bit(idx
, &cmd
->bitmask
);
157 spin_unlock_irqrestore(&cmd
->alloc_lock
, flags
);
160 static struct mlx5_cmd_layout
*get_inst(struct mlx5_cmd
*cmd
, int idx
)
162 return cmd
->cmd_buf
+ (idx
<< cmd
->log_stride
);
165 static u8
xor8_buf(void *buf
, int len
)
171 for (i
= 0; i
< len
; i
++)
177 static int verify_block_sig(struct mlx5_cmd_prot_block
*block
)
179 if (xor8_buf(block
->rsvd0
, sizeof(*block
) - sizeof(block
->data
) - 1) != 0xff)
182 if (xor8_buf(block
, sizeof(*block
)) != 0xff)
188 static void calc_block_sig(struct mlx5_cmd_prot_block
*block
, u8 token
,
191 block
->token
= token
;
193 block
->ctrl_sig
= ~xor8_buf(block
->rsvd0
, sizeof(*block
) -
194 sizeof(block
->data
) - 2);
195 block
->sig
= ~xor8_buf(block
, sizeof(*block
) - 1);
199 static void calc_chain_sig(struct mlx5_cmd_msg
*msg
, u8 token
, int csum
)
201 struct mlx5_cmd_mailbox
*next
= msg
->next
;
204 calc_block_sig(next
->buf
, token
, csum
);
209 static void set_signature(struct mlx5_cmd_work_ent
*ent
, int csum
)
211 ent
->lay
->sig
= ~xor8_buf(ent
->lay
, sizeof(*ent
->lay
));
212 calc_chain_sig(ent
->in
, ent
->token
, csum
);
213 calc_chain_sig(ent
->out
, ent
->token
, csum
);
216 static void poll_timeout(struct mlx5_cmd_work_ent
*ent
)
218 unsigned long poll_end
= jiffies
+ msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC
+ 1000);
222 own
= ent
->lay
->status_own
;
223 if (!(own
& CMD_OWNER_HW
)) {
227 usleep_range(5000, 10000);
228 } while (time_before(jiffies
, poll_end
));
230 ent
->ret
= -ETIMEDOUT
;
/* Release a command work entry allocated by alloc_cmd().
 * NOTE(review): body reconstructed (extraction dropped it) — the
 * entry is kzalloc'ed, so kfree is the matching release; confirm.
 */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
239 static int verify_signature(struct mlx5_cmd_work_ent
*ent
)
241 struct mlx5_cmd_mailbox
*next
= ent
->out
->next
;
245 sig
= xor8_buf(ent
->lay
, sizeof(*ent
->lay
));
250 err
= verify_block_sig(next
->buf
);
260 static void dump_buf(void *buf
, int size
, int data_only
, int offset
)
265 for (i
= 0; i
< size
; i
+= 16) {
266 pr_debug("%03x: %08x %08x %08x %08x\n", offset
, be32_to_cpu(p
[0]),
267 be32_to_cpu(p
[1]), be32_to_cpu(p
[2]),
276 const char *mlx5_command_str(int command
)
279 case MLX5_CMD_OP_QUERY_HCA_CAP
:
280 return "QUERY_HCA_CAP";
282 case MLX5_CMD_OP_SET_HCA_CAP
:
283 return "SET_HCA_CAP";
285 case MLX5_CMD_OP_QUERY_ADAPTER
:
286 return "QUERY_ADAPTER";
288 case MLX5_CMD_OP_INIT_HCA
:
291 case MLX5_CMD_OP_TEARDOWN_HCA
:
292 return "TEARDOWN_HCA";
294 case MLX5_CMD_OP_ENABLE_HCA
:
295 return "MLX5_CMD_OP_ENABLE_HCA";
297 case MLX5_CMD_OP_DISABLE_HCA
:
298 return "MLX5_CMD_OP_DISABLE_HCA";
300 case MLX5_CMD_OP_QUERY_PAGES
:
301 return "QUERY_PAGES";
303 case MLX5_CMD_OP_MANAGE_PAGES
:
304 return "MANAGE_PAGES";
306 case MLX5_CMD_OP_CREATE_MKEY
:
307 return "CREATE_MKEY";
309 case MLX5_CMD_OP_QUERY_MKEY
:
312 case MLX5_CMD_OP_DESTROY_MKEY
:
313 return "DESTROY_MKEY";
315 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS
:
316 return "QUERY_SPECIAL_CONTEXTS";
318 case MLX5_CMD_OP_CREATE_EQ
:
321 case MLX5_CMD_OP_DESTROY_EQ
:
324 case MLX5_CMD_OP_QUERY_EQ
:
327 case MLX5_CMD_OP_CREATE_CQ
:
330 case MLX5_CMD_OP_DESTROY_CQ
:
333 case MLX5_CMD_OP_QUERY_CQ
:
336 case MLX5_CMD_OP_MODIFY_CQ
:
339 case MLX5_CMD_OP_CREATE_QP
:
342 case MLX5_CMD_OP_DESTROY_QP
:
345 case MLX5_CMD_OP_RST2INIT_QP
:
346 return "RST2INIT_QP";
348 case MLX5_CMD_OP_INIT2RTR_QP
:
349 return "INIT2RTR_QP";
351 case MLX5_CMD_OP_RTR2RTS_QP
:
354 case MLX5_CMD_OP_RTS2RTS_QP
:
357 case MLX5_CMD_OP_SQERR2RTS_QP
:
358 return "SQERR2RTS_QP";
360 case MLX5_CMD_OP_2ERR_QP
:
363 case MLX5_CMD_OP_2RST_QP
:
366 case MLX5_CMD_OP_QUERY_QP
:
369 case MLX5_CMD_OP_MAD_IFC
:
372 case MLX5_CMD_OP_INIT2INIT_QP
:
373 return "INIT2INIT_QP";
375 case MLX5_CMD_OP_CREATE_PSV
:
378 case MLX5_CMD_OP_DESTROY_PSV
:
379 return "DESTROY_PSV";
381 case MLX5_CMD_OP_CREATE_SRQ
:
384 case MLX5_CMD_OP_DESTROY_SRQ
:
385 return "DESTROY_SRQ";
387 case MLX5_CMD_OP_QUERY_SRQ
:
390 case MLX5_CMD_OP_ARM_RQ
:
393 case MLX5_CMD_OP_CREATE_XRC_SRQ
:
394 return "CREATE_XRC_SRQ";
396 case MLX5_CMD_OP_DESTROY_XRC_SRQ
:
397 return "DESTROY_XRC_SRQ";
399 case MLX5_CMD_OP_QUERY_XRC_SRQ
:
400 return "QUERY_XRC_SRQ";
402 case MLX5_CMD_OP_ARM_XRC_SRQ
:
403 return "ARM_XRC_SRQ";
405 case MLX5_CMD_OP_ALLOC_PD
:
408 case MLX5_CMD_OP_DEALLOC_PD
:
411 case MLX5_CMD_OP_ALLOC_UAR
:
414 case MLX5_CMD_OP_DEALLOC_UAR
:
415 return "DEALLOC_UAR";
417 case MLX5_CMD_OP_ATTACH_TO_MCG
:
418 return "ATTACH_TO_MCG";
420 case MLX5_CMD_OP_DETTACH_FROM_MCG
:
421 return "DETTACH_FROM_MCG";
423 case MLX5_CMD_OP_ALLOC_XRCD
:
426 case MLX5_CMD_OP_DEALLOC_XRCD
:
427 return "DEALLOC_XRCD";
429 case MLX5_CMD_OP_ACCESS_REG
:
430 return "MLX5_CMD_OP_ACCESS_REG";
432 default: return "unknown command opcode";
436 static void dump_command(struct mlx5_core_dev
*dev
,
437 struct mlx5_cmd_work_ent
*ent
, int input
)
439 u16 op
= be16_to_cpu(((struct mlx5_inbox_hdr
*)(ent
->lay
->in
))->opcode
);
440 struct mlx5_cmd_msg
*msg
= input
? ent
->in
: ent
->out
;
441 struct mlx5_cmd_mailbox
*next
= msg
->next
;
446 data_only
= !!(mlx5_core_debug_mask
& (1 << MLX5_CMD_DATA
));
449 mlx5_core_dbg_mask(dev
, 1 << MLX5_CMD_DATA
,
450 "dump command data %s(0x%x) %s\n",
451 mlx5_command_str(op
), op
,
452 input
? "INPUT" : "OUTPUT");
454 mlx5_core_dbg(dev
, "dump command %s(0x%x) %s\n",
455 mlx5_command_str(op
), op
,
456 input
? "INPUT" : "OUTPUT");
460 dump_buf(ent
->lay
->in
, sizeof(ent
->lay
->in
), 1, offset
);
461 offset
+= sizeof(ent
->lay
->in
);
463 dump_buf(ent
->lay
->out
, sizeof(ent
->lay
->out
), 1, offset
);
464 offset
+= sizeof(ent
->lay
->out
);
467 dump_buf(ent
->lay
, sizeof(*ent
->lay
), 0, offset
);
468 offset
+= sizeof(*ent
->lay
);
471 while (next
&& offset
< msg
->len
) {
473 dump_len
= min_t(int, MLX5_CMD_DATA_BLOCK_SIZE
, msg
->len
- offset
);
474 dump_buf(next
->buf
, dump_len
, 1, offset
);
475 offset
+= MLX5_CMD_DATA_BLOCK_SIZE
;
477 mlx5_core_dbg(dev
, "command block:\n");
478 dump_buf(next
->buf
, sizeof(struct mlx5_cmd_prot_block
), 0, offset
);
479 offset
+= sizeof(struct mlx5_cmd_prot_block
);
488 static void cmd_work_handler(struct work_struct
*work
)
490 struct mlx5_cmd_work_ent
*ent
= container_of(work
, struct mlx5_cmd_work_ent
, work
);
491 struct mlx5_cmd
*cmd
= ent
->cmd
;
492 struct mlx5_core_dev
*dev
= container_of(cmd
, struct mlx5_core_dev
, cmd
);
493 struct mlx5_cmd_layout
*lay
;
494 struct semaphore
*sem
;
496 sem
= ent
->page_queue
? &cmd
->pages_sem
: &cmd
->sem
;
498 if (!ent
->page_queue
) {
499 ent
->idx
= alloc_ent(cmd
);
501 mlx5_core_err(dev
, "failed to allocate command entry\n");
506 ent
->idx
= cmd
->max_reg_cmds
;
509 ent
->token
= alloc_token(cmd
);
510 cmd
->ent_arr
[ent
->idx
] = ent
;
511 lay
= get_inst(cmd
, ent
->idx
);
513 memset(lay
, 0, sizeof(*lay
));
514 memcpy(lay
->in
, ent
->in
->first
.data
, sizeof(lay
->in
));
515 ent
->op
= be32_to_cpu(lay
->in
[0]) >> 16;
517 lay
->in_ptr
= cpu_to_be64(ent
->in
->next
->dma
);
518 lay
->inlen
= cpu_to_be32(ent
->in
->len
);
520 lay
->out_ptr
= cpu_to_be64(ent
->out
->next
->dma
);
521 lay
->outlen
= cpu_to_be32(ent
->out
->len
);
522 lay
->type
= MLX5_PCI_CMD_XPORT
;
523 lay
->token
= ent
->token
;
524 lay
->status_own
= CMD_OWNER_HW
;
525 set_signature(ent
, !cmd
->checksum_disabled
);
526 dump_command(dev
, ent
, 1);
527 ent
->ts1
= ktime_get_ns();
529 /* ring doorbell after the descriptor is valid */
530 mlx5_core_dbg(dev
, "writing 0x%x to command doorbell\n", 1 << ent
->idx
);
532 iowrite32be(1 << ent
->idx
, &dev
->iseg
->cmd_dbell
);
534 /* if not in polling don't use ent after this point */
535 if (cmd
->mode
== CMD_MODE_POLLING
) {
537 /* make sure we read the descriptor after ownership is SW */
539 mlx5_cmd_comp_handler(dev
, 1UL << ent
->idx
);
543 static const char *deliv_status_to_str(u8 status
)
546 case MLX5_CMD_DELIVERY_STAT_OK
:
548 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR
:
549 return "signature error";
550 case MLX5_CMD_DELIVERY_STAT_TOK_ERR
:
551 return "token error";
552 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR
:
553 return "bad block number";
554 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR
:
555 return "output pointer not aligned to block size";
556 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR
:
557 return "input pointer not aligned to block size";
558 case MLX5_CMD_DELIVERY_STAT_FW_ERR
:
559 return "firmware internal error";
560 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR
:
561 return "command input length error";
562 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR
:
563 return "command ouput length error";
564 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR
:
565 return "reserved fields not cleared";
566 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR
:
567 return "bad command descriptor type";
569 return "unknown status code";
573 static u16
msg_to_opcode(struct mlx5_cmd_msg
*in
)
575 struct mlx5_inbox_hdr
*hdr
= (struct mlx5_inbox_hdr
*)(in
->first
.data
);
577 return be16_to_cpu(hdr
->opcode
);
580 static int wait_func(struct mlx5_core_dev
*dev
, struct mlx5_cmd_work_ent
*ent
)
582 unsigned long timeout
= msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC
);
583 struct mlx5_cmd
*cmd
= &dev
->cmd
;
586 if (cmd
->mode
== CMD_MODE_POLLING
) {
587 wait_for_completion(&ent
->done
);
590 if (!wait_for_completion_timeout(&ent
->done
, timeout
))
595 if (err
== -ETIMEDOUT
) {
596 mlx5_core_warn(dev
, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
597 mlx5_command_str(msg_to_opcode(ent
->in
)),
598 msg_to_opcode(ent
->in
));
600 mlx5_core_dbg(dev
, "err %d, delivery status %s(%d)\n",
601 err
, deliv_status_to_str(ent
->status
), ent
->status
);
607 * 1. Callback functions may not sleep
608 * 2. page queue commands do not support asynchronous completion
610 static int mlx5_cmd_invoke(struct mlx5_core_dev
*dev
, struct mlx5_cmd_msg
*in
,
611 struct mlx5_cmd_msg
*out
, void *uout
, int uout_size
,
612 mlx5_cmd_cbk_t callback
,
613 void *context
, int page_queue
, u8
*status
)
615 struct mlx5_cmd
*cmd
= &dev
->cmd
;
616 struct mlx5_cmd_work_ent
*ent
;
617 struct mlx5_cmd_stats
*stats
;
622 if (callback
&& page_queue
)
625 ent
= alloc_cmd(cmd
, in
, out
, uout
, uout_size
, callback
, context
,
631 init_completion(&ent
->done
);
633 INIT_WORK(&ent
->work
, cmd_work_handler
);
635 cmd_work_handler(&ent
->work
);
636 } else if (!queue_work(cmd
->wq
, &ent
->work
)) {
637 mlx5_core_warn(dev
, "failed to queue work\n");
643 err
= wait_func(dev
, ent
);
644 if (err
== -ETIMEDOUT
)
647 ds
= ent
->ts2
- ent
->ts1
;
648 op
= be16_to_cpu(((struct mlx5_inbox_hdr
*)in
->first
.data
)->opcode
);
649 if (op
< ARRAY_SIZE(cmd
->stats
)) {
650 stats
= &cmd
->stats
[op
];
651 spin_lock_irq(&stats
->lock
);
654 spin_unlock_irq(&stats
->lock
);
656 mlx5_core_dbg_mask(dev
, 1 << MLX5_CMD_TIME
,
657 "fw exec time for %s is %lld nsec\n",
658 mlx5_command_str(op
), ds
);
659 *status
= ent
->status
;
671 static ssize_t
dbg_write(struct file
*filp
, const char __user
*buf
,
672 size_t count
, loff_t
*pos
)
674 struct mlx5_core_dev
*dev
= filp
->private_data
;
675 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
679 if (!dbg
->in_msg
|| !dbg
->out_msg
)
682 if (copy_from_user(lbuf
, buf
, sizeof(lbuf
)))
685 lbuf
[sizeof(lbuf
) - 1] = 0;
687 if (strcmp(lbuf
, "go"))
690 err
= mlx5_cmd_exec(dev
, dbg
->in_msg
, dbg
->inlen
, dbg
->out_msg
, dbg
->outlen
);
692 return err
? err
: count
;
696 static const struct file_operations fops
= {
697 .owner
= THIS_MODULE
,
702 static int mlx5_copy_to_msg(struct mlx5_cmd_msg
*to
, void *from
, int size
)
704 struct mlx5_cmd_prot_block
*block
;
705 struct mlx5_cmd_mailbox
*next
;
711 copy
= min_t(int, size
, sizeof(to
->first
.data
));
712 memcpy(to
->first
.data
, from
, copy
);
723 copy
= min_t(int, size
, MLX5_CMD_DATA_BLOCK_SIZE
);
725 memcpy(block
->data
, from
, copy
);
734 static int mlx5_copy_from_msg(void *to
, struct mlx5_cmd_msg
*from
, int size
)
736 struct mlx5_cmd_prot_block
*block
;
737 struct mlx5_cmd_mailbox
*next
;
743 copy
= min_t(int, size
, sizeof(from
->first
.data
));
744 memcpy(to
, from
->first
.data
, copy
);
755 copy
= min_t(int, size
, MLX5_CMD_DATA_BLOCK_SIZE
);
758 memcpy(to
, block
->data
, copy
);
767 static struct mlx5_cmd_mailbox
*alloc_cmd_box(struct mlx5_core_dev
*dev
,
770 struct mlx5_cmd_mailbox
*mailbox
;
772 mailbox
= kmalloc(sizeof(*mailbox
), flags
);
774 return ERR_PTR(-ENOMEM
);
776 mailbox
->buf
= pci_pool_alloc(dev
->cmd
.pool
, flags
,
779 mlx5_core_dbg(dev
, "failed allocation\n");
781 return ERR_PTR(-ENOMEM
);
783 memset(mailbox
->buf
, 0, sizeof(struct mlx5_cmd_prot_block
));
784 mailbox
->next
= NULL
;
789 static void free_cmd_box(struct mlx5_core_dev
*dev
,
790 struct mlx5_cmd_mailbox
*mailbox
)
792 pci_pool_free(dev
->cmd
.pool
, mailbox
->buf
, mailbox
->dma
);
796 static struct mlx5_cmd_msg
*mlx5_alloc_cmd_msg(struct mlx5_core_dev
*dev
,
797 gfp_t flags
, int size
)
799 struct mlx5_cmd_mailbox
*tmp
, *head
= NULL
;
800 struct mlx5_cmd_prot_block
*block
;
801 struct mlx5_cmd_msg
*msg
;
807 msg
= kzalloc(sizeof(*msg
), flags
);
809 return ERR_PTR(-ENOMEM
);
811 blen
= size
- min_t(int, sizeof(msg
->first
.data
), size
);
812 n
= (blen
+ MLX5_CMD_DATA_BLOCK_SIZE
- 1) / MLX5_CMD_DATA_BLOCK_SIZE
;
814 for (i
= 0; i
< n
; i
++) {
815 tmp
= alloc_cmd_box(dev
, flags
);
817 mlx5_core_warn(dev
, "failed allocating block\n");
824 block
->next
= cpu_to_be64(tmp
->next
? tmp
->next
->dma
: 0);
825 block
->block_num
= cpu_to_be32(n
- i
- 1);
835 free_cmd_box(dev
, head
);
843 static void mlx5_free_cmd_msg(struct mlx5_core_dev
*dev
,
844 struct mlx5_cmd_msg
*msg
)
846 struct mlx5_cmd_mailbox
*head
= msg
->next
;
847 struct mlx5_cmd_mailbox
*next
;
851 free_cmd_box(dev
, head
);
857 static ssize_t
data_write(struct file
*filp
, const char __user
*buf
,
858 size_t count
, loff_t
*pos
)
860 struct mlx5_core_dev
*dev
= filp
->private_data
;
861 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
872 ptr
= kzalloc(count
, GFP_KERNEL
);
876 if (copy_from_user(ptr
, buf
, count
)) {
892 static ssize_t
data_read(struct file
*filp
, char __user
*buf
, size_t count
,
895 struct mlx5_core_dev
*dev
= filp
->private_data
;
896 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
905 copy
= min_t(int, count
, dbg
->outlen
);
906 if (copy_to_user(buf
, dbg
->out_msg
, copy
))
914 static const struct file_operations dfops
= {
915 .owner
= THIS_MODULE
,
921 static ssize_t
outlen_read(struct file
*filp
, char __user
*buf
, size_t count
,
924 struct mlx5_core_dev
*dev
= filp
->private_data
;
925 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
932 err
= snprintf(outlen
, sizeof(outlen
), "%d", dbg
->outlen
);
936 if (copy_to_user(buf
, &outlen
, err
))
944 static ssize_t
outlen_write(struct file
*filp
, const char __user
*buf
,
945 size_t count
, loff_t
*pos
)
947 struct mlx5_core_dev
*dev
= filp
->private_data
;
948 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
954 if (*pos
!= 0 || count
> 6)
961 if (copy_from_user(outlen_str
, buf
, count
))
966 err
= sscanf(outlen_str
, "%d", &outlen
);
970 ptr
= kzalloc(outlen
, GFP_KERNEL
);
975 dbg
->outlen
= outlen
;
982 static const struct file_operations olfops
= {
983 .owner
= THIS_MODULE
,
985 .write
= outlen_write
,
989 static void set_wqname(struct mlx5_core_dev
*dev
)
991 struct mlx5_cmd
*cmd
= &dev
->cmd
;
993 snprintf(cmd
->wq_name
, sizeof(cmd
->wq_name
), "mlx5_cmd_%s",
994 dev_name(&dev
->pdev
->dev
));
997 static void clean_debug_files(struct mlx5_core_dev
*dev
)
999 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
1001 if (!mlx5_debugfs_root
)
1004 mlx5_cmdif_debugfs_cleanup(dev
);
1005 debugfs_remove_recursive(dbg
->dbg_root
);
1008 static int create_debugfs_files(struct mlx5_core_dev
*dev
)
1010 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
1013 if (!mlx5_debugfs_root
)
1016 dbg
->dbg_root
= debugfs_create_dir("cmd", dev
->priv
.dbg_root
);
1020 dbg
->dbg_in
= debugfs_create_file("in", 0400, dbg
->dbg_root
,
1025 dbg
->dbg_out
= debugfs_create_file("out", 0200, dbg
->dbg_root
,
1030 dbg
->dbg_outlen
= debugfs_create_file("out_len", 0600, dbg
->dbg_root
,
1032 if (!dbg
->dbg_outlen
)
1035 dbg
->dbg_status
= debugfs_create_u8("status", 0600, dbg
->dbg_root
,
1037 if (!dbg
->dbg_status
)
1040 dbg
->dbg_run
= debugfs_create_file("run", 0200, dbg
->dbg_root
, dev
, &fops
);
1044 mlx5_cmdif_debugfs_init(dev
);
1049 clean_debug_files(dev
);
1053 void mlx5_cmd_use_events(struct mlx5_core_dev
*dev
)
1055 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1058 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1061 down(&cmd
->pages_sem
);
1063 flush_workqueue(cmd
->wq
);
1065 cmd
->mode
= CMD_MODE_EVENTS
;
1067 up(&cmd
->pages_sem
);
1068 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1072 void mlx5_cmd_use_polling(struct mlx5_core_dev
*dev
)
1074 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1077 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1080 down(&cmd
->pages_sem
);
1082 flush_workqueue(cmd
->wq
);
1083 cmd
->mode
= CMD_MODE_POLLING
;
1085 up(&cmd
->pages_sem
);
1086 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1090 static void free_msg(struct mlx5_core_dev
*dev
, struct mlx5_cmd_msg
*msg
)
1092 unsigned long flags
;
1095 spin_lock_irqsave(&msg
->cache
->lock
, flags
);
1096 list_add_tail(&msg
->list
, &msg
->cache
->head
);
1097 spin_unlock_irqrestore(&msg
->cache
->lock
, flags
);
1099 mlx5_free_cmd_msg(dev
, msg
);
1103 void mlx5_cmd_comp_handler(struct mlx5_core_dev
*dev
, unsigned long vector
)
1105 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1106 struct mlx5_cmd_work_ent
*ent
;
1107 mlx5_cmd_cbk_t callback
;
1112 struct mlx5_cmd_stats
*stats
;
1113 unsigned long flags
;
1115 for (i
= 0; i
< (1 << cmd
->log_sz
); i
++) {
1116 if (test_bit(i
, &vector
)) {
1117 struct semaphore
*sem
;
1119 ent
= cmd
->ent_arr
[i
];
1120 if (ent
->page_queue
)
1121 sem
= &cmd
->pages_sem
;
1124 ent
->ts2
= ktime_get_ns();
1125 memcpy(ent
->out
->first
.data
, ent
->lay
->out
, sizeof(ent
->lay
->out
));
1126 dump_command(dev
, ent
, 0);
1128 if (!cmd
->checksum_disabled
)
1129 ent
->ret
= verify_signature(ent
);
1132 ent
->status
= ent
->lay
->status_own
>> 1;
1133 mlx5_core_dbg(dev
, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1134 ent
->ret
, deliv_status_to_str(ent
->status
), ent
->status
);
1136 free_ent(cmd
, ent
->idx
);
1137 if (ent
->callback
) {
1138 ds
= ent
->ts2
- ent
->ts1
;
1139 if (ent
->op
< ARRAY_SIZE(cmd
->stats
)) {
1140 stats
= &cmd
->stats
[ent
->op
];
1141 spin_lock_irqsave(&stats
->lock
, flags
);
1144 spin_unlock_irqrestore(&stats
->lock
, flags
);
1147 callback
= ent
->callback
;
1148 context
= ent
->context
;
1151 err
= mlx5_copy_from_msg(ent
->uout
,
1155 mlx5_free_cmd_msg(dev
, ent
->out
);
1156 free_msg(dev
, ent
->in
);
1159 callback(err
, context
);
1161 complete(&ent
->done
);
1167 EXPORT_SYMBOL(mlx5_cmd_comp_handler
);
1169 static int status_to_err(u8 status
)
1171 return status
? -1 : 0; /* TBD more meaningful codes */
1174 static struct mlx5_cmd_msg
*alloc_msg(struct mlx5_core_dev
*dev
, int in_size
,
1177 struct mlx5_cmd_msg
*msg
= ERR_PTR(-ENOMEM
);
1178 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1179 struct cache_ent
*ent
= NULL
;
1181 if (in_size
> MED_LIST_SIZE
&& in_size
<= LONG_LIST_SIZE
)
1182 ent
= &cmd
->cache
.large
;
1183 else if (in_size
> 16 && in_size
<= MED_LIST_SIZE
)
1184 ent
= &cmd
->cache
.med
;
1187 spin_lock_irq(&ent
->lock
);
1188 if (!list_empty(&ent
->head
)) {
1189 msg
= list_entry(ent
->head
.next
, typeof(*msg
), list
);
1190 /* For cached lists, we must explicitly state what is
1194 list_del(&msg
->list
);
1196 spin_unlock_irq(&ent
->lock
);
1200 msg
= mlx5_alloc_cmd_msg(dev
, gfp
, in_size
);
1205 static int is_manage_pages(struct mlx5_inbox_hdr
*in
)
1207 return be16_to_cpu(in
->opcode
) == MLX5_CMD_OP_MANAGE_PAGES
;
1210 static int cmd_exec(struct mlx5_core_dev
*dev
, void *in
, int in_size
, void *out
,
1211 int out_size
, mlx5_cmd_cbk_t callback
, void *context
)
1213 struct mlx5_cmd_msg
*inb
;
1214 struct mlx5_cmd_msg
*outb
;
1220 pages_queue
= is_manage_pages(in
);
1221 gfp
= callback
? GFP_ATOMIC
: GFP_KERNEL
;
1223 inb
= alloc_msg(dev
, in_size
, gfp
);
1229 err
= mlx5_copy_to_msg(inb
, in
, in_size
);
1231 mlx5_core_warn(dev
, "err %d\n", err
);
1235 outb
= mlx5_alloc_cmd_msg(dev
, gfp
, out_size
);
1237 err
= PTR_ERR(outb
);
1241 err
= mlx5_cmd_invoke(dev
, inb
, outb
, out
, out_size
, callback
, context
,
1242 pages_queue
, &status
);
1246 mlx5_core_dbg(dev
, "err %d, status %d\n", err
, status
);
1248 err
= status_to_err(status
);
1253 err
= mlx5_copy_from_msg(out
, outb
, out_size
);
1257 mlx5_free_cmd_msg(dev
, outb
);
1265 int mlx5_cmd_exec(struct mlx5_core_dev
*dev
, void *in
, int in_size
, void *out
,
1268 return cmd_exec(dev
, in
, in_size
, out
, out_size
, NULL
, NULL
);
1270 EXPORT_SYMBOL(mlx5_cmd_exec
);
1272 int mlx5_cmd_exec_cb(struct mlx5_core_dev
*dev
, void *in
, int in_size
,
1273 void *out
, int out_size
, mlx5_cmd_cbk_t callback
,
1276 return cmd_exec(dev
, in
, in_size
, out
, out_size
, callback
, context
);
1278 EXPORT_SYMBOL(mlx5_cmd_exec_cb
);
1280 static void destroy_msg_cache(struct mlx5_core_dev
*dev
)
1282 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1283 struct mlx5_cmd_msg
*msg
;
1284 struct mlx5_cmd_msg
*n
;
1286 list_for_each_entry_safe(msg
, n
, &cmd
->cache
.large
.head
, list
) {
1287 list_del(&msg
->list
);
1288 mlx5_free_cmd_msg(dev
, msg
);
1291 list_for_each_entry_safe(msg
, n
, &cmd
->cache
.med
.head
, list
) {
1292 list_del(&msg
->list
);
1293 mlx5_free_cmd_msg(dev
, msg
);
1297 static int create_msg_cache(struct mlx5_core_dev
*dev
)
1299 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1300 struct mlx5_cmd_msg
*msg
;
1304 spin_lock_init(&cmd
->cache
.large
.lock
);
1305 INIT_LIST_HEAD(&cmd
->cache
.large
.head
);
1306 spin_lock_init(&cmd
->cache
.med
.lock
);
1307 INIT_LIST_HEAD(&cmd
->cache
.med
.head
);
1309 for (i
= 0; i
< NUM_LONG_LISTS
; i
++) {
1310 msg
= mlx5_alloc_cmd_msg(dev
, GFP_KERNEL
, LONG_LIST_SIZE
);
1315 msg
->cache
= &cmd
->cache
.large
;
1316 list_add_tail(&msg
->list
, &cmd
->cache
.large
.head
);
1319 for (i
= 0; i
< NUM_MED_LISTS
; i
++) {
1320 msg
= mlx5_alloc_cmd_msg(dev
, GFP_KERNEL
, MED_LIST_SIZE
);
1325 msg
->cache
= &cmd
->cache
.med
;
1326 list_add_tail(&msg
->list
, &cmd
->cache
.med
.head
);
1332 destroy_msg_cache(dev
);
1336 static int alloc_cmd_page(struct mlx5_core_dev
*dev
, struct mlx5_cmd
*cmd
)
1338 struct device
*ddev
= &dev
->pdev
->dev
;
1340 cmd
->cmd_alloc_buf
= dma_zalloc_coherent(ddev
, MLX5_ADAPTER_PAGE_SIZE
,
1341 &cmd
->alloc_dma
, GFP_KERNEL
);
1342 if (!cmd
->cmd_alloc_buf
)
1345 /* make sure it is aligned to 4K */
1346 if (!((uintptr_t)cmd
->cmd_alloc_buf
& (MLX5_ADAPTER_PAGE_SIZE
- 1))) {
1347 cmd
->cmd_buf
= cmd
->cmd_alloc_buf
;
1348 cmd
->dma
= cmd
->alloc_dma
;
1349 cmd
->alloc_size
= MLX5_ADAPTER_PAGE_SIZE
;
1353 dma_free_coherent(ddev
, MLX5_ADAPTER_PAGE_SIZE
, cmd
->cmd_alloc_buf
,
1355 cmd
->cmd_alloc_buf
= dma_zalloc_coherent(ddev
,
1356 2 * MLX5_ADAPTER_PAGE_SIZE
- 1,
1357 &cmd
->alloc_dma
, GFP_KERNEL
);
1358 if (!cmd
->cmd_alloc_buf
)
1361 cmd
->cmd_buf
= PTR_ALIGN(cmd
->cmd_alloc_buf
, MLX5_ADAPTER_PAGE_SIZE
);
1362 cmd
->dma
= ALIGN(cmd
->alloc_dma
, MLX5_ADAPTER_PAGE_SIZE
);
1363 cmd
->alloc_size
= 2 * MLX5_ADAPTER_PAGE_SIZE
- 1;
1367 static void free_cmd_page(struct mlx5_core_dev
*dev
, struct mlx5_cmd
*cmd
)
1369 struct device
*ddev
= &dev
->pdev
->dev
;
1371 dma_free_coherent(ddev
, cmd
->alloc_size
, cmd
->cmd_alloc_buf
,
1375 int mlx5_cmd_init(struct mlx5_core_dev
*dev
)
1377 int size
= sizeof(struct mlx5_cmd_prot_block
);
1378 int align
= roundup_pow_of_two(size
);
1379 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1385 cmd_if_rev
= cmdif_rev(dev
);
1386 if (cmd_if_rev
!= CMD_IF_REV
) {
1387 dev_err(&dev
->pdev
->dev
,
1388 "Driver cmdif rev(%d) differs from firmware's(%d)\n",
1389 CMD_IF_REV
, cmd_if_rev
);
1393 cmd
->pool
= pci_pool_create("mlx5_cmd", dev
->pdev
, size
, align
, 0);
1397 err
= alloc_cmd_page(dev
, cmd
);
1401 cmd_l
= ioread32be(&dev
->iseg
->cmdq_addr_l_sz
) & 0xff;
1402 cmd
->log_sz
= cmd_l
>> 4 & 0xf;
1403 cmd
->log_stride
= cmd_l
& 0xf;
1404 if (1 << cmd
->log_sz
> MLX5_MAX_COMMANDS
) {
1405 dev_err(&dev
->pdev
->dev
, "firmware reports too many outstanding commands %d\n",
1411 if (cmd
->log_sz
+ cmd
->log_stride
> MLX5_ADAPTER_PAGE_SHIFT
) {
1412 dev_err(&dev
->pdev
->dev
, "command queue size overflow\n");
1417 cmd
->checksum_disabled
= 1;
1418 cmd
->max_reg_cmds
= (1 << cmd
->log_sz
) - 1;
1419 cmd
->bitmask
= (1 << cmd
->max_reg_cmds
) - 1;
1421 cmd
->cmdif_rev
= ioread32be(&dev
->iseg
->cmdif_rev_fw_sub
) >> 16;
1422 if (cmd
->cmdif_rev
> CMD_IF_REV
) {
1423 dev_err(&dev
->pdev
->dev
, "driver does not support command interface version. driver %d, firmware %d\n",
1424 CMD_IF_REV
, cmd
->cmdif_rev
);
1429 spin_lock_init(&cmd
->alloc_lock
);
1430 spin_lock_init(&cmd
->token_lock
);
1431 for (i
= 0; i
< ARRAY_SIZE(cmd
->stats
); i
++)
1432 spin_lock_init(&cmd
->stats
[i
].lock
);
1434 sema_init(&cmd
->sem
, cmd
->max_reg_cmds
);
1435 sema_init(&cmd
->pages_sem
, 1);
1437 cmd_h
= (u32
)((u64
)(cmd
->dma
) >> 32);
1438 cmd_l
= (u32
)(cmd
->dma
);
1439 if (cmd_l
& 0xfff) {
1440 dev_err(&dev
->pdev
->dev
, "invalid command queue address\n");
1445 iowrite32be(cmd_h
, &dev
->iseg
->cmdq_addr_h
);
1446 iowrite32be(cmd_l
, &dev
->iseg
->cmdq_addr_l_sz
);
1448 /* Make sure firmware sees the complete address before we proceed */
1451 mlx5_core_dbg(dev
, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd
->dma
));
1453 cmd
->mode
= CMD_MODE_POLLING
;
1455 err
= create_msg_cache(dev
);
1457 dev_err(&dev
->pdev
->dev
, "failed to create command cache\n");
1462 cmd
->wq
= create_singlethread_workqueue(cmd
->wq_name
);
1464 dev_err(&dev
->pdev
->dev
, "failed to create command workqueue\n");
1469 err
= create_debugfs_files(dev
);
1478 destroy_workqueue(cmd
->wq
);
1481 destroy_msg_cache(dev
);
1484 free_cmd_page(dev
, cmd
);
1487 pci_pool_destroy(cmd
->pool
);
1491 EXPORT_SYMBOL(mlx5_cmd_init
);
1493 void mlx5_cmd_cleanup(struct mlx5_core_dev
*dev
)
1495 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1497 clean_debug_files(dev
);
1498 destroy_workqueue(cmd
->wq
);
1499 destroy_msg_cache(dev
);
1500 free_cmd_page(dev
, cmd
);
1501 pci_pool_destroy(cmd
->pool
);
1503 EXPORT_SYMBOL(mlx5_cmd_cleanup
);
1505 static const char *cmd_status_str(u8 status
)
1508 case MLX5_CMD_STAT_OK
:
1510 case MLX5_CMD_STAT_INT_ERR
:
1511 return "internal error";
1512 case MLX5_CMD_STAT_BAD_OP_ERR
:
1513 return "bad operation";
1514 case MLX5_CMD_STAT_BAD_PARAM_ERR
:
1515 return "bad parameter";
1516 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR
:
1517 return "bad system state";
1518 case MLX5_CMD_STAT_BAD_RES_ERR
:
1519 return "bad resource";
1520 case MLX5_CMD_STAT_RES_BUSY
:
1521 return "resource busy";
1522 case MLX5_CMD_STAT_LIM_ERR
:
1523 return "limits exceeded";
1524 case MLX5_CMD_STAT_BAD_RES_STATE_ERR
:
1525 return "bad resource state";
1526 case MLX5_CMD_STAT_IX_ERR
:
1528 case MLX5_CMD_STAT_NO_RES_ERR
:
1529 return "no resources";
1530 case MLX5_CMD_STAT_BAD_INP_LEN_ERR
:
1531 return "bad input length";
1532 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR
:
1533 return "bad output length";
1534 case MLX5_CMD_STAT_BAD_QP_STATE_ERR
:
1535 return "bad QP state";
1536 case MLX5_CMD_STAT_BAD_PKT_ERR
:
1537 return "bad packet (discarded)";
1538 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR
:
1539 return "bad size too many outstanding CQEs";
1541 return "unknown status";
1545 static int cmd_status_to_err(u8 status
)
1548 case MLX5_CMD_STAT_OK
: return 0;
1549 case MLX5_CMD_STAT_INT_ERR
: return -EIO
;
1550 case MLX5_CMD_STAT_BAD_OP_ERR
: return -EINVAL
;
1551 case MLX5_CMD_STAT_BAD_PARAM_ERR
: return -EINVAL
;
1552 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR
: return -EIO
;
1553 case MLX5_CMD_STAT_BAD_RES_ERR
: return -EINVAL
;
1554 case MLX5_CMD_STAT_RES_BUSY
: return -EBUSY
;
1555 case MLX5_CMD_STAT_LIM_ERR
: return -ENOMEM
;
1556 case MLX5_CMD_STAT_BAD_RES_STATE_ERR
: return -EINVAL
;
1557 case MLX5_CMD_STAT_IX_ERR
: return -EINVAL
;
1558 case MLX5_CMD_STAT_NO_RES_ERR
: return -EAGAIN
;
1559 case MLX5_CMD_STAT_BAD_INP_LEN_ERR
: return -EIO
;
1560 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR
: return -EIO
;
1561 case MLX5_CMD_STAT_BAD_QP_STATE_ERR
: return -EINVAL
;
1562 case MLX5_CMD_STAT_BAD_PKT_ERR
: return -EINVAL
;
1563 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR
: return -EINVAL
;
1564 default: return -EIO
;
1568 /* this will be available till all the commands use set/get macros */
1569 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr
*hdr
)
1574 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1575 cmd_status_str(hdr
->status
), hdr
->status
,
1576 be32_to_cpu(hdr
->syndrome
));
1578 return cmd_status_to_err(hdr
->status
);
1581 int mlx5_cmd_status_to_err_v2(void *ptr
)
1586 status
= be32_to_cpu(*(__be32
*)ptr
) >> 24;
1590 syndrome
= be32_to_cpu(*(__be32
*)(ptr
+ 4));
1592 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1593 cmd_status_str(status
), status
, syndrome
);
1595 return cmd_status_to_err(status
);