/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
/* Sentinel lkey value that no valid memory key can take. */
#define MLX5_INVALID_LKEY	0x100
/* A signature-enabled WQE spans five basic blocks. */
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
/* T10-DIF tuple is 8 bytes (guard, app tag, ref tag). */
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
/* Masks selecting which DIF fields are copied from the wire domain. */
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
/* Byte-stream signature format (BSF) control bits. */
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2

/* QP numbers are 24 bits wide. */
#define MLX5_QPN_BITS		24
#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)
/*
 * Optional-parameter mask bits for QP modify commands: each bit selects
 * one optional field of the QP context that the command updates.
 */
enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	/* bit 11 is reserved in this mask */
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};
/*
 * Hardware QP state encoding, following the InfiniBand QP state machine
 * (RESET -> INIT -> RTR -> RTS, plus error/drain states).
 */
enum mlx5_qp_state {
	MLX5_QP_STATE_RST		= 0,
	MLX5_QP_STATE_INIT		= 1,
	MLX5_QP_STATE_RTR		= 2,
	MLX5_QP_STATE_RTS		= 3,
	MLX5_QP_STATE_SQER		= 4,
	MLX5_QP_STATE_SQD		= 5,
	MLX5_QP_STATE_ERR		= 6,
	MLX5_QP_STATE_SQ_DRAINING	= 7,
	/* value 8 is not used by the device */
	MLX5_QP_STATE_SUSPENDED		= 9,
};
94 MLX5_SQ_STATE_NA
= MLX5_SQC_STATE_ERR
+ 1,
95 MLX5_SQ_NUM_STATE
= MLX5_SQ_STATE_NA
+ 1,
96 MLX5_RQ_STATE_NA
= MLX5_RQC_STATE_ERR
+ 1,
97 MLX5_RQ_NUM_STATE
= MLX5_RQ_STATE_NA
+ 1,
/*
 * QP transport/service types.  Note the values are not fully sequential:
 * PTP_1588 (0xd) deliberately follows SYNC_UMR (0xe) in source order to
 * match the original listing.
 *
 * NOTE(review): RC/UC/UD (0x0-0x2) restored here; they are referenced by
 * mlx5_qp_type_str() below — confirm values against the device manual.
 */
enum {
	MLX5_QP_ST_RC			= 0x0,
	MLX5_QP_ST_UC			= 0x1,
	MLX5_QP_ST_UD			= 0x2,
	MLX5_QP_ST_XRC			= 0x3,
	MLX5_QP_ST_MLX			= 0x4,
	MLX5_QP_ST_DCI			= 0x5,
	MLX5_QP_ST_DCT			= 0x6,
	MLX5_QP_ST_QP0			= 0x7,
	MLX5_QP_ST_QP1			= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE	= 0x9,
	MLX5_QP_ST_RAW_IPV6		= 0xa,
	MLX5_QP_ST_SNIFFER		= 0xb,
	MLX5_QP_ST_SYNC_UMR		= 0xe,
	MLX5_QP_ST_PTP_1588		= 0xd,
	MLX5_QP_ST_REG_UMR		= 0xc,
};
/* Path-migration states of a QP (listed MIGRATED-first as in the source). */
enum {
	MLX5_QP_PM_MIGRATED	= 0x3,
	MLX5_QP_PM_ARMED	= 0x0,
	MLX5_QP_PM_REARM	= 0x1,
};
/* Receive-queue type selector, encoded in bits 24-25 of the QP context flags. */
enum {
	MLX5_NON_ZERO_RQ	= 0 << 24,
	MLX5_SRQ_RQ		= 1 << 24,
	MLX5_CRQ_RQ		= 2 << 24,
	MLX5_ZERO_LEN_RQ	= 3 << 24,
};
/*
 * QP context capability bits.  The first three live in params1 (send side),
 * the rest in params2 (receive side) — which is why SRE/RRE etc. reuse the
 * same bit positions.
 */
enum {
	/* params1 */
	MLX5_QP_BIT_SRE			= 1 << 15,
	MLX5_QP_BIT_SWE			= 1 << 14,
	MLX5_QP_BIT_SAE			= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE			= 1 << 15,
	MLX5_QP_BIT_RWE			= 1 << 14,
	MLX5_QP_BIT_RAE			= 1 << 13,
	MLX5_QP_BIT_RIC			= 1 << 4,
	MLX5_QP_BIT_CC_SLAVE_RECV	= 1 << 2,
	MLX5_QP_BIT_CC_SLAVE_SEND	= 1 << 1,
	MLX5_QP_BIT_CC_MASTER		= 1 << 0,
};
/* Completion/solicit flags carried in the WQE control segment's fm_ce_se byte. */
enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};
/* Send WQE geometry: a data segment (DS) is 16 bytes, a basic block (BB) 64. */
enum {
	MLX5_SEND_WQE_DS	= 16,
	MLX5_SEND_WQE_BB	= 64,
};

/* Number of 16-byte data segments per 64-byte basic block. */
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
/* Upper bound on the number of basic blocks a single send WQE may span. */
enum {
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};
/* Access-permission bits for the fast-memory-registration (FMR) WQE segment. */
enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31,
};
/* Fence-mode field of the WQE control segment, encoded in bits 5-7. */
enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_FENCE			= 2 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};
/* Miscellaneous QP context flag bits. */
enum {
	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
	MLX5_QP_BLOCK_MCAST	= 1 << 30,
	MLX5_QP_ENABLE_SIG	= 1 << 31,
};
/* Flag bits of the UMR control segment. */
enum {
	MLX5_FLAGS_INLINE	= 1 << 7,
	MLX5_FLAGS_CHECK_FREE	= 1 << 5,
};
196 struct mlx5_wqe_fmr_seg
{
207 struct mlx5_wqe_ctrl_seg
{
208 __be32 opmod_idx_opcode
;
/* Field extraction masks/shifts for the WQE control segment words. */
#define MLX5_WQE_CTRL_DS_MASK		0x3f
#define MLX5_WQE_CTRL_QPN_MASK		0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT		8
#define MLX5_WQE_DS_UNITS		16
#define MLX5_WQE_CTRL_OPCODE_MASK	0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK	0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT	8
/* Checksum-offload request bits in the Ethernet WQE segment. */
enum {
	MLX5_ETH_WQE_L3_INNER_CSUM	= 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM	= 1 << 5,
	MLX5_ETH_WQE_L3_CSUM		= 1 << 6,
	MLX5_ETH_WQE_L4_CSUM		= 1 << 7,
};
231 struct mlx5_wqe_eth_seg
{
237 __be16 inline_hdr_sz
;
238 u8 inline_hdr_start
[2];
241 struct mlx5_wqe_xrc_seg
{
246 struct mlx5_wqe_masked_atomic_seg
{
249 __be64 swap_add_mask
;
276 struct mlx5_wqe_datagram_seg
{
280 struct mlx5_wqe_raddr_seg
{
286 struct mlx5_wqe_atomic_seg
{
291 struct mlx5_wqe_data_seg
{
297 struct mlx5_wqe_umr_ctrl_seg
{
300 __be16 klm_octowords
;
301 __be16 bsf_octowords
;
306 struct mlx5_seg_set_psv
{
310 __be32 transient_sig
;
314 struct mlx5_seg_get_psv
{
322 struct mlx5_seg_check_psv
{
324 __be16 err_coalescing_op
;
328 __be16 xport_err_mask
;
336 struct mlx5_rwqe_sig
{
342 struct mlx5_wqe_signature_seg
{
348 #define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
350 struct mlx5_wqe_inline_seg
{
359 struct mlx5_bsf_inl
{
366 u8 dif_inc_ref_guard_check
;
367 __be16 dif_app_bitmask_check
;
371 struct mlx5_bsf_basic
{
383 __be32 raw_data_size
;
387 struct mlx5_bsf_ext
{
388 __be32 t_init_gen_pro_size
;
389 __be32 rsvd_epi_size
;
393 struct mlx5_bsf_inl w_inl
;
394 struct mlx5_bsf_inl m_inl
;
403 struct mlx5_stride_block_entry
{
410 struct mlx5_stride_block_ctrl_seg
{
411 __be32 bcount_per_cycle
;
/* Attributes of a reported page fault (on-demand paging). */
enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR	= 1 << 0,	/* fault raised on the requestor side */
	MLX5_PFAULT_WRITE	= 1 << 1,	/* faulting access was a write */
	MLX5_PFAULT_RDMA	= 1 << 2,	/* fault occurred on an RDMA operation */
};
424 /* Contains the details of a pagefault. */
425 struct mlx5_pagefault
{
428 enum mlx5_pagefault_flags flags
;
430 /* Initiator or send message responder pagefault details. */
432 /* Received packet size, only valid for responders. */
435 * WQE index. Refers to either the send queue or
436 * receive queue, according to event_subtype.
440 /* RDMA responder pagefault details */
444 * Received packet size, minimal size page fault
445 * resolution required for forward progress.
454 struct mlx5_core_qp
{
455 struct mlx5_core_rsc_common common
; /* must be first */
456 void (*event
) (struct mlx5_core_qp
*, int);
457 void (*pfault_handler
)(struct mlx5_core_qp
*, struct mlx5_pagefault
*);
459 struct mlx5_rsc_debug
*dbg
;
463 struct mlx5_qp_path
{
474 __be32 tclass_flowlabel
;
487 struct mlx5_qp_context
{
493 __be32 qp_counter_set_usr_page
;
495 __be32 log_pg_sz_remote_qpn
;
496 struct mlx5_qp_path pri_path
;
497 struct mlx5_qp_path alt_path
;
500 __be32 next_send_psn
;
504 __be32 last_acked_psn
;
507 __be32 rnr_nextrecvpsn
;
514 __be16 hw_sq_wqe_counter
;
515 __be16 sw_sq_wqe_counter
;
516 __be16 hw_rcyclic_byte_counter
;
517 __be16 hw_rq_counter
;
518 __be16 sw_rcyclic_byte_counter
;
519 __be16 sw_rq_counter
;
524 __be64 dc_access_key
;
528 struct mlx5_create_qp_mbox_in
{
529 struct mlx5_inbox_hdr hdr
;
532 __be32 opt_param_mask
;
534 struct mlx5_qp_context ctx
;
539 struct mlx5_create_qp_mbox_out
{
540 struct mlx5_outbox_hdr hdr
;
545 struct mlx5_destroy_qp_mbox_in
{
546 struct mlx5_inbox_hdr hdr
;
551 struct mlx5_destroy_qp_mbox_out
{
552 struct mlx5_outbox_hdr hdr
;
556 struct mlx5_modify_qp_mbox_in
{
557 struct mlx5_inbox_hdr hdr
;
562 struct mlx5_qp_context ctx
;
566 struct mlx5_modify_qp_mbox_out
{
567 struct mlx5_outbox_hdr hdr
;
571 struct mlx5_query_qp_mbox_in
{
572 struct mlx5_inbox_hdr hdr
;
577 struct mlx5_query_qp_mbox_out
{
578 struct mlx5_outbox_hdr hdr
;
582 struct mlx5_qp_context ctx
;
587 struct mlx5_conf_sqp_mbox_in
{
588 struct mlx5_inbox_hdr hdr
;
594 struct mlx5_conf_sqp_mbox_out
{
595 struct mlx5_outbox_hdr hdr
;
599 struct mlx5_alloc_xrcd_mbox_in
{
600 struct mlx5_inbox_hdr hdr
;
604 struct mlx5_alloc_xrcd_mbox_out
{
605 struct mlx5_outbox_hdr hdr
;
610 struct mlx5_dealloc_xrcd_mbox_in
{
611 struct mlx5_inbox_hdr hdr
;
616 struct mlx5_dealloc_xrcd_mbox_out
{
617 struct mlx5_outbox_hdr hdr
;
621 static inline struct mlx5_core_qp
*__mlx5_qp_lookup(struct mlx5_core_dev
*dev
, u32 qpn
)
623 return radix_tree_lookup(&dev
->priv
.qp_table
.tree
, qpn
);
626 static inline struct mlx5_core_mkey
*__mlx5_mr_lookup(struct mlx5_core_dev
*dev
, u32 key
)
628 return radix_tree_lookup(&dev
->priv
.mkey_table
.tree
, key
);
631 struct mlx5_page_fault_resume_mbox_in
{
632 struct mlx5_inbox_hdr hdr
;
637 struct mlx5_page_fault_resume_mbox_out
{
638 struct mlx5_outbox_hdr hdr
;
642 int mlx5_core_create_qp(struct mlx5_core_dev
*dev
,
643 struct mlx5_core_qp
*qp
,
644 struct mlx5_create_qp_mbox_in
*in
,
646 int mlx5_core_qp_modify(struct mlx5_core_dev
*dev
, u16 operation
,
647 struct mlx5_modify_qp_mbox_in
*in
, int sqd_event
,
648 struct mlx5_core_qp
*qp
);
649 int mlx5_core_destroy_qp(struct mlx5_core_dev
*dev
,
650 struct mlx5_core_qp
*qp
);
651 int mlx5_core_qp_query(struct mlx5_core_dev
*dev
, struct mlx5_core_qp
*qp
,
652 struct mlx5_query_qp_mbox_out
*out
, int outlen
);
654 int mlx5_core_xrcd_alloc(struct mlx5_core_dev
*dev
, u32
*xrcdn
);
655 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev
*dev
, u32 xrcdn
);
656 void mlx5_init_qp_table(struct mlx5_core_dev
*dev
);
657 void mlx5_cleanup_qp_table(struct mlx5_core_dev
*dev
);
658 int mlx5_debug_qp_add(struct mlx5_core_dev
*dev
, struct mlx5_core_qp
*qp
);
659 void mlx5_debug_qp_remove(struct mlx5_core_dev
*dev
, struct mlx5_core_qp
*qp
);
660 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
661 int mlx5_core_page_fault_resume(struct mlx5_core_dev
*dev
, u32 qpn
,
662 u8 context
, int error
);
664 int mlx5_core_create_rq_tracked(struct mlx5_core_dev
*dev
, u32
*in
, int inlen
,
665 struct mlx5_core_qp
*rq
);
666 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev
*dev
,
667 struct mlx5_core_qp
*rq
);
668 int mlx5_core_create_sq_tracked(struct mlx5_core_dev
*dev
, u32
*in
, int inlen
,
669 struct mlx5_core_qp
*sq
);
670 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev
*dev
,
671 struct mlx5_core_qp
*sq
);
672 int mlx5_core_alloc_q_counter(struct mlx5_core_dev
*dev
, u16
*counter_id
);
673 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev
*dev
, u16 counter_id
);
674 int mlx5_core_query_q_counter(struct mlx5_core_dev
*dev
, u16 counter_id
,
675 int reset
, void *out
, int out_size
);
676 int mlx5_core_query_out_of_buffer(struct mlx5_core_dev
*dev
, u16 counter_id
,
679 static inline const char *mlx5_qp_type_str(int type
)
682 case MLX5_QP_ST_RC
: return "RC";
683 case MLX5_QP_ST_UC
: return "C";
684 case MLX5_QP_ST_UD
: return "UD";
685 case MLX5_QP_ST_XRC
: return "XRC";
686 case MLX5_QP_ST_MLX
: return "MLX";
687 case MLX5_QP_ST_QP0
: return "QP0";
688 case MLX5_QP_ST_QP1
: return "QP1";
689 case MLX5_QP_ST_RAW_ETHERTYPE
: return "RAW_ETHERTYPE";
690 case MLX5_QP_ST_RAW_IPV6
: return "RAW_IPV6";
691 case MLX5_QP_ST_SNIFFER
: return "SNIFFER";
692 case MLX5_QP_ST_SYNC_UMR
: return "SYNC_UMR";
693 case MLX5_QP_ST_PTP_1588
: return "PTP_1588";
694 case MLX5_QP_ST_REG_UMR
: return "REG_UMR";
695 default: return "Invalid transport type";
699 static inline const char *mlx5_qp_state_str(int state
)
702 case MLX5_QP_STATE_RST
:
704 case MLX5_QP_STATE_INIT
:
706 case MLX5_QP_STATE_RTR
:
708 case MLX5_QP_STATE_RTS
:
710 case MLX5_QP_STATE_SQER
:
712 case MLX5_QP_STATE_SQD
:
714 case MLX5_QP_STATE_ERR
:
716 case MLX5_QP_STATE_SQ_DRAINING
:
717 return "SQ_DRAINING";
718 case MLX5_QP_STATE_SUSPENDED
:
720 default: return "Invalid QP state";
724 #endif /* MLX5_QP_H */