mlx5_core: Add support for page faults events and low level handling
[deliverable/linux.git] / include / linux / mlx5 / qp.h
1 /*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef MLX5_QP_H
34 #define MLX5_QP_H
35
36 #include <linux/mlx5/device.h>
37 #include <linux/mlx5/driver.h>
38
39 #define MLX5_INVALID_LKEY 0x100
40 #define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
41 #define MLX5_DIF_SIZE 8
42 #define MLX5_STRIDE_BLOCK_OP 0x400
43 #define MLX5_CPY_GRD_MASK 0xc0
44 #define MLX5_CPY_APP_MASK 0x30
45 #define MLX5_CPY_REF_MASK 0x0f
46 #define MLX5_BSF_INC_REFTAG (1 << 6)
47 #define MLX5_BSF_INL_VALID (1 << 15)
48 #define MLX5_BSF_REFRESH_DIF (1 << 14)
49 #define MLX5_BSF_REPEAT_BLOCK (1 << 7)
50 #define MLX5_BSF_APPTAG_ESCAPE 0x1
51 #define MLX5_BSF_APPREF_ESCAPE 0x2
52
53 #define MLX5_QPN_BITS 24
54 #define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
55
/*
 * Optional-parameter mask bits: each bit selects one QP context field that
 * a state transition is allowed to update.  Presumably consumed through the
 * opt_param_mask/optparam words of the create/modify mailboxes below --
 * confirm against the command layer.
 */
enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	/* bits 11 and 15..17 are not defined here -- presumably reserved */
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};
77
/*
 * QP states.  Values are the device encoding -- note there is no state for
 * value 8, and MLX5_QP_NUM_STATE (10) is a count/sentinel, not a real state
 * (see mlx5_qp_state_str() below, which returns "Invalid QP state" for 8).
 */
enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE
};
90
/*
 * QP service/transport types (MLX5_QP_ST_*), including special firmware QP
 * flavours (QP0/QP1, sniffer, UMR, PTP).  Listed out of numeric order for
 * 0xc..0xe; the values, not the order, are what matters.
 * See mlx5_qp_type_str() for the printable names.
 */
enum {
	MLX5_QP_ST_RC				= 0x0,
	MLX5_QP_ST_UC				= 0x1,
	MLX5_QP_ST_UD				= 0x2,
	MLX5_QP_ST_XRC				= 0x3,
	MLX5_QP_ST_MLX				= 0x4,
	MLX5_QP_ST_DCI				= 0x5,
	MLX5_QP_ST_DCT				= 0x6,
	MLX5_QP_ST_QP0				= 0x7,
	MLX5_QP_ST_QP1				= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
	MLX5_QP_ST_RAW_IPV6			= 0xa,
	MLX5_QP_ST_SNIFFER			= 0xb,
	MLX5_QP_ST_SYNC_UMR			= 0xe,
	MLX5_QP_ST_PTP_1588			= 0xd,
	MLX5_QP_ST_REG_UMR			= 0xc,
	MLX5_QP_ST_MAX
};

/* Path-migration states (selected via MLX5_QP_OPTPAR_PM_STATE). */
enum {
	MLX5_QP_PM_MIGRATED			= 0x3,
	MLX5_QP_PM_ARMED			= 0x0,
	MLX5_QP_PM_REARM			= 0x1
};
115
/* Receive-queue type, encoded in bits 25:24 (cf. rq_type_srqn in the QPC). */
enum {
	MLX5_NON_ZERO_RQ	= 0 << 24,
	MLX5_SRQ_RQ		= 1 << 24,
	MLX5_CRQ_RQ		= 2 << 24,
	MLX5_ZERO_LEN_RQ	= 3 << 24
};

/* Enable bits inside the QP context params1/params2 words. */
enum {
	/* params1 */
	MLX5_QP_BIT_SRE		= 1 << 15,
	MLX5_QP_BIT_SWE		= 1 << 14,
	MLX5_QP_BIT_SAE		= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE		= 1 << 15,
	MLX5_QP_BIT_RWE		= 1 << 14,
	MLX5_QP_BIT_RAE		= 1 << 13,
	MLX5_QP_BIT_RIC		= 1 << 4,
};

/* fm_ce_se bits of the WQE control segment. */
enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};

/* Send WQE basic block size in bytes. */
enum {
	MLX5_SEND_WQE_BB	= 64,
};

/* Access-permission flags for the FMR WQE segment (mlx5_wqe_fmr_seg.flags). */
enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
};

/* Fence modes, encoded in bits 7:5 -- presumably of fm_ce_se; confirm. */
enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};

/* Flags in the QP context "flags" word. */
enum {
	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
	MLX5_QP_BLOCK_MCAST	= 1 << 30,
	MLX5_QP_ENABLE_SIG	= 1 << 31,
};

/* Doorbell record indices: receive vs. send. */
enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

/* Per-WQE flag bits (inline data present / free-check). */
enum {
	MLX5_FLAGS_INLINE	= 1<<7,
	MLX5_FLAGS_CHECK_FREE   = 1<<5,
};
174
/*
 * WQE segment layouts.  All __be* fields are big-endian as consumed by the
 * device; field order and sizes mirror the hardware format -- do not reorder.
 */

/* Fast-memory-registration (FMR) segment; flags take MLX5_WQE_FMR_PERM_*. */
struct mlx5_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

/* Control segment: first segment of every send WQE. */
struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;	/* opcode modifier, WQE index, opcode */
	__be32			qpn_ds;			/* QPN and WQE size in 16-byte DS units */
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;		/* fence / completion / solicited flags */
	__be32			imm;			/* immediate data */
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f
#define MLX5_WQE_DS_UNITS 16

/* XRC segment: target XRC SRQ number. */
struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

/* Masked atomic (compare-and-swap / fetch-and-add with masks) segment. */
struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};
209
/*
 * Address vector: destination addressing for UD/DC traffic.  key holds the
 * Q_Key for UD or the DC access key for DC QPs.
 */
struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;
	} key;
	__be32	dqp_dct;	/* destination QP or DCT number */
	u8	stat_rate_sl;
	u8	fl_mlid;
	__be16	rlid;		/* remote LID */
	u8	reserved0[10];
	u8	tclass;
	u8	hop_limit;
	__be32	grh_gid_fl;
	u8	rgid[16];	/* remote GID */
};

/* Datagram segment: just the address vector. */
struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

/* Remote address segment for RDMA read/write/atomic. */
struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

/* Plain atomic (compare-and-swap / fetch-and-add) segment. */
struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

/* Scatter/gather data pointer: length, local key, virtual address. */
struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};
249
/* UMR (user-mode memory registration) WQE control segment. */
struct mlx5_wqe_umr_ctrl_seg {
	u8		flags;
	u8		rsvd0[3];
	__be16		klm_octowords;	/* KLM list size in 16-byte units */
	__be16		bsf_octowords;	/* BSF size in 16-byte units */
	__be64		mkey_mask;	/* which mkey fields the UMR updates */
	u8		rsvd1[32];
};

/* SET_PSV segment: programs a PSV (protection signature value). */
struct mlx5_seg_set_psv {
	__be32		psv_num;
	__be16		syndrome;
	__be16		status;
	__be32		transient_sig;
	__be32		ref_tag;
};

/* GET_PSV segment: reads back up to 4 PSVs into the given buffer. */
struct mlx5_seg_get_psv {
	u8		rsvd[19];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

/* CHECK_PSV segment: signature/transport error checking controls. */
struct mlx5_seg_check_psv {
	u8		rsvd0[2];
	__be16		err_coalescing_op;
	u8		rsvd1[2];
	__be16		xport_err_op;
	u8		rsvd2[2];
	__be16		xport_err_mask;
	u8		rsvd3[7];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

/* Receive WQE signature block. */
struct mlx5_rwqe_sig {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

/* Send WQE signature segment (same 16-byte layout as mlx5_rwqe_sig). */
struct mlx5_wqe_signature_seg {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

/* Inline-data header: byte count, followed by the inline payload. */
struct mlx5_wqe_inline_seg {
	__be32	byte_count;
};
304
/* Signature checksum types for T10-DIF offload: CRC or IP checksum. */
enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

/* Inline BSF DIF parameters (one each for wire and memory domains). */
struct mlx5_bsf_inl {
	__be16		vld_refresh;	/* MLX5_BSF_INL_VALID / MLX5_BSF_REFRESH_DIF bits */
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;	/* enum mlx5_sig_type */
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};

/*
 * Byte Stream Format (BSF) descriptor for signature offload: basic and
 * extended controls plus inline DIF parameters for the wire ("w_") and
 * memory ("m_") domains.
 */
struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;
		__be32		m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;
	struct mlx5_bsf_inl	m_inl;
};

/* KLM entry: byte count, memory key, virtual address. */
struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

/* One entry of a strided block list (cf. MLX5_STRIDE_BLOCK_OP). */
struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

/* Control header preceding an array of mlx5_stride_block_entry. */
struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};
368
/* Flag bits qualifying a page fault event (struct mlx5_pagefault.flags). */
enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,	/* fault taken on the requestor side */
	MLX5_PFAULT_WRITE     = 1 << 1,	/* faulting access was a write */
	MLX5_PFAULT_RDMA      = 1 << 2,	/* fault raised by an RDMA operation */
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;	/* WQE vs. RDMA fault; values defined
						 * elsewhere (device.h) -- confirm */
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};
404
/*
 * Core driver representation of a QP, stored in the device's QP radix tree
 * (see __mlx5_qp_lookup()).  The embedded common resource header must stay
 * the first member so the QP can be found by a generic resource lookup.
 */
struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)		(struct mlx5_core_qp *, int);	/* async event callback */
	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
	int			qpn;	/* QP number */
	struct mlx5_rsc_debug	*dbg;	/* debugfs state, if registered */
	int			pid;	/* owning process, for debug */
};
413
/*
 * Address path portion of the QP context (primary and alternate paths).
 * Big-endian layout appears to mirror the device's format -- do not reorder.
 */
struct mlx5_qp_path {
	u8			fl;
	u8			rsvd3;
	u8			free_ar;
	u8			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;
	__be16			rlid;
	u8			ackto_lt;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	u8			rgid[16];
	u8			rsvd1[4];
	u8			sl;
	u8			port;
	u8			rsvd2[6];
};

/*
 * QP context as exchanged with firmware in the CREATE/MODIFY/QUERY_QP
 * mailboxes below.  Big-endian fields, fixed layout -- do not reorder.
 */
struct mlx5_qp_context {
	__be32			flags;		/* incl. MLX5_QP_LAT_SENSITIVE etc. */
	__be32			flags_pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct			mlx5_qp_path pri_path;
	struct			mlx5_qp_path alt_path;
	__be32			params1;	/* MLX5_QP_BIT_S?E bits */
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;
	u8			reserved3[8];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;	/* MLX5_QP_BIT_R?E / RIC bits */
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;	/* doorbell record DMA address */
	__be32			qkey;
	__be32			rq_type_srqn;	/* RQ type (bits 25:24) and SRQ number */
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;
	u8			cs_res;
	__be64			dc_access_key;
	u8			rsvd1[24];
};
473
/*
 * Command mailboxes for the QP and XRCD firmware commands.  Each pair wraps
 * the generic inbox/outbox headers; layouts are part of the device command
 * interface -- do not reorder fields.
 */

/* CREATE_QP input: context plus trailing physical address list. */
struct mlx5_create_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_qpn;
	u8			rsvd0[4];
	__be32			opt_param_mask;
	u8			rsvd1[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd3[16];
	__be64			pas[0];		/* page addresses, variable length */
};

/* CREATE_QP output: the allocated QP number. */
struct mlx5_create_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

/* DESTROY_QP input. */
struct mlx5_destroy_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

/* DESTROY_QP output. */
struct mlx5_destroy_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

/* MODIFY_QP input: optparam selects which context fields to apply. */
struct mlx5_modify_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd1[4];
	__be32			optparam;	/* enum mlx5_qp_optpar bits */
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
};

/* MODIFY_QP output. */
struct mlx5_modify_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

/* QUERY_QP input. */
struct mlx5_query_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[4];
};

/* QUERY_QP output: current context plus trailing page address list. */
struct mlx5_query_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd1[8];
	__be32			optparam;
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd2[16];
	__be64			pas[0];
};

/* CONF_SQP input: configure a special QP of the given type. */
struct mlx5_conf_sqp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[3];
	u8			type;
};

/* CONF_SQP output. */
struct mlx5_conf_sqp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/* ALLOC_XRCD input. */
struct mlx5_alloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

/* ALLOC_XRCD output: allocated XRC domain number. */
struct mlx5_alloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

/* DEALLOC_XRCD input. */
struct mlx5_dealloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

/* DEALLOC_XRCD output. */
struct mlx5_dealloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
565
/*
 * Look up a QP by number in the device's QP radix tree; returns NULL when
 * not found.  NOTE(review): no locking visible here -- presumably the caller
 * serializes against table updates (lock or RCU); confirm at call sites.
 */
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}
570
/*
 * Look up a memory region by key in the device's MR radix tree; returns NULL
 * when not found.  Same locking caveat as __mlx5_qp_lookup() above.
 */
static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
575
/* PAGE_FAULT_RESUME input: flags and QPN packed into one big-endian word. */
struct mlx5_page_fault_resume_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			flags_qpn;
	u8			reserved[4];
};

/* PAGE_FAULT_RESUME output. */
struct mlx5_page_fault_resume_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
586
/* Create a QP from the given mailbox; fills qp->qpn on success. */
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen);
/* Transition a QP between the given states, applying the mailbox context. */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
/* Query the current QP context into @out (@outlen bytes). */
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Resume a QP stalled on a page fault (on-demand paging only). */
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 context, int error);
#endif
610
611 static inline const char *mlx5_qp_type_str(int type)
612 {
613 switch (type) {
614 case MLX5_QP_ST_RC: return "RC";
615 case MLX5_QP_ST_UC: return "C";
616 case MLX5_QP_ST_UD: return "UD";
617 case MLX5_QP_ST_XRC: return "XRC";
618 case MLX5_QP_ST_MLX: return "MLX";
619 case MLX5_QP_ST_QP0: return "QP0";
620 case MLX5_QP_ST_QP1: return "QP1";
621 case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
622 case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
623 case MLX5_QP_ST_SNIFFER: return "SNIFFER";
624 case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
625 case MLX5_QP_ST_PTP_1588: return "PTP_1588";
626 case MLX5_QP_ST_REG_UMR: return "REG_UMR";
627 default: return "Invalid transport type";
628 }
629 }
630
631 static inline const char *mlx5_qp_state_str(int state)
632 {
633 switch (state) {
634 case MLX5_QP_STATE_RST:
635 return "RST";
636 case MLX5_QP_STATE_INIT:
637 return "INIT";
638 case MLX5_QP_STATE_RTR:
639 return "RTR";
640 case MLX5_QP_STATE_RTS:
641 return "RTS";
642 case MLX5_QP_STATE_SQER:
643 return "SQER";
644 case MLX5_QP_STATE_SQD:
645 return "SQD";
646 case MLX5_QP_STATE_ERR:
647 return "ERR";
648 case MLX5_QP_STATE_SQ_DRAINING:
649 return "SQ_DRAINING";
650 case MLX5_QP_STATE_SUSPENDED:
651 return "SUSPENDED";
652 default: return "Invalid QP state";
653 }
654 }
655
656 #endif /* MLX5_QP_H */
This page took 0.053311 seconds and 5 git commands to generate.