IB/mlx5: Support the new memory registration API
drivers/infiniband/hw/mlx5/mlx5_ib.h
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
		__LINE__, current->pid, ##arg)

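/*
 * Example (illustrative, not part of the original header): these macros
 * prefix every message with the device name, function, line and pid, so a
 * call such as the following carries full context without extra effort:
 *
 *	mlx5_ib_warn(dev, "failed to create QP, err %d\n", err);
 */
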
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1, /* always last */
};

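/*
 * Illustrative sketch (not in the original header): the mmap offset packs
 * a command and an argument index. Assuming the command occupies the low
 * byte of vm_pgoff and the index sits above MLX5_IB_MMAP_CMD_SHIFT -- the
 * authoritative packing lives in mlx5_ib_mmap() in main.c -- decoding
 * looks roughly like:
 *
 *	command = vma->vm_pgoff & MLX5_IB_MMAP_CMD_MASK;
 *	idx     = vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT;
 */
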
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

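/*
 * Note (added for clarity, not in the original header): every mlx5 object
 * embeds its ib_* counterpart as a member, so a pointer handed out by the
 * RDMA core can be upcast with container_of() at zero cost. All the
 * to_m*() helpers below follow the same pattern, e.g.:
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 */
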
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
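
/*
 * Clarifying note (added, not in the original header): the AND above is a
 * complete decode because the enum is ordered so that the
 * MLX5_PFAULT_REQUESTOR and MLX5_PFAULT_WRITE flag bits, taken together,
 * directly index one of the four contexts; no switch statement is needed.
 */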

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
};

struct mlx5_umr_wr {
	struct ib_send_wr	wr;
	union {
		u64		virt_addr;
		u64		offset;
	} target;
	struct ib_pd	       *pd;
	unsigned int		page_shift;
	unsigned int		npages;
	u32			length;
	int			access_flags;
	u32			mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
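
/*
 * Usage sketch (illustrative, not in the original header): with the
 * container-based ib_send_wr scheme, the driver builds a UMR work request
 * by embedding the generic wr, then recovers it on the send path, roughly:
 *
 *	struct mlx5_umr_wr umrwr;
 *
 *	memset(&umrwr, 0, sizeof(umrwr));
 *	umrwr.wr.opcode = MLX5_IB_WR_UMR;
 *	umrwr.mkey = mr->mmr.key;
 *	... fill target/npages/page_shift, post via mlx5_ib_post_send() ...
 *
 *	and inside the send handler:
 *	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 */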

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem	       *umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

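/*
 * Illustrative note (not in the original header): hardware MTT entries
 * carry their access bits in the low bits of the page address, so when the
 * populate/update helpers declared below write translation entries, each
 * entry is plausibly built along the lines of:
 *
 *	pas[i] = cpu_to_be64(dma_addr | MLX5_IB_MTT_PRESENT);
 */
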
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void		       *descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void		       *descs_alloc;
};
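
/*
 * Note on the new registration API (added for clarity, not in the original
 * header): descs/desc_map/ndescs/max_descs/desc_size back the DMA-mapped
 * translation array that mlx5_ib_map_mr_sg() fills, one entry per page,
 * through the core's ib_sg_to_pages() walk. descs_alloc keeps the raw
 * allocation so that descs itself can be aligned as UMR requires.
 */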

struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}
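
/*
 * Usage sketch (illustrative, not in the original header): UMR work
 * requests complete asynchronously, so callers pair the context with a
 * wait, roughly:
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	... post the UMR wr with wr_id pointing at umr_context ...
 *	wait_for_completion(&umr_context.done);
 *	if (umr_context.status != IB_WC_SUCCESS)
 *		err = -EFAULT;
 */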

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;


	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry	       *root;
	unsigned long		last_add;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev	       *mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
};
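
/*
 * Usage sketch (illustrative, not in the original header): mr_srcu follows
 * the standard sleepable-RCU pattern. A page fault handler reads MRs under
 * the read lock, and MR teardown synchronizes before freeing:
 *
 *	int idx = srcu_read_lock(&dev->mr_srcu);
 *	... look up and use the MR; the handler may sleep ...
 *	srcu_read_unlock(&dev->mr_srcu, idx);
 *
 *	and on the destroy path: synchronize_srcu(&dev->mr_srcu);
 */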

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
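
/*
 * Usage sketch for the new memory registration API named in the commit
 * title (illustrative, not in the original header; the authoritative core
 * signatures live in rdma/ib_verbs.h). A consumer allocates an MR, maps a
 * scatterlist onto it, and posts a registration work request:
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg);
 *	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr        = mr;
 *	reg_wr.key       = mr->rkey;
 *	reg_wr.access    = IB_ACCESS_LOCAL_WRITE;
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 *
 * mlx5_ib_map_mr_sg() above is the driver hook behind ib_map_mr_sg().
 */
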
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
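
/*
 * Sizing note (added for clarity, not in the original header): a single UMR
 * operation can map at most 1 << 16 = 65536 pages, i.e. 256 MB with 4 KB
 * pages; larger regions are registered through the regular firmware
 * command path instead.
 */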

#endif /* MLX5_IB_H */