mlx4: In RoCE allow guests to have multiple GIDs
drivers/infiniband/hw/mlx4/mlx4_ib.h
/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#define MLX4_IB_DRV_NAME	"mlx4_ib"

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
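/*
 * With the pr_fmt above, a call such as
 *	pr_warn("qp %d not found\n", qpn);
 * issued from a function foo() prints:
 *	<mlx4_ib> foo: qp 1 not found
 * (illustrative sketch; foo and qpn are hypothetical)
 */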

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)

enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE	(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
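/*
 * Worked example of the headroom arithmetic above: at the minimum WQE
 * shift of 6 (64-byte WQEs), MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1
 * = 33, so MLX4_IB_SQ_MAX_SPARE reserves 33 spare send WQEs.
 */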

/* module parameter that indicates whether the SM assigns the alias GUID */
extern int mlx4_ib_sm_guid_assign;

#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS 256
struct mlx4_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct mlx4_uar uar;
	struct list_head db_page_list;
	struct mutex db_page_mutex;
};

struct mlx4_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

struct mlx4_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
	struct ib_pd *pd;
	struct ib_cq *cq;
};

struct mlx4_ib_cq_buf {
	struct mlx4_buf buf;
	struct mlx4_mtt mtt;
	int entry_size;
};

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf buf;
	int cqe;
};

struct mlx4_ib_cq {
	struct ib_cq ibcq;
	struct mlx4_cq mcq;
	struct mlx4_ib_cq_buf buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mlx4_db db;
	spinlock_t lock;
	struct mutex resize_mutex;
	struct ib_umem *umem;
	struct ib_umem *resize_umem;
};

struct mlx4_ib_mr {
	struct ib_mr ibmr;
	struct mlx4_mr mmr;
	struct ib_umem *umem;
};

struct mlx4_ib_mw {
	struct ib_mw ibmw;
	struct mlx4_mw mmw;
};

struct mlx4_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list ibfrpl;
	__be64 *mapped_page_list;
	dma_addr_t map;
};

struct mlx4_ib_fmr {
	struct ib_fmr ibfmr;
	struct mlx4_fmr mfmr;
};

struct mlx4_ib_flow {
	struct ib_flow ibflow;
	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
	u64 reg_id[2];
};

struct mlx4_ib_wq {
	u64 *wrid;
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
};

enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
	MLX4_IB_SRIOV_SQP = 1 << 31,
};
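/*
 * Note on the flag values above: the low bits alias the IB core
 * ib_qp_create_flags so both can live in the same flags word, while
 * bits 30 and 31 are mlx4-private SR-IOV markers. A typical test
 * (sketch) looks like:
 *	if (qp->flags & MLX4_IB_SRIOV_TUNNEL_QP)
 *		handle_tunnel_qp(qp);
 * where handle_tunnel_qp() is a hypothetical helper, not part of this
 * header.
 */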

struct mlx4_ib_gid_entry {
	struct list_head list;
	union ib_gid gid;
	int added;
	u8 port;
};

enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,

	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

	MLX4_IB_QPT_PROXY_SMI_OWNER = 1 << 16,
	MLX4_IB_QPT_PROXY_SMI = 1 << 17,
	MLX4_IB_QPT_PROXY_GSI = 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER = 1 << 19,
	MLX4_IB_QPT_TUN_SMI = 1 << 20,
	MLX4_IB_QPT_TUN_GSI = 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV	(MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
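/*
 * Usage sketch for the mask above: the proxy/tunnel QP types occupy
 * bits 16-21, disjoint from the IB core QP type values, so one bitwise
 * test identifies any SR-IOV special-purpose QP:
 *	if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
 *		return -EINVAL;
 * (illustrative only, e.g. rejecting an operation these QPs do not
 * support)
 */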

enum mlx4_ib_mad_ifc_flags {
	MLX4_MAD_IFC_IGNORE_MKEY = 1,
	MLX4_MAD_IFC_IGNORE_BKEY = 2,
	MLX4_MAD_IFC_IGNORE_KEYS = (MLX4_MAD_IFC_IGNORE_MKEY |
				    MLX4_MAD_IFC_IGNORE_BKEY),
	MLX4_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX4_NUM_TUNNEL_BUFS = 256,
};

struct mlx4_ib_tunnel_header {
	struct mlx4_av av;
	__be32 remote_qpn;
	__be32 qkey;
	__be16 vlan;
	u8 mac[6];
	__be16 pkey_index;
	u8 reserved[6];
};

struct mlx4_ib_buf {
	void *addr;
	dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp;	/* flags[6:5] encode the VLAN state:
				 * 0x0 - no VLAN was present in the packet
				 * 0x1 - a C-VLAN was present in the packet */
	u8 g_ml_path;		/* the GID bit indicates an IPv4/IPv6 header in RoCE */
	u8 reserved;
	__be16 pkey_index;
	__be16 sl_vid;
	__be16 slid_mac_47_32;
	__be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
	struct ib_grh grh;
	struct mlx4_rcv_tunnel_hdr tun;
} __packed;

struct mlx4_ib_qp {
	struct ib_qp ibqp;
	struct mlx4_qp mqp;
	struct mlx4_buf buf;

	struct mlx4_db db;
	struct mlx4_ib_wq rq;

	u32 doorbell_qpn;
	__be32 sq_signal_bits;
	unsigned sq_next_wqe;
	int sq_max_wqes_per_wr;
	int sq_spare_wqes;
	struct mlx4_ib_wq sq;

	enum mlx4_ib_qp_type mlx4_ib_qp_type;
	struct ib_umem *umem;
	struct mlx4_mtt mtt;
	int buf_size;
	struct mutex mutex;
	u16 xrcdn;
	u32 flags;
	u8 port;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
	u8 sq_no_prefetch;
	u8 state;
	int mlx_type;
	struct list_head gid_list;
	struct list_head steering_rules;
	struct mlx4_ib_buf *sqp_proxy_rcv;
	u64 reg_id;
};

struct mlx4_ib_srq {
	struct ib_srq ibsrq;
	struct mlx4_srq msrq;
	struct mlx4_buf buf;
	struct mlx4_db db;
	u64 *wrid;
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	struct mlx4_mtt mtt;
	struct mutex mutex;
};

struct mlx4_ib_ah {
	struct ib_ah ibah;
	union mlx4_ext_av av;
};

/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID		2
#define NUM_ALIAS_GUID_IN_REC		8
#define NUM_ALIAS_GUID_REC_IN_PORT	16
#define GUID_REC_SIZE			8
#define NUM_ALIAS_GUID_PER_PORT		128
#define MLX4_NOT_SET_GUID		(0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
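/*
 * Derivation of NUM_ALIAS_GUID_PER_PORT: each GUID info record holds
 * NUM_ALIAS_GUID_IN_REC (8) GUIDs of GUID_REC_SIZE (8) bytes, and a
 * port holds NUM_ALIAS_GUID_REC_IN_PORT (16) such records, so a port
 * can carry 8 * 16 = 128 alias GUIDs.
 */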

enum mlx4_guid_alias_rec_status {
	MLX4_GUID_INFO_STATUS_IDLE,
	MLX4_GUID_INFO_STATUS_SET,
	MLX4_GUID_INFO_STATUS_PENDING,
};

enum mlx4_guid_alias_rec_ownership {
	MLX4_GUID_DRIVER_ASSIGN,
	MLX4_GUID_SYSADMIN_ASSIGN,
	MLX4_GUID_NONE_ASSIGN,	/* initial state of each record */
};

enum mlx4_guid_alias_rec_method {
	MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
	MLX4_GUID_INFO_RECORD_DELETE = IB_SA_METHOD_DELETE,
};

struct mlx4_sriov_alias_guid_info_rec_det {
	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
	ib_sa_comp_mask guid_indexes;	/* indicates which of the 8 records are valid */
	enum mlx4_guid_alias_rec_status status;	/* administrative status of the record */
	u8 method;	/* set or delete */
	enum mlx4_guid_alias_rec_ownership ownership;	/* who assigned this alias_guid record */
};

struct mlx4_sriov_alias_guid_port_rec_det {
	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
	u8 port;
	struct mlx4_sriov_alias_guid *parent;
	struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
	spinlock_t ag_work_lock;
	struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
	struct work_struct work;
	struct mlx4_ib_dev *dev;
	int slave;
	int do_init;
	u8 port;
};

struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;
	struct ib_ah *ah;
};

struct mlx4_ib_demux_pv_qp {
	struct ib_qp *qp;
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;
	spinlock_t tx_lock;
	unsigned tx_ix_head;
	unsigned tx_ix_tail;
};

enum mlx4_ib_demux_pv_state {
	DEMUX_PV_STATE_DOWN,
	DEMUX_PV_STATE_STARTING,
	DEMUX_PV_STATE_ACTIVE,
	DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
	int port;
	int slave;
	enum mlx4_ib_demux_pv_state state;
	int has_smi;
	struct ib_device *ib_dev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	int port;
	struct workqueue_struct *wq;
	struct workqueue_struct *ud_wq;
	spinlock_t ud_lock;
	__be64 subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	/* the following lock protects both mcg_table and mcg_mgid0_list */
	struct mutex mcg_table_lock;
	struct rb_root mcg_table;
	struct list_head mcg_mgid0_list;
	struct workqueue_struct *mcg_wq;
	struct mlx4_ib_demux_pv_ctx **tun;
	atomic_t tid;
	int flushing; /* flushing the work queue */
};

struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* take this spinlock with the _irq/_irqsave variants: it may be
	 * acquired from interrupt context. */
	spinlock_t going_down_lock;
	int is_going_down;

	struct mlx4_sriov_alias_guid alias_guid;

	/* CM paravirtualization fields */
	struct list_head cm_list;
	spinlock_t id_map_lock;
	struct rb_root sl_id_map;
	struct idr pv_id_table;
};

struct mlx4_ib_iboe {
	spinlock_t lock;
	struct net_device *netdevs[MLX4_MAX_PORTS];
	struct net_device *masters[MLX4_MAX_PORTS];
	struct notifier_block nb;
	struct notifier_block nb_inet;
	struct notifier_block nb_inet6;
	union ib_gid gid_table[MLX4_MAX_PORTS][128];
};
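/*
 * gid_table holds up to 128 GID entries per port; this per-port table
 * is what allows guests to expose multiple RoCE GIDs on a shared
 * physical port. Verbs port numbers are 1-based, so a lookup is
 * typically of the form (sketch):
 *	gid = dev->iboe.gid_table[port - 1][index];
 */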

struct pkey_mgt {
	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	u16 phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct list_head pkey_port_list[MLX4_MFUNC_MAX];
	struct kobject *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
	void *ctx;
	struct kobject *kobj;
	unsigned long data;
	u32 entry_num;
	char name[15];
	struct device_attribute dentry;
	struct device *dev;
};

struct mlx4_ib_iov_sysfs_attr_ar {
	struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
	char name[100];
	u8 num;
	struct mlx4_ib_dev *dev;
	struct list_head list;
	struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
	struct ib_port_attr attr;
	struct kobject *cur_port;
	struct kobject *admin_alias_parent;
	struct kobject *gids_parent;
	struct kobject *pkeys_parent;
	struct kobject *mcgs_parent;
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

struct mlx4_ib_dev {
	struct ib_device ib_dev;
	struct mlx4_dev *dev;
	int num_ports;
	void __iomem *uar_map;

	struct mlx4_uar priv_uar;
	u32 priv_pdn;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

	struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah *sm_ah[MLX4_MAX_PORTS];
	spinlock_t sm_lock;
	struct mlx4_ib_sriov sriov;

	struct mutex cap_mask_mutex;
	bool ib_active;
	struct mlx4_ib_iboe iboe;
	int counters[MLX4_MAX_PORTS];
	int *eq_table;
	int eq_added;
	struct kobject *iov_parent;
	struct kobject *ports_parent;
	struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
	struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
	struct pkey_mgt pkeys;
	unsigned long *ib_uc_qpns_bitmap;
	int steer_qpn_count;
	int steer_qpn_base;
	int steering_support;
};

struct ib_event_work {
	struct work_struct work;
	struct mlx4_ib_dev *ib_dev;
	struct mlx4_eqe ib_eqe;
};

struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	int slave;
	enum ib_qp_type proxy_qp_type;
	u8 port;
};

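/*
 * The to_*() helpers below all follow the same container_of() pattern:
 * given a pointer to the embedded IB core object, they recover the
 * enclosing mlx4 structure. For example (sketch), inside a verbs
 * callback:
 *	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 *	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 */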
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
}

static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
	return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		    struct ib_mw_bind *mw_bind);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
				 struct ib_fmr_attr *fmr_attr);
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
			 u64 iova);
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view);

static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return true;

	return !!(ah->av.ib.g_slid & 0x80);
}
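/*
 * Reading the helper above: on an Ethernet (RoCE) port a GRH is always
 * present, since the GID addresses the peer; on an IB port, bit 7 of
 * g_slid is the address vector's 'g' bit, which says whether the AH
 * carries a GRH.
 */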

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
				  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad);
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);

/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num,
					  u8 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
					 int block_num, u8 port_num,
					 u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			    struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			     struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach);

#endif /* MLX4_IB_H */