IB/mlx4: Support the new memory registration API
drivers/infiniband/hw/mlx4/mlx4_ib.h
/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#define MLX4_IB_DRV_NAME "mlx4_ib"

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
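
/*
 * Example of the resulting log prefix (illustrative only; the function
 * name and message are hypothetical): a pr_warn("bad port %d\n", port)
 * issued from a driver function foo() is printed roughly as
 * "<mlx4_ib> foo: bad port 1".
 */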

#define mlx4_ib_warn(ibdev, format, arg...) \
        dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)

enum {
        MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
        MLX4_IB_MAX_HEADROOM = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
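
/*
 * Worked example (added for clarity): with the minimum WQE shift of 6
 * (64-byte WQEs), MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33, so
 * MLX4_IB_SQ_MAX_SPARE reserves 33 spare send WQEs of headroom.
 */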

/* module param to indicate if SM assigns the alias_GUID */
extern int mlx4_ib_sm_guid_assign;

#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS     256

enum hw_bar_type {
        HW_BAR_BF,
        HW_BAR_DB,
        HW_BAR_CLOCK,
        HW_BAR_COUNT
};

struct mlx4_ib_vma_private_data {
        struct vm_area_struct *vma;
};

struct mlx4_ib_ucontext {
        struct ib_ucontext ibucontext;
        struct mlx4_uar uar;
        struct list_head db_page_list;
        struct mutex db_page_mutex;
        struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
};

struct mlx4_ib_pd {
        struct ib_pd ibpd;
        u32 pdn;
};

struct mlx4_ib_xrcd {
        struct ib_xrcd ibxrcd;
        u32 xrcdn;
        struct ib_pd *pd;
        struct ib_cq *cq;
};

struct mlx4_ib_cq_buf {
        struct mlx4_buf buf;
        struct mlx4_mtt mtt;
        int entry_size;
};

struct mlx4_ib_cq_resize {
        struct mlx4_ib_cq_buf buf;
        int cqe;
};

struct mlx4_ib_cq {
        struct ib_cq ibcq;
        struct mlx4_cq mcq;
        struct mlx4_ib_cq_buf buf;
        struct mlx4_ib_cq_resize *resize_buf;
        struct mlx4_db db;
        spinlock_t lock;
        struct mutex resize_mutex;
        struct ib_umem *umem;
        struct ib_umem *resize_umem;
        int create_flags;
        /* List of qps that it serves. */
        struct list_head send_qp_list;
        struct list_head recv_qp_list;
};

#define MLX4_MR_PAGES_ALIGN 0x40

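/*
 * Fields used by the new memory registration API (summary added for
 * clarity; see mr.c for the authoritative behaviour): pages is expected
 * to hold the DMA addresses produced by mlx4_ib_map_mr_sg(), page_map
 * is the DMA mapping of that array, npages counts the entries currently
 * mapped, max_pages is the capacity sized at mlx4_ib_alloc_mr() time,
 * and pages_alloc keeps the original (unaligned) allocation for freeing.
 */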
struct mlx4_ib_mr {
        struct ib_mr ibmr;
        __be64 *pages;
        dma_addr_t page_map;
        u32 npages;
        u32 max_pages;
        struct mlx4_mr mmr;
        struct ib_umem *umem;
        void *pages_alloc;
};

struct mlx4_ib_mw {
        struct ib_mw ibmw;
        struct mlx4_mw mmw;
};

struct mlx4_ib_fast_reg_page_list {
        struct ib_fast_reg_page_list ibfrpl;
        __be64 *mapped_page_list;
        dma_addr_t map;
};

struct mlx4_ib_fmr {
        struct ib_fmr ibfmr;
        struct mlx4_fmr mfmr;
};

#define MAX_REGS_PER_FLOW 2

struct mlx4_flow_reg_id {
        u64 id;
        u64 mirror;
};

struct mlx4_ib_flow {
        struct ib_flow ibflow;
        /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
        struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};

struct mlx4_ib_wq {
        u64 *wrid;
        spinlock_t lock;
        int wqe_cnt;
        int max_post;
        int max_gs;
        int offset;
        int wqe_shift;
        unsigned head;
        unsigned tail;
};

enum mlx4_ib_qp_flags {
        MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
        MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
        MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
        MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
        MLX4_IB_SRIOV_SQP = 1 << 31,
};

struct mlx4_ib_gid_entry {
        struct list_head list;
        union ib_gid gid;
        int added;
        u8 port;
};

enum mlx4_ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        MLX4_IB_QPT_SMI = IB_QPT_SMI,
        MLX4_IB_QPT_GSI = IB_QPT_GSI,

        MLX4_IB_QPT_RC = IB_QPT_RC,
        MLX4_IB_QPT_UC = IB_QPT_UC,
        MLX4_IB_QPT_UD = IB_QPT_UD,
        MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
        MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
        MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
        MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
        MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

        MLX4_IB_QPT_PROXY_SMI_OWNER = 1 << 16,
        MLX4_IB_QPT_PROXY_SMI = 1 << 17,
        MLX4_IB_QPT_PROXY_GSI = 1 << 18,
        MLX4_IB_QPT_TUN_SMI_OWNER = 1 << 19,
        MLX4_IB_QPT_TUN_SMI = 1 << 20,
        MLX4_IB_QPT_TUN_GSI = 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV (MLX4_IB_QPT_PROXY_SMI_OWNER | \
        MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
        MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)

enum mlx4_ib_mad_ifc_flags {
        MLX4_MAD_IFC_IGNORE_MKEY = 1,
        MLX4_MAD_IFC_IGNORE_BKEY = 2,
        MLX4_MAD_IFC_IGNORE_KEYS = (MLX4_MAD_IFC_IGNORE_MKEY |
                                    MLX4_MAD_IFC_IGNORE_BKEY),
        MLX4_MAD_IFC_NET_VIEW = 4,
};

enum {
        MLX4_NUM_TUNNEL_BUFS = 256,
};

struct mlx4_ib_tunnel_header {
        struct mlx4_av av;
        __be32 remote_qpn;
        __be32 qkey;
        __be16 vlan;
        u8 mac[6];
        __be16 pkey_index;
        u8 reserved[6];
};

struct mlx4_ib_buf {
        void *addr;
        dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
        __be32 flags_src_qp; /* flags[6:5] is defined for VLANs:
                              * 0x0 - no vlan was in the packet
                              * 0x01 - C-VLAN was in the packet */
        u8 g_ml_path; /* gid bit stands for ipv6/4 header in RoCE */
        u8 reserved;
        __be16 pkey_index;
        __be16 sl_vid;
        __be16 slid_mac_47_32;
        __be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
        struct ib_grh grh;
        struct mlx4_rcv_tunnel_hdr tun;
} __packed;

struct mlx4_roce_smac_vlan_info {
        u64 smac;
        int smac_index;
        int smac_port;
        u64 candidate_smac;
        int candidate_smac_index;
        int candidate_smac_port;
        u16 vid;
        int vlan_index;
        int vlan_port;
        u16 candidate_vid;
        int candidate_vlan_index;
        int candidate_vlan_port;
        int update_vid;
};

struct mlx4_ib_qp {
        struct ib_qp ibqp;
        struct mlx4_qp mqp;
        struct mlx4_buf buf;

        struct mlx4_db db;
        struct mlx4_ib_wq rq;

        u32 doorbell_qpn;
        __be32 sq_signal_bits;
        unsigned sq_next_wqe;
        int sq_max_wqes_per_wr;
        int sq_spare_wqes;
        struct mlx4_ib_wq sq;

        enum mlx4_ib_qp_type mlx4_ib_qp_type;
        struct ib_umem *umem;
        struct mlx4_mtt mtt;
        int buf_size;
        struct mutex mutex;
        u16 xrcdn;
        u32 flags;
        u8 port;
        u8 alt_port;
        u8 atomic_rd_en;
        u8 resp_depth;
        u8 sq_no_prefetch;
        u8 state;
        int mlx_type;
        struct list_head gid_list;
        struct list_head steering_rules;
        struct mlx4_ib_buf *sqp_proxy_rcv;
        struct mlx4_roce_smac_vlan_info pri;
        struct mlx4_roce_smac_vlan_info alt;
        u64 reg_id;
        struct list_head qps_list;
        struct list_head cq_recv_list;
        struct list_head cq_send_list;
        struct counter_index *counter_index;
};

struct mlx4_ib_srq {
        struct ib_srq ibsrq;
        struct mlx4_srq msrq;
        struct mlx4_buf buf;
        struct mlx4_db db;
        u64 *wrid;
        spinlock_t lock;
        int head;
        int tail;
        u16 wqe_ctr;
        struct ib_umem *umem;
        struct mlx4_mtt mtt;
        struct mutex mutex;
};

struct mlx4_ib_ah {
        struct ib_ah ibah;
        union mlx4_ext_av av;
};

/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID 2
#define NUM_ALIAS_GUID_IN_REC 8
#define NUM_ALIAS_GUID_REC_IN_PORT 16
#define GUID_REC_SIZE 8
#define NUM_ALIAS_GUID_PER_PORT 128
#define MLX4_NOT_SET_GUID (0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL (~(0x00LL))

enum mlx4_guid_alias_rec_status {
        MLX4_GUID_INFO_STATUS_IDLE,
        MLX4_GUID_INFO_STATUS_SET,
};

#define GUID_STATE_NEED_PORT_INIT 0x01

enum mlx4_guid_alias_rec_method {
        MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
        MLX4_GUID_INFO_RECORD_DELETE = IB_SA_METHOD_DELETE,
};

struct mlx4_sriov_alias_guid_info_rec_det {
        u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
        ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
        enum mlx4_guid_alias_rec_status status; /* indicates the administrative status of the record */
        unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
        u64 time_to_run;
};

struct mlx4_sriov_alias_guid_port_rec_det {
        struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
        struct workqueue_struct *wq;
        struct delayed_work alias_guid_work;
        u8 port;
        u32 state_flags;
        struct mlx4_sriov_alias_guid *parent;
        struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
        struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
        spinlock_t ag_work_lock;
        struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
        struct work_struct work;
        struct mlx4_ib_dev *dev;
        int slave;
        int do_init;
        u8 port;
};

struct mlx4_ib_tun_tx_buf {
        struct mlx4_ib_buf buf;
        struct ib_ah *ah;
};

struct mlx4_ib_demux_pv_qp {
        struct ib_qp *qp;
        enum ib_qp_type proxy_qpt;
        struct mlx4_ib_buf *ring;
        struct mlx4_ib_tun_tx_buf *tx_ring;
        spinlock_t tx_lock;
        unsigned tx_ix_head;
        unsigned tx_ix_tail;
};

enum mlx4_ib_demux_pv_state {
        DEMUX_PV_STATE_DOWN,
        DEMUX_PV_STATE_STARTING,
        DEMUX_PV_STATE_ACTIVE,
        DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
        int port;
        int slave;
        enum mlx4_ib_demux_pv_state state;
        int has_smi;
        struct ib_device *ib_dev;
        struct ib_cq *cq;
        struct ib_pd *pd;
        struct work_struct work;
        struct workqueue_struct *wq;
        struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
        struct ib_device *ib_dev;
        int port;
        struct workqueue_struct *wq;
        struct workqueue_struct *ud_wq;
        spinlock_t ud_lock;
        __be64 subnet_prefix;
        __be64 guid_cache[128];
        struct mlx4_ib_dev *dev;
        /* the following lock protects both mcg_table and mcg_mgid0_list */
        struct mutex mcg_table_lock;
        struct rb_root mcg_table;
        struct list_head mcg_mgid0_list;
        struct workqueue_struct *mcg_wq;
        struct mlx4_ib_demux_pv_ctx **tun;
        atomic_t tid;
        int flushing; /* flushing the work queue */
};

struct mlx4_ib_sriov {
        struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
        struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
        /* Take this spinlock with the irq-disabling variants, since it
         * may be acquired from interrupt context. */
        spinlock_t going_down_lock;
        int is_going_down;

        struct mlx4_sriov_alias_guid alias_guid;

        /* CM paravirtualization fields */
        struct list_head cm_list;
        spinlock_t id_map_lock;
        struct rb_root sl_id_map;
        struct idr pv_id_table;
};

struct gid_cache_context {
        int real_index;
        int refcount;
};

struct gid_entry {
        union ib_gid gid;
        struct gid_cache_context *ctx;
};

struct mlx4_port_gid_table {
        struct gid_entry gids[MLX4_MAX_PORT_GIDS];
};

struct mlx4_ib_iboe {
        spinlock_t lock;
        struct net_device *netdevs[MLX4_MAX_PORTS];
        atomic64_t mac[MLX4_MAX_PORTS];
        struct notifier_block nb;
        struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
};

struct pkey_mgt {
        u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
        u16 phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
        struct list_head pkey_port_list[MLX4_MFUNC_MAX];
        struct kobject *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
        void *ctx;
        struct kobject *kobj;
        unsigned long data;
        u32 entry_num;
        char name[15];
        struct device_attribute dentry;
        struct device *dev;
};

struct mlx4_ib_iov_sysfs_attr_ar {
        struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
        char name[100];
        u8 num;
        struct mlx4_ib_dev *dev;
        struct list_head list;
        struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
        struct ib_port_attr attr;
        struct kobject *cur_port;
        struct kobject *admin_alias_parent;
        struct kobject *gids_parent;
        struct kobject *pkeys_parent;
        struct kobject *mcgs_parent;
        struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

struct counter_index {
        struct list_head list;
        u32 index;
        u8 allocated;
};

struct mlx4_ib_counters {
        struct list_head counters_list;
        struct mutex mutex; /* mutex for accessing counters list */
        u32 default_counter;
};

struct mlx4_ib_dev {
        struct ib_device ib_dev;
        struct mlx4_dev *dev;
        int num_ports;
        void __iomem *uar_map;

        struct mlx4_uar priv_uar;
        u32 priv_pdn;
        MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

        struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
        struct ib_ah *sm_ah[MLX4_MAX_PORTS];
        spinlock_t sm_lock;
        struct mlx4_ib_sriov sriov;

        struct mutex cap_mask_mutex;
        bool ib_active;
        struct mlx4_ib_iboe iboe;
        struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
        int *eq_table;
        struct kobject *iov_parent;
        struct kobject *ports_parent;
        struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
        struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
        struct pkey_mgt pkeys;
        unsigned long *ib_uc_qpns_bitmap;
        int steer_qpn_count;
        int steer_qpn_base;
        int steering_support;
        struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
        /* lock when destroying qp1_proxy and getting netdev events */
        struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
        u8 bond_next_port;
        /* protect resources needed as part of reset flow */
        spinlock_t reset_flow_resource_lock;
        struct list_head qp_list;
};

struct ib_event_work {
        struct work_struct work;
        struct mlx4_ib_dev *ib_dev;
        struct mlx4_eqe ib_eqe;
};

struct mlx4_ib_qp_tunnel_init_attr {
        struct ib_qp_init_attr init_attr;
        int slave;
        enum ib_qp_type proxy_qp_type;
        u8 port;
};

struct mlx4_uverbs_ex_query_device {
        __u32 comp_mask;
        __u32 reserved;
};

enum query_device_resp_mask {
        QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
};

struct mlx4_uverbs_ex_query_device_resp {
        __u32 comp_mask;
        __u32 response_length;
        __u64 hca_core_clock_offset;
};

static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
        return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
        return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
        return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
}

static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
        return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
        return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
        return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
        return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
        return container_of(ibah, struct mlx4_ib_ah, ibah);
}

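/*
 * Round-robin port selection for bonded devices.  Illustrative example
 * (not from the source): with num_ports == 2 and bond_next_port
 * starting at 0, successive calls return 2, 1, 2, 1, ... i.e. a
 * 1-based port number.
 */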
static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
        dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;

        return dev->bond_next_port + 1;
}

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
                        struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                    struct ib_mw_bind *mw_bind);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
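
/*
 * Usage sketch for the new memory registration API (illustrative only;
 * consumers normally go through the ib_* core wrappers, whose exact
 * signatures vary by kernel version):
 *
 *      mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg);
 *      n  = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
 *      ... post an IB_WR_REG_MR work request referencing mr, then
 *      invalidate and ib_dereg_mr() when the registration is done ...
 */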
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
                      struct scatterlist *sg,
                      int sg_nents);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
                                                               int page_list_len);
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
                                const struct ib_cq_init_attr *attr,
                                struct ib_ucontext *context,
                                struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr);

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
                 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
                                 struct ib_fmr_attr *fmr_attr);
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
                         u64 iova);
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                         struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                         u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                        union ib_gid *gid, int netw_view);

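/*
 * An AH on a RoCE (Ethernet) port always carries a GRH; on an IB port
 * the GRH-present bit is bit 7 of g_slid (descriptive comment added
 * for clarity).
 */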
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
        u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

        if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
                return true;

        return !!(ah->av.ib.g_slid & 0x80);
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
                                  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
                              struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
                            enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                          enum ib_qp_type qpt, struct ib_wc *wc,
                          struct ib_grh *grh, struct ib_mad *mad);

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
                         enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
                         u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
                         u16 vlan_id, struct ib_mad *mad);

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                             struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
                                 struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);

/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                                          int block_num,
                                          u8 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
                                         int block_num, u8 port_num,
                                         u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
                            struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
                             struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
                                    int port, int slave_init);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                         int is_attach);
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
                                    u8 port_num, int index);

#endif /* MLX4_IB_H */