IB: new common API for draining queues
[deliverable/linux.git] / include / rdma / ib_verbs.h
1 /*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 #include <linux/socket.h>
52 #include <linux/irq_poll.h>
53 #include <uapi/linux/if_ether.h>
54 #include <net/ipv6.h>
55 #include <net/ip.h>
56 #include <linux/string.h>
57 #include <linux/slab.h>
58
59 #include <linux/atomic.h>
60 #include <linux/mmu_notifier.h>
61 #include <asm/uaccess.h>
62
63 extern struct workqueue_struct *ib_wq;
64 extern struct workqueue_struct *ib_comp_wq;
65
66 union ib_gid {
67 u8 raw[16];
68 struct {
69 __be64 subnet_prefix;
70 __be64 interface_id;
71 } global;
72 };
73
74 extern union ib_gid zgid;
75
76 enum ib_gid_type {
77 /* If link layer is Ethernet, this is RoCE V1 */
78 IB_GID_TYPE_IB = 0,
79 IB_GID_TYPE_ROCE = 0,
80 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
81 IB_GID_TYPE_SIZE
82 };
83
84 #define ROCE_V2_UDP_DPORT 4791
85 struct ib_gid_attr {
86 enum ib_gid_type gid_type;
87 struct net_device *ndev;
88 };
89
90 enum rdma_node_type {
91 /* IB values map to NodeInfo:NodeType. */
92 RDMA_NODE_IB_CA = 1,
93 RDMA_NODE_IB_SWITCH,
94 RDMA_NODE_IB_ROUTER,
95 RDMA_NODE_RNIC,
96 RDMA_NODE_USNIC,
97 RDMA_NODE_USNIC_UDP,
98 };
99
100 enum rdma_transport_type {
101 RDMA_TRANSPORT_IB,
102 RDMA_TRANSPORT_IWARP,
103 RDMA_TRANSPORT_USNIC,
104 RDMA_TRANSPORT_USNIC_UDP
105 };
106
107 enum rdma_protocol_type {
108 RDMA_PROTOCOL_IB,
109 RDMA_PROTOCOL_IBOE,
110 RDMA_PROTOCOL_IWARP,
111 RDMA_PROTOCOL_USNIC_UDP
112 };
113
114 __attribute_const__ enum rdma_transport_type
115 rdma_node_get_transport(enum rdma_node_type node_type);
116
117 enum rdma_network_type {
118 RDMA_NETWORK_IB,
119 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
120 RDMA_NETWORK_IPV4,
121 RDMA_NETWORK_IPV6
122 };
123
124 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
125 {
126 if (network_type == RDMA_NETWORK_IPV4 ||
127 network_type == RDMA_NETWORK_IPV6)
128 return IB_GID_TYPE_ROCE_UDP_ENCAP;
129
130 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
131 return IB_GID_TYPE_IB;
132 }
133
134 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
135 union ib_gid *gid)
136 {
137 if (gid_type == IB_GID_TYPE_IB)
138 return RDMA_NETWORK_IB;
139
140 if (ipv6_addr_v4mapped((struct in6_addr *)gid))
141 return RDMA_NETWORK_IPV4;
142 else
143 return RDMA_NETWORK_IPV6;
144 }
145
146 enum rdma_link_layer {
147 IB_LINK_LAYER_UNSPECIFIED,
148 IB_LINK_LAYER_INFINIBAND,
149 IB_LINK_LAYER_ETHERNET,
150 };
151
152 enum ib_device_cap_flags {
153 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
154 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
155 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
156 IB_DEVICE_RAW_MULTI = (1 << 3),
157 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
158 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
159 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
160 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
161 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
162 IB_DEVICE_INIT_TYPE = (1 << 9),
163 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
164 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
165 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
166 IB_DEVICE_SRQ_RESIZE = (1 << 13),
167 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
168
169 /*
170 * This device supports a per-device lkey or stag that can be
171 * used without performing a memory registration for the local
172 * memory. Note that ULPs should never check this flag, but
173 * instead use the local_dma_lkey field in the ib_pd structure,
174 * which will always contain a usable lkey.
175 */
176 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
177 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
178 IB_DEVICE_MEM_WINDOW = (1 << 17),
179 /*
180 * Devices should set IB_DEVICE_UD_IP_SUM if they support
181 * insertion of UDP and TCP checksum on outgoing UD IPoIB
182 * messages and can verify the validity of checksum for
183 * incoming messages. Setting this flag implies that the
184 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
185 */
186 IB_DEVICE_UD_IP_CSUM = (1 << 18),
187 IB_DEVICE_UD_TSO = (1 << 19),
188 IB_DEVICE_XRC = (1 << 20),
189
190 /*
191 * This device supports the IB "base memory management extension",
192 * which includes support for fast registrations (IB_WR_REG_MR,
193 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
194 * also be set by any iWarp device which must support FRs to comply
195 * with the iWarp verbs spec. iWarp devices also support the
196 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
197 * stag.
198 */
199 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
200 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
201 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
202 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
203 IB_DEVICE_RC_IP_CSUM = (1 << 25),
204 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
205 /*
206 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
207 * support execution of WQEs that involve synchronization
208 * of I/O operations with single completion queue managed
209 * by hardware.
210 */
211 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
212 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
213 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
214 IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
215 };
216
217 enum ib_signature_prot_cap {
218 IB_PROT_T10DIF_TYPE_1 = 1,
219 IB_PROT_T10DIF_TYPE_2 = 1 << 1,
220 IB_PROT_T10DIF_TYPE_3 = 1 << 2,
221 };
222
223 enum ib_signature_guard_cap {
224 IB_GUARD_T10DIF_CRC = 1,
225 IB_GUARD_T10DIF_CSUM = 1 << 1,
226 };
227
228 enum ib_atomic_cap {
229 IB_ATOMIC_NONE,
230 IB_ATOMIC_HCA,
231 IB_ATOMIC_GLOB
232 };
233
234 enum ib_odp_general_cap_bits {
235 IB_ODP_SUPPORT = 1 << 0,
236 };
237
238 enum ib_odp_transport_cap_bits {
239 IB_ODP_SUPPORT_SEND = 1 << 0,
240 IB_ODP_SUPPORT_RECV = 1 << 1,
241 IB_ODP_SUPPORT_WRITE = 1 << 2,
242 IB_ODP_SUPPORT_READ = 1 << 3,
243 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
244 };
245
246 struct ib_odp_caps {
247 uint64_t general_caps;
248 struct {
249 uint32_t rc_odp_caps;
250 uint32_t uc_odp_caps;
251 uint32_t ud_odp_caps;
252 } per_transport_caps;
253 };
254
255 enum ib_cq_creation_flags {
256 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
257 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
258 };
259
260 struct ib_cq_init_attr {
261 unsigned int cqe;
262 int comp_vector;
263 u32 flags;
264 };
265
266 struct ib_device_attr {
267 u64 fw_ver;
268 __be64 sys_image_guid;
269 u64 max_mr_size;
270 u64 page_size_cap;
271 u32 vendor_id;
272 u32 vendor_part_id;
273 u32 hw_ver;
274 int max_qp;
275 int max_qp_wr;
276 int device_cap_flags;
277 int max_sge;
278 int max_sge_rd;
279 int max_cq;
280 int max_cqe;
281 int max_mr;
282 int max_pd;
283 int max_qp_rd_atom;
284 int max_ee_rd_atom;
285 int max_res_rd_atom;
286 int max_qp_init_rd_atom;
287 int max_ee_init_rd_atom;
288 enum ib_atomic_cap atomic_cap;
289 enum ib_atomic_cap masked_atomic_cap;
290 int max_ee;
291 int max_rdd;
292 int max_mw;
293 int max_raw_ipv6_qp;
294 int max_raw_ethy_qp;
295 int max_mcast_grp;
296 int max_mcast_qp_attach;
297 int max_total_mcast_qp_attach;
298 int max_ah;
299 int max_fmr;
300 int max_map_per_fmr;
301 int max_srq;
302 int max_srq_wr;
303 int max_srq_sge;
304 unsigned int max_fast_reg_page_list_len;
305 u16 max_pkeys;
306 u8 local_ca_ack_delay;
307 int sig_prot_cap;
308 int sig_guard_cap;
309 struct ib_odp_caps odp_caps;
310 uint64_t timestamp_mask;
311 uint64_t hca_core_clock; /* in kHz */
312 };
313
314 enum ib_mtu {
315 IB_MTU_256 = 1,
316 IB_MTU_512 = 2,
317 IB_MTU_1024 = 3,
318 IB_MTU_2048 = 4,
319 IB_MTU_4096 = 5
320 };
321
322 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
323 {
324 switch (mtu) {
325 case IB_MTU_256: return 256;
326 case IB_MTU_512: return 512;
327 case IB_MTU_1024: return 1024;
328 case IB_MTU_2048: return 2048;
329 case IB_MTU_4096: return 4096;
330 default: return -1;
331 }
332 }
333
334 enum ib_port_state {
335 IB_PORT_NOP = 0,
336 IB_PORT_DOWN = 1,
337 IB_PORT_INIT = 2,
338 IB_PORT_ARMED = 3,
339 IB_PORT_ACTIVE = 4,
340 IB_PORT_ACTIVE_DEFER = 5
341 };
342
343 enum ib_port_cap_flags {
344 IB_PORT_SM = 1 << 1,
345 IB_PORT_NOTICE_SUP = 1 << 2,
346 IB_PORT_TRAP_SUP = 1 << 3,
347 IB_PORT_OPT_IPD_SUP = 1 << 4,
348 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
349 IB_PORT_SL_MAP_SUP = 1 << 6,
350 IB_PORT_MKEY_NVRAM = 1 << 7,
351 IB_PORT_PKEY_NVRAM = 1 << 8,
352 IB_PORT_LED_INFO_SUP = 1 << 9,
353 IB_PORT_SM_DISABLED = 1 << 10,
354 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
355 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
356 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
357 IB_PORT_CM_SUP = 1 << 16,
358 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
359 IB_PORT_REINIT_SUP = 1 << 18,
360 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
361 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
362 IB_PORT_DR_NOTICE_SUP = 1 << 21,
363 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
364 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
365 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
366 IB_PORT_CLIENT_REG_SUP = 1 << 25,
367 IB_PORT_IP_BASED_GIDS = 1 << 26,
368 };
369
370 enum ib_port_width {
371 IB_WIDTH_1X = 1,
372 IB_WIDTH_4X = 2,
373 IB_WIDTH_8X = 4,
374 IB_WIDTH_12X = 8
375 };
376
377 static inline int ib_width_enum_to_int(enum ib_port_width width)
378 {
379 switch (width) {
380 case IB_WIDTH_1X: return 1;
381 case IB_WIDTH_4X: return 4;
382 case IB_WIDTH_8X: return 8;
383 case IB_WIDTH_12X: return 12;
384 default: return -1;
385 }
386 }
387
388 enum ib_port_speed {
389 IB_SPEED_SDR = 1,
390 IB_SPEED_DDR = 2,
391 IB_SPEED_QDR = 4,
392 IB_SPEED_FDR10 = 8,
393 IB_SPEED_FDR = 16,
394 IB_SPEED_EDR = 32
395 };
396
397 struct ib_protocol_stats {
398 /* TBD... */
399 };
400
401 struct iw_protocol_stats {
402 u64 ipInReceives;
403 u64 ipInHdrErrors;
404 u64 ipInTooBigErrors;
405 u64 ipInNoRoutes;
406 u64 ipInAddrErrors;
407 u64 ipInUnknownProtos;
408 u64 ipInTruncatedPkts;
409 u64 ipInDiscards;
410 u64 ipInDelivers;
411 u64 ipOutForwDatagrams;
412 u64 ipOutRequests;
413 u64 ipOutDiscards;
414 u64 ipOutNoRoutes;
415 u64 ipReasmTimeout;
416 u64 ipReasmReqds;
417 u64 ipReasmOKs;
418 u64 ipReasmFails;
419 u64 ipFragOKs;
420 u64 ipFragFails;
421 u64 ipFragCreates;
422 u64 ipInMcastPkts;
423 u64 ipOutMcastPkts;
424 u64 ipInBcastPkts;
425 u64 ipOutBcastPkts;
426
427 u64 tcpRtoAlgorithm;
428 u64 tcpRtoMin;
429 u64 tcpRtoMax;
430 u64 tcpMaxConn;
431 u64 tcpActiveOpens;
432 u64 tcpPassiveOpens;
433 u64 tcpAttemptFails;
434 u64 tcpEstabResets;
435 u64 tcpCurrEstab;
436 u64 tcpInSegs;
437 u64 tcpOutSegs;
438 u64 tcpRetransSegs;
439 u64 tcpInErrs;
440 u64 tcpOutRsts;
441 };
442
443 union rdma_protocol_stats {
444 struct ib_protocol_stats ib;
445 struct iw_protocol_stats iw;
446 };
447
448 /* Define bits for the various functionality this port needs the core
449 * to support.
450 */
451 /* Management 0x00000FFF */
452 #define RDMA_CORE_CAP_IB_MAD 0x00000001
453 #define RDMA_CORE_CAP_IB_SMI 0x00000002
454 #define RDMA_CORE_CAP_IB_CM 0x00000004
455 #define RDMA_CORE_CAP_IW_CM 0x00000008
456 #define RDMA_CORE_CAP_IB_SA 0x00000010
457 #define RDMA_CORE_CAP_OPA_MAD 0x00000020
458
459 /* Address format 0x000FF000 */
460 #define RDMA_CORE_CAP_AF_IB 0x00001000
461 #define RDMA_CORE_CAP_ETH_AH 0x00002000
462
463 /* Protocol 0xFFF00000 */
464 #define RDMA_CORE_CAP_PROT_IB 0x00100000
465 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000
466 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000
467 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
468
469 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
470 | RDMA_CORE_CAP_IB_MAD \
471 | RDMA_CORE_CAP_IB_SMI \
472 | RDMA_CORE_CAP_IB_CM \
473 | RDMA_CORE_CAP_IB_SA \
474 | RDMA_CORE_CAP_AF_IB)
475 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
476 | RDMA_CORE_CAP_IB_MAD \
477 | RDMA_CORE_CAP_IB_CM \
478 | RDMA_CORE_CAP_AF_IB \
479 | RDMA_CORE_CAP_ETH_AH)
480 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
481 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
482 | RDMA_CORE_CAP_IB_MAD \
483 | RDMA_CORE_CAP_IB_CM \
484 | RDMA_CORE_CAP_AF_IB \
485 | RDMA_CORE_CAP_ETH_AH)
486 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
487 | RDMA_CORE_CAP_IW_CM)
488 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
489 | RDMA_CORE_CAP_OPA_MAD)
490
491 struct ib_port_attr {
492 enum ib_port_state state;
493 enum ib_mtu max_mtu;
494 enum ib_mtu active_mtu;
495 int gid_tbl_len;
496 u32 port_cap_flags;
497 u32 max_msg_sz;
498 u32 bad_pkey_cntr;
499 u32 qkey_viol_cntr;
500 u16 pkey_tbl_len;
501 u16 lid;
502 u16 sm_lid;
503 u8 lmc;
504 u8 max_vl_num;
505 u8 sm_sl;
506 u8 subnet_timeout;
507 u8 init_type_reply;
508 u8 active_width;
509 u8 active_speed;
510 u8 phys_state;
511 };
512
513 enum ib_device_modify_flags {
514 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
515 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
516 };
517
518 struct ib_device_modify {
519 u64 sys_image_guid;
520 char node_desc[64];
521 };
522
523 enum ib_port_modify_flags {
524 IB_PORT_SHUTDOWN = 1,
525 IB_PORT_INIT_TYPE = (1<<2),
526 IB_PORT_RESET_QKEY_CNTR = (1<<3)
527 };
528
529 struct ib_port_modify {
530 u32 set_port_cap_mask;
531 u32 clr_port_cap_mask;
532 u8 init_type;
533 };
534
535 enum ib_event_type {
536 IB_EVENT_CQ_ERR,
537 IB_EVENT_QP_FATAL,
538 IB_EVENT_QP_REQ_ERR,
539 IB_EVENT_QP_ACCESS_ERR,
540 IB_EVENT_COMM_EST,
541 IB_EVENT_SQ_DRAINED,
542 IB_EVENT_PATH_MIG,
543 IB_EVENT_PATH_MIG_ERR,
544 IB_EVENT_DEVICE_FATAL,
545 IB_EVENT_PORT_ACTIVE,
546 IB_EVENT_PORT_ERR,
547 IB_EVENT_LID_CHANGE,
548 IB_EVENT_PKEY_CHANGE,
549 IB_EVENT_SM_CHANGE,
550 IB_EVENT_SRQ_ERR,
551 IB_EVENT_SRQ_LIMIT_REACHED,
552 IB_EVENT_QP_LAST_WQE_REACHED,
553 IB_EVENT_CLIENT_REREGISTER,
554 IB_EVENT_GID_CHANGE,
555 };
556
557 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
558
559 struct ib_event {
560 struct ib_device *device;
561 union {
562 struct ib_cq *cq;
563 struct ib_qp *qp;
564 struct ib_srq *srq;
565 u8 port_num;
566 } element;
567 enum ib_event_type event;
568 };
569
570 struct ib_event_handler {
571 struct ib_device *device;
572 void (*handler)(struct ib_event_handler *, struct ib_event *);
573 struct list_head list;
574 };
575
576 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
577 do { \
578 (_ptr)->device = _device; \
579 (_ptr)->handler = _handler; \
580 INIT_LIST_HEAD(&(_ptr)->list); \
581 } while (0)
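
/*
 * Illustrative sketch (not part of this header): a ULP would typically
 * combine INIT_IB_EVENT_HANDLER above with ib_register_event_handler(),
 * declared further down in this file.  The example_* names are
 * assumptions made for this sketch only.
 */
static void example_async_event_handler(struct ib_event_handler *handler,
					struct ib_event *event)
{
	/* ib_event_msg(), declared above, turns the event code into text */
	pr_info("got async event: %s\n", ib_event_msg(event->event));
}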
582
583 struct ib_global_route {
584 union ib_gid dgid;
585 u32 flow_label;
586 u8 sgid_index;
587 u8 hop_limit;
588 u8 traffic_class;
589 };
590
591 struct ib_grh {
592 __be32 version_tclass_flow;
593 __be16 paylen;
594 u8 next_hdr;
595 u8 hop_limit;
596 union ib_gid sgid;
597 union ib_gid dgid;
598 };
599
600 union rdma_network_hdr {
601 struct ib_grh ibgrh;
602 struct {
603 /* The IB spec states that if it's IPv4, the IP header
604 * occupies the last 20 bytes of the 40-byte GRH space.
605 */
606 u8 reserved[20];
607 struct iphdr roce4grh;
608 };
609 };
610
611 enum {
612 IB_MULTICAST_QPN = 0xffffff
613 };
614
615 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
616
617 enum ib_ah_flags {
618 IB_AH_GRH = 1
619 };
620
621 enum ib_rate {
622 IB_RATE_PORT_CURRENT = 0,
623 IB_RATE_2_5_GBPS = 2,
624 IB_RATE_5_GBPS = 5,
625 IB_RATE_10_GBPS = 3,
626 IB_RATE_20_GBPS = 6,
627 IB_RATE_30_GBPS = 4,
628 IB_RATE_40_GBPS = 7,
629 IB_RATE_60_GBPS = 8,
630 IB_RATE_80_GBPS = 9,
631 IB_RATE_120_GBPS = 10,
632 IB_RATE_14_GBPS = 11,
633 IB_RATE_56_GBPS = 12,
634 IB_RATE_112_GBPS = 13,
635 IB_RATE_168_GBPS = 14,
636 IB_RATE_25_GBPS = 15,
637 IB_RATE_100_GBPS = 16,
638 IB_RATE_200_GBPS = 17,
639 IB_RATE_300_GBPS = 18
640 };
641
642 /**
643 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
644 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
645 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
646 * @rate: rate to convert.
647 */
648 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
649
650 /**
651 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
652 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
653 * @rate: rate to convert.
654 */
655 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
656
657
658 /**
659 * enum ib_mr_type - memory region type
660 * @IB_MR_TYPE_MEM_REG: memory region that is used for
661 * normal registration
662 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
663 * signature operations (data-integrity
664 * capable regions)
665 */
666 enum ib_mr_type {
667 IB_MR_TYPE_MEM_REG,
668 IB_MR_TYPE_SIGNATURE,
669 };
670
671 /**
672 * Signature types
673 * IB_SIG_TYPE_NONE: Unprotected.
674 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
675 */
676 enum ib_signature_type {
677 IB_SIG_TYPE_NONE,
678 IB_SIG_TYPE_T10_DIF,
679 };
680
681 /**
682 * Signature T10-DIF block-guard types
683 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
684 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
685 */
686 enum ib_t10_dif_bg_type {
687 IB_T10DIF_CRC,
688 IB_T10DIF_CSUM
689 };
690
691 /**
692 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
693 * domain.
694 * @bg_type: T10-DIF block guard type (CRC|CSUM)
695 * @pi_interval: protection information interval.
696 * @bg: seed of guard computation.
697 * @app_tag: application tag of guard block
698 * @ref_tag: initial guard block reference tag.
699 * @ref_remap: Indicates whether the reference tag increments with each block
700 * @app_escape: Indicates the block check should be skipped when apptag=0xffff
701 * @ref_escape: Indicates the block check should be skipped when reftag=0xffffffff
702 * @apptag_check_mask: check bitmask of application tag.
703 */
704 struct ib_t10_dif_domain {
705 enum ib_t10_dif_bg_type bg_type;
706 u16 pi_interval;
707 u16 bg;
708 u16 app_tag;
709 u32 ref_tag;
710 bool ref_remap;
711 bool app_escape;
712 bool ref_escape;
713 u16 apptag_check_mask;
714 };
715
716 /**
717 * struct ib_sig_domain - Parameters for signature domain
718 * @sig_type: specific signature type
719 * @sig: union of all signature domain attributes that may
720 * be used to set domain layout.
721 */
722 struct ib_sig_domain {
723 enum ib_signature_type sig_type;
724 union {
725 struct ib_t10_dif_domain dif;
726 } sig;
727 };
728
729 /**
730 * struct ib_sig_attrs - Parameters for signature handover operation
731 * @check_mask: bitmask for signature byte check (8 bytes)
732 * @mem: memory domain layout descriptor.
733 * @wire: wire domain layout descriptor.
734 */
735 struct ib_sig_attrs {
736 u8 check_mask;
737 struct ib_sig_domain mem;
738 struct ib_sig_domain wire;
739 };
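
/*
 * Illustrative sketch, not part of the verbs API itself: one plausible way
 * to fill ib_sig_attrs for a layout where local memory is unprotected and
 * the wire carries T10-DIF with CRC guards over 512-byte intervals.  The
 * concrete values below are example assumptions, not requirements.
 */
static inline void example_fill_dif_attrs(struct ib_sig_attrs *attrs)
{
	memset(attrs, 0, sizeof(*attrs));
	attrs->mem.sig_type = IB_SIG_TYPE_NONE;
	attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	attrs->wire.sig.dif.pi_interval = 512;
	attrs->wire.sig.dif.ref_tag = 0;
	attrs->wire.sig.dif.ref_remap = true;
	attrs->wire.sig.dif.app_escape = true;
	attrs->check_mask = 0xff;	/* check all eight bytes of the PI field */
}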
740
741 enum ib_sig_err_type {
742 IB_SIG_BAD_GUARD,
743 IB_SIG_BAD_REFTAG,
744 IB_SIG_BAD_APPTAG,
745 };
746
747 /**
748 * struct ib_sig_err - signature error descriptor
749 */
750 struct ib_sig_err {
751 enum ib_sig_err_type err_type;
752 u32 expected;
753 u32 actual;
754 u64 sig_err_offset;
755 u32 key;
756 };
757
758 enum ib_mr_status_check {
759 IB_MR_CHECK_SIG_STATUS = 1,
760 };
761
762 /**
763 * struct ib_mr_status - Memory region status container
764 *
765 * @fail_status: Bitmask of MR checks status. For each
766 * failed check a corresponding status bit is set.
767 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
768 * failure.
769 */
770 struct ib_mr_status {
771 u32 fail_status;
772 struct ib_sig_err sig_err;
773 };
774
775 /**
776 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
777 * enum.
778 * @mult: multiple to convert.
779 */
780 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
781
782 struct ib_ah_attr {
783 struct ib_global_route grh;
784 u16 dlid;
785 u8 sl;
786 u8 src_path_bits;
787 u8 static_rate;
788 u8 ah_flags;
789 u8 port_num;
790 u8 dmac[ETH_ALEN];
791 };
792
793 enum ib_wc_status {
794 IB_WC_SUCCESS,
795 IB_WC_LOC_LEN_ERR,
796 IB_WC_LOC_QP_OP_ERR,
797 IB_WC_LOC_EEC_OP_ERR,
798 IB_WC_LOC_PROT_ERR,
799 IB_WC_WR_FLUSH_ERR,
800 IB_WC_MW_BIND_ERR,
801 IB_WC_BAD_RESP_ERR,
802 IB_WC_LOC_ACCESS_ERR,
803 IB_WC_REM_INV_REQ_ERR,
804 IB_WC_REM_ACCESS_ERR,
805 IB_WC_REM_OP_ERR,
806 IB_WC_RETRY_EXC_ERR,
807 IB_WC_RNR_RETRY_EXC_ERR,
808 IB_WC_LOC_RDD_VIOL_ERR,
809 IB_WC_REM_INV_RD_REQ_ERR,
810 IB_WC_REM_ABORT_ERR,
811 IB_WC_INV_EECN_ERR,
812 IB_WC_INV_EEC_STATE_ERR,
813 IB_WC_FATAL_ERR,
814 IB_WC_RESP_TIMEOUT_ERR,
815 IB_WC_GENERAL_ERR
816 };
817
818 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
819
820 enum ib_wc_opcode {
821 IB_WC_SEND,
822 IB_WC_RDMA_WRITE,
823 IB_WC_RDMA_READ,
824 IB_WC_COMP_SWAP,
825 IB_WC_FETCH_ADD,
826 IB_WC_LSO,
827 IB_WC_LOCAL_INV,
828 IB_WC_REG_MR,
829 IB_WC_MASKED_COMP_SWAP,
830 IB_WC_MASKED_FETCH_ADD,
831 /*
832 * Set value of IB_WC_RECV so consumers can test if a completion is a
833 * receive by testing (opcode & IB_WC_RECV).
834 */
835 IB_WC_RECV = 1 << 7,
836 IB_WC_RECV_RDMA_WITH_IMM
837 };
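
/*
 * Illustrative example: because IB_WC_RECV is deliberately set to 1 << 7,
 * a consumer can distinguish receive completions from send-side completions
 * with a single bitwise test instead of listing every receive opcode.
 */
static inline bool example_wc_opcode_is_recv(enum ib_wc_opcode opcode)
{
	return opcode & IB_WC_RECV;
}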
838
839 enum ib_wc_flags {
840 IB_WC_GRH = 1,
841 IB_WC_WITH_IMM = (1<<1),
842 IB_WC_WITH_INVALIDATE = (1<<2),
843 IB_WC_IP_CSUM_OK = (1<<3),
844 IB_WC_WITH_SMAC = (1<<4),
845 IB_WC_WITH_VLAN = (1<<5),
846 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
847 };
848
849 struct ib_wc {
850 union {
851 u64 wr_id;
852 struct ib_cqe *wr_cqe;
853 };
854 enum ib_wc_status status;
855 enum ib_wc_opcode opcode;
856 u32 vendor_err;
857 u32 byte_len;
858 struct ib_qp *qp;
859 union {
860 __be32 imm_data;
861 u32 invalidate_rkey;
862 } ex;
863 u32 src_qp;
864 int wc_flags;
865 u16 pkey_index;
866 u16 slid;
867 u8 sl;
868 u8 dlid_path_bits;
869 u8 port_num; /* valid only for DR SMPs on switches */
870 u8 smac[ETH_ALEN];
871 u16 vlan_id;
872 u8 network_hdr_type;
873 };
874
875 enum ib_cq_notify_flags {
876 IB_CQ_SOLICITED = 1 << 0,
877 IB_CQ_NEXT_COMP = 1 << 1,
878 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
879 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
880 };
881
882 enum ib_srq_type {
883 IB_SRQT_BASIC,
884 IB_SRQT_XRC
885 };
886
887 enum ib_srq_attr_mask {
888 IB_SRQ_MAX_WR = 1 << 0,
889 IB_SRQ_LIMIT = 1 << 1,
890 };
891
892 struct ib_srq_attr {
893 u32 max_wr;
894 u32 max_sge;
895 u32 srq_limit;
896 };
897
898 struct ib_srq_init_attr {
899 void (*event_handler)(struct ib_event *, void *);
900 void *srq_context;
901 struct ib_srq_attr attr;
902 enum ib_srq_type srq_type;
903
904 union {
905 struct {
906 struct ib_xrcd *xrcd;
907 struct ib_cq *cq;
908 } xrc;
909 } ext;
910 };
911
912 struct ib_qp_cap {
913 u32 max_send_wr;
914 u32 max_recv_wr;
915 u32 max_send_sge;
916 u32 max_recv_sge;
917 u32 max_inline_data;
918 };
919
920 enum ib_sig_type {
921 IB_SIGNAL_ALL_WR,
922 IB_SIGNAL_REQ_WR
923 };
924
925 enum ib_qp_type {
926 /*
927 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
928 * here (and in that order) since the MAD layer uses them as
929 * indices into a 2-entry table.
930 */
931 IB_QPT_SMI,
932 IB_QPT_GSI,
933
934 IB_QPT_RC,
935 IB_QPT_UC,
936 IB_QPT_UD,
937 IB_QPT_RAW_IPV6,
938 IB_QPT_RAW_ETHERTYPE,
939 IB_QPT_RAW_PACKET = 8,
940 IB_QPT_XRC_INI = 9,
941 IB_QPT_XRC_TGT,
942 IB_QPT_MAX,
943 /* Reserve a range for qp types internal to the low level driver.
944 * These qp types will not be visible at the IB core layer, so the
945 * IB_QPT_MAX usages should not be affected in the core layer
946 */
947 IB_QPT_RESERVED1 = 0x1000,
948 IB_QPT_RESERVED2,
949 IB_QPT_RESERVED3,
950 IB_QPT_RESERVED4,
951 IB_QPT_RESERVED5,
952 IB_QPT_RESERVED6,
953 IB_QPT_RESERVED7,
954 IB_QPT_RESERVED8,
955 IB_QPT_RESERVED9,
956 IB_QPT_RESERVED10,
957 };
958
959 enum ib_qp_create_flags {
960 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
961 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
962 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
963 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
964 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
965 IB_QP_CREATE_NETIF_QP = 1 << 5,
966 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
967 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
968 /* reserve bits 26-31 for low level drivers' internal use */
969 IB_QP_CREATE_RESERVED_START = 1 << 26,
970 IB_QP_CREATE_RESERVED_END = 1 << 31,
971 };
972
973 /*
974 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
975 * callback to destroy the passed in QP.
976 */
977
978 struct ib_qp_init_attr {
979 void (*event_handler)(struct ib_event *, void *);
980 void *qp_context;
981 struct ib_cq *send_cq;
982 struct ib_cq *recv_cq;
983 struct ib_srq *srq;
984 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
985 struct ib_qp_cap cap;
986 enum ib_sig_type sq_sig_type;
987 enum ib_qp_type qp_type;
988 enum ib_qp_create_flags create_flags;
989 u8 port_num; /* special QP types only */
990 };
991
992 struct ib_qp_open_attr {
993 void (*event_handler)(struct ib_event *, void *);
994 void *qp_context;
995 u32 qp_num;
996 enum ib_qp_type qp_type;
997 };
998
999 enum ib_rnr_timeout {
1000 IB_RNR_TIMER_655_36 = 0,
1001 IB_RNR_TIMER_000_01 = 1,
1002 IB_RNR_TIMER_000_02 = 2,
1003 IB_RNR_TIMER_000_03 = 3,
1004 IB_RNR_TIMER_000_04 = 4,
1005 IB_RNR_TIMER_000_06 = 5,
1006 IB_RNR_TIMER_000_08 = 6,
1007 IB_RNR_TIMER_000_12 = 7,
1008 IB_RNR_TIMER_000_16 = 8,
1009 IB_RNR_TIMER_000_24 = 9,
1010 IB_RNR_TIMER_000_32 = 10,
1011 IB_RNR_TIMER_000_48 = 11,
1012 IB_RNR_TIMER_000_64 = 12,
1013 IB_RNR_TIMER_000_96 = 13,
1014 IB_RNR_TIMER_001_28 = 14,
1015 IB_RNR_TIMER_001_92 = 15,
1016 IB_RNR_TIMER_002_56 = 16,
1017 IB_RNR_TIMER_003_84 = 17,
1018 IB_RNR_TIMER_005_12 = 18,
1019 IB_RNR_TIMER_007_68 = 19,
1020 IB_RNR_TIMER_010_24 = 20,
1021 IB_RNR_TIMER_015_36 = 21,
1022 IB_RNR_TIMER_020_48 = 22,
1023 IB_RNR_TIMER_030_72 = 23,
1024 IB_RNR_TIMER_040_96 = 24,
1025 IB_RNR_TIMER_061_44 = 25,
1026 IB_RNR_TIMER_081_92 = 26,
1027 IB_RNR_TIMER_122_88 = 27,
1028 IB_RNR_TIMER_163_84 = 28,
1029 IB_RNR_TIMER_245_76 = 29,
1030 IB_RNR_TIMER_327_68 = 30,
1031 IB_RNR_TIMER_491_52 = 31
1032 };
1033
1034 enum ib_qp_attr_mask {
1035 IB_QP_STATE = 1,
1036 IB_QP_CUR_STATE = (1<<1),
1037 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1038 IB_QP_ACCESS_FLAGS = (1<<3),
1039 IB_QP_PKEY_INDEX = (1<<4),
1040 IB_QP_PORT = (1<<5),
1041 IB_QP_QKEY = (1<<6),
1042 IB_QP_AV = (1<<7),
1043 IB_QP_PATH_MTU = (1<<8),
1044 IB_QP_TIMEOUT = (1<<9),
1045 IB_QP_RETRY_CNT = (1<<10),
1046 IB_QP_RNR_RETRY = (1<<11),
1047 IB_QP_RQ_PSN = (1<<12),
1048 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1049 IB_QP_ALT_PATH = (1<<14),
1050 IB_QP_MIN_RNR_TIMER = (1<<15),
1051 IB_QP_SQ_PSN = (1<<16),
1052 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1053 IB_QP_PATH_MIG_STATE = (1<<18),
1054 IB_QP_CAP = (1<<19),
1055 IB_QP_DEST_QPN = (1<<20),
1056 IB_QP_RESERVED1 = (1<<21),
1057 IB_QP_RESERVED2 = (1<<22),
1058 IB_QP_RESERVED3 = (1<<23),
1059 IB_QP_RESERVED4 = (1<<24),
1060 };
1061
1062 enum ib_qp_state {
1063 IB_QPS_RESET,
1064 IB_QPS_INIT,
1065 IB_QPS_RTR,
1066 IB_QPS_RTS,
1067 IB_QPS_SQD,
1068 IB_QPS_SQE,
1069 IB_QPS_ERR
1070 };
1071
1072 enum ib_mig_state {
1073 IB_MIG_MIGRATED,
1074 IB_MIG_REARM,
1075 IB_MIG_ARMED
1076 };
1077
1078 enum ib_mw_type {
1079 IB_MW_TYPE_1 = 1,
1080 IB_MW_TYPE_2 = 2
1081 };
1082
1083 struct ib_qp_attr {
1084 enum ib_qp_state qp_state;
1085 enum ib_qp_state cur_qp_state;
1086 enum ib_mtu path_mtu;
1087 enum ib_mig_state path_mig_state;
1088 u32 qkey;
1089 u32 rq_psn;
1090 u32 sq_psn;
1091 u32 dest_qp_num;
1092 int qp_access_flags;
1093 struct ib_qp_cap cap;
1094 struct ib_ah_attr ah_attr;
1095 struct ib_ah_attr alt_ah_attr;
1096 u16 pkey_index;
1097 u16 alt_pkey_index;
1098 u8 en_sqd_async_notify;
1099 u8 sq_draining;
1100 u8 max_rd_atomic;
1101 u8 max_dest_rd_atomic;
1102 u8 min_rnr_timer;
1103 u8 port_num;
1104 u8 timeout;
1105 u8 retry_cnt;
1106 u8 rnr_retry;
1107 u8 alt_port_num;
1108 u8 alt_timeout;
1109 };
1110
1111 enum ib_wr_opcode {
1112 IB_WR_RDMA_WRITE,
1113 IB_WR_RDMA_WRITE_WITH_IMM,
1114 IB_WR_SEND,
1115 IB_WR_SEND_WITH_IMM,
1116 IB_WR_RDMA_READ,
1117 IB_WR_ATOMIC_CMP_AND_SWP,
1118 IB_WR_ATOMIC_FETCH_AND_ADD,
1119 IB_WR_LSO,
1120 IB_WR_SEND_WITH_INV,
1121 IB_WR_RDMA_READ_WITH_INV,
1122 IB_WR_LOCAL_INV,
1123 IB_WR_REG_MR,
1124 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1125 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1126 IB_WR_REG_SIG_MR,
1127 /* reserve values for low level drivers' internal use.
1128 * These values will not be used at all in the ib core layer.
1129 */
1130 IB_WR_RESERVED1 = 0xf0,
1131 IB_WR_RESERVED2,
1132 IB_WR_RESERVED3,
1133 IB_WR_RESERVED4,
1134 IB_WR_RESERVED5,
1135 IB_WR_RESERVED6,
1136 IB_WR_RESERVED7,
1137 IB_WR_RESERVED8,
1138 IB_WR_RESERVED9,
1139 IB_WR_RESERVED10,
1140 };
1141
1142 enum ib_send_flags {
1143 IB_SEND_FENCE = 1,
1144 IB_SEND_SIGNALED = (1<<1),
1145 IB_SEND_SOLICITED = (1<<2),
1146 IB_SEND_INLINE = (1<<3),
1147 IB_SEND_IP_CSUM = (1<<4),
1148
1149 /* reserve bits 26-31 for low level drivers' internal use */
1150 IB_SEND_RESERVED_START = (1 << 26),
1151 IB_SEND_RESERVED_END = (1 << 31),
1152 };
1153
1154 struct ib_sge {
1155 u64 addr;
1156 u32 length;
1157 u32 lkey;
1158 };
1159
1160 struct ib_cqe {
1161 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1162 };
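
/*
 * Illustrative sketch of the ib_cqe completion model: a consumer embeds
 * struct ib_cqe in its own request context, points wr_cqe at it when
 * posting, and recovers the context with container_of() in the done
 * callback.  struct example_request and example_send_done() are names
 * assumed for this sketch only.
 */
struct example_request {
	struct ib_cqe	cqe;	/* set cqe.done = example_send_done before posting */
};

static void example_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_request *req =
		container_of(wc->wr_cqe, struct example_request, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("request %p failed: %s\n", req,
		       ib_wc_status_msg(wc->status));
	/* req (and any buffers it tracks) may be completed or freed here */
}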
1163
1164 struct ib_send_wr {
1165 struct ib_send_wr *next;
1166 union {
1167 u64 wr_id;
1168 struct ib_cqe *wr_cqe;
1169 };
1170 struct ib_sge *sg_list;
1171 int num_sge;
1172 enum ib_wr_opcode opcode;
1173 int send_flags;
1174 union {
1175 __be32 imm_data;
1176 u32 invalidate_rkey;
1177 } ex;
1178 };
1179
1180 struct ib_rdma_wr {
1181 struct ib_send_wr wr;
1182 u64 remote_addr;
1183 u32 rkey;
1184 };
1185
1186 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1187 {
1188 return container_of(wr, struct ib_rdma_wr, wr);
1189 }
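
/*
 * Illustrative sketch: building an RDMA WRITE through the typed ib_rdma_wr
 * wrapper.  The sge, remote_addr and rkey values are assumed to have been
 * set up elsewhere; &wr->wr would subsequently be handed to ib_post_send(),
 * which is declared later in this file.
 */
static inline void example_build_rdma_write(struct ib_rdma_wr *wr,
					    struct ib_sge *sge,
					    u64 remote_addr, u32 rkey)
{
	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode	  = IB_WR_RDMA_WRITE;
	wr->wr.sg_list	  = sge;
	wr->wr.num_sge	  = 1;
	wr->wr.send_flags = IB_SEND_SIGNALED;
	wr->remote_addr	  = remote_addr;
	wr->rkey	  = rkey;
}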
1190
1191 struct ib_atomic_wr {
1192 struct ib_send_wr wr;
1193 u64 remote_addr;
1194 u64 compare_add;
1195 u64 swap;
1196 u64 compare_add_mask;
1197 u64 swap_mask;
1198 u32 rkey;
1199 };
1200
1201 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1202 {
1203 return container_of(wr, struct ib_atomic_wr, wr);
1204 }
1205
1206 struct ib_ud_wr {
1207 struct ib_send_wr wr;
1208 struct ib_ah *ah;
1209 void *header;
1210 int hlen;
1211 int mss;
1212 u32 remote_qpn;
1213 u32 remote_qkey;
1214 u16 pkey_index; /* valid for GSI only */
1215 u8 port_num; /* valid for DR SMPs on switch only */
1216 };
1217
1218 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1219 {
1220 return container_of(wr, struct ib_ud_wr, wr);
1221 }
1222
1223 struct ib_reg_wr {
1224 struct ib_send_wr wr;
1225 struct ib_mr *mr;
1226 u32 key;
1227 int access;
1228 };
1229
1230 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1231 {
1232 return container_of(wr, struct ib_reg_wr, wr);
1233 }
1234
1235 struct ib_sig_handover_wr {
1236 struct ib_send_wr wr;
1237 struct ib_sig_attrs *sig_attrs;
1238 struct ib_mr *sig_mr;
1239 int access_flags;
1240 struct ib_sge *prot;
1241 };
1242
1243 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1244 {
1245 return container_of(wr, struct ib_sig_handover_wr, wr);
1246 }
1247
1248 struct ib_recv_wr {
1249 struct ib_recv_wr *next;
1250 union {
1251 u64 wr_id;
1252 struct ib_cqe *wr_cqe;
1253 };
1254 struct ib_sge *sg_list;
1255 int num_sge;
1256 };
1257
1258 enum ib_access_flags {
1259 IB_ACCESS_LOCAL_WRITE = 1,
1260 IB_ACCESS_REMOTE_WRITE = (1<<1),
1261 IB_ACCESS_REMOTE_READ = (1<<2),
1262 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1263 IB_ACCESS_MW_BIND = (1<<4),
1264 IB_ZERO_BASED = (1<<5),
1265 IB_ACCESS_ON_DEMAND = (1<<6),
1266 };
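
/*
 * Illustrative sketch: a fast-registration work request built on ib_reg_wr
 * (defined above).  It assumes the MR's page list has already been laid out
 * (for instance via the ib_map_mr_sg() helper provided by the core) and that
 * the region should be readable by the remote peer.
 */
static inline void example_build_reg_wr(struct ib_reg_wr *wr,
					struct ib_mr *mr, u32 key)
{
	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode	  = IB_WR_REG_MR;
	wr->wr.send_flags = IB_SEND_SIGNALED;
	wr->mr		  = mr;
	wr->key		  = key;
	wr->access	  = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
}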
1267
1268 /*
1269 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1270 * are hidden here instead of a uapi header!
1271 */
1272 enum ib_mr_rereg_flags {
1273 IB_MR_REREG_TRANS = 1,
1274 IB_MR_REREG_PD = (1<<1),
1275 IB_MR_REREG_ACCESS = (1<<2),
1276 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1277 };
1278
1279 struct ib_fmr_attr {
1280 int max_pages;
1281 int max_maps;
1282 u8 page_shift;
1283 };
1284
1285 struct ib_umem;
1286
1287 struct ib_ucontext {
1288 struct ib_device *device;
1289 struct list_head pd_list;
1290 struct list_head mr_list;
1291 struct list_head mw_list;
1292 struct list_head cq_list;
1293 struct list_head qp_list;
1294 struct list_head srq_list;
1295 struct list_head ah_list;
1296 struct list_head xrcd_list;
1297 struct list_head rule_list;
1298 int closing;
1299
1300 struct pid *tgid;
1301 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1302 struct rb_root umem_tree;
1303 /*
1304 * Protects umem_tree, as well as odp_mrs_count and
1305 * mmu notifiers registration.
1306 */
1307 struct rw_semaphore umem_rwsem;
1308 void (*invalidate_range)(struct ib_umem *umem,
1309 unsigned long start, unsigned long end);
1310
1311 struct mmu_notifier mn;
1312 atomic_t notifier_count;
1313 /* A list of umems that don't have private mmu notifier counters yet. */
1314 struct list_head no_private_counters;
1315 int odp_mrs_count;
1316 #endif
1317 };
1318
1319 struct ib_uobject {
1320 u64 user_handle; /* handle given to us by userspace */
1321 struct ib_ucontext *context; /* associated user context */
1322 void *object; /* containing object */
1323 struct list_head list; /* link to context's list */
1324 int id; /* index into kernel idr */
1325 struct kref ref;
1326 struct rw_semaphore mutex; /* protects .live */
1327 struct rcu_head rcu; /* kfree_rcu() overhead */
1328 int live;
1329 };
1330
1331 struct ib_udata {
1332 const void __user *inbuf;
1333 void __user *outbuf;
1334 size_t inlen;
1335 size_t outlen;
1336 };
1337
1338 struct ib_pd {
1339 u32 local_dma_lkey;
1340 struct ib_device *device;
1341 struct ib_uobject *uobject;
1342 atomic_t usecnt; /* count all resources */
1343 struct ib_mr *local_mr;
1344 };
1345
1346 struct ib_xrcd {
1347 struct ib_device *device;
1348 atomic_t usecnt; /* count all exposed resources */
1349 struct inode *inode;
1350
1351 struct mutex tgt_qp_mutex;
1352 struct list_head tgt_qp_list;
1353 };
1354
1355 struct ib_ah {
1356 struct ib_device *device;
1357 struct ib_pd *pd;
1358 struct ib_uobject *uobject;
1359 };
1360
1361 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1362
1363 enum ib_poll_context {
1364 IB_POLL_DIRECT, /* caller context, no hw completions */
1365 IB_POLL_SOFTIRQ, /* poll from softirq context */
1366 IB_POLL_WORKQUEUE, /* poll from workqueue */
1367 };
1368
1369 struct ib_cq {
1370 struct ib_device *device;
1371 struct ib_uobject *uobject;
1372 ib_comp_handler comp_handler;
1373 void (*event_handler)(struct ib_event *, void *);
1374 void *cq_context;
1375 int cqe;
1376 atomic_t usecnt; /* count number of work queues */
1377 enum ib_poll_context poll_ctx;
1378 struct ib_wc *wc;
1379 union {
1380 struct irq_poll iop;
1381 struct work_struct work;
1382 };
1383 };
1384
1385 struct ib_srq {
1386 struct ib_device *device;
1387 struct ib_pd *pd;
1388 struct ib_uobject *uobject;
1389 void (*event_handler)(struct ib_event *, void *);
1390 void *srq_context;
1391 enum ib_srq_type srq_type;
1392 atomic_t usecnt;
1393
1394 union {
1395 struct {
1396 struct ib_xrcd *xrcd;
1397 struct ib_cq *cq;
1398 u32 srq_num;
1399 } xrc;
1400 } ext;
1401 };
1402
1403 struct ib_qp {
1404 struct ib_device *device;
1405 struct ib_pd *pd;
1406 struct ib_cq *send_cq;
1407 struct ib_cq *recv_cq;
1408 struct ib_srq *srq;
1409 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1410 struct list_head xrcd_list;
1411 /* count times opened, mcast attaches, flow attaches */
1412 atomic_t usecnt;
1413 struct list_head open_list;
1414 struct ib_qp *real_qp;
1415 struct ib_uobject *uobject;
1416 void (*event_handler)(struct ib_event *, void *);
1417 void *qp_context;
1418 u32 qp_num;
1419 enum ib_qp_type qp_type;
1420 };
1421
1422 struct ib_mr {
1423 struct ib_device *device;
1424 struct ib_pd *pd;
1425 struct ib_uobject *uobject;
1426 u32 lkey;
1427 u32 rkey;
1428 u64 iova;
1429 u32 length;
1430 unsigned int page_size;
1431 };
1432
1433 struct ib_mw {
1434 struct ib_device *device;
1435 struct ib_pd *pd;
1436 struct ib_uobject *uobject;
1437 u32 rkey;
1438 enum ib_mw_type type;
1439 };
1440
1441 struct ib_fmr {
1442 struct ib_device *device;
1443 struct ib_pd *pd;
1444 struct list_head list;
1445 u32 lkey;
1446 u32 rkey;
1447 };
1448
1449 /* Supported steering options */
1450 enum ib_flow_attr_type {
1451 /* steering according to rule specifications */
1452 IB_FLOW_ATTR_NORMAL = 0x0,
1453 /* default unicast and multicast rule -
1454 * receive all Eth traffic which isn't steered to any QP
1455 */
1456 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1457 /* default multicast rule -
1458 * receive all Eth multicast traffic which isn't steered to any QP
1459 */
1460 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1461 /* sniffer rule - receive all port traffic */
1462 IB_FLOW_ATTR_SNIFFER = 0x3
1463 };
1464
1465 /* Supported steering header types */
1466 enum ib_flow_spec_type {
1467 /* L2 headers*/
1468 IB_FLOW_SPEC_ETH = 0x20,
1469 IB_FLOW_SPEC_IB = 0x22,
1470 /* L3 header*/
1471 IB_FLOW_SPEC_IPV4 = 0x30,
1472 /* L4 headers*/
1473 IB_FLOW_SPEC_TCP = 0x40,
1474 IB_FLOW_SPEC_UDP = 0x41
1475 };
1476 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1477 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1478
1479 /* Flow steering rule priority is set according to its domain.
1480 * Lower domain value means higher priority.
1481 */
1482 enum ib_flow_domain {
1483 IB_FLOW_DOMAIN_USER,
1484 IB_FLOW_DOMAIN_ETHTOOL,
1485 IB_FLOW_DOMAIN_RFS,
1486 IB_FLOW_DOMAIN_NIC,
1487 IB_FLOW_DOMAIN_NUM /* Must be last */
1488 };
1489
1490 struct ib_flow_eth_filter {
1491 u8 dst_mac[6];
1492 u8 src_mac[6];
1493 __be16 ether_type;
1494 __be16 vlan_tag;
1495 };
1496
1497 struct ib_flow_spec_eth {
1498 enum ib_flow_spec_type type;
1499 u16 size;
1500 struct ib_flow_eth_filter val;
1501 struct ib_flow_eth_filter mask;
1502 };
1503
1504 struct ib_flow_ib_filter {
1505 __be16 dlid;
1506 __u8 sl;
1507 };
1508
1509 struct ib_flow_spec_ib {
1510 enum ib_flow_spec_type type;
1511 u16 size;
1512 struct ib_flow_ib_filter val;
1513 struct ib_flow_ib_filter mask;
1514 };
1515
1516 struct ib_flow_ipv4_filter {
1517 __be32 src_ip;
1518 __be32 dst_ip;
1519 };
1520
1521 struct ib_flow_spec_ipv4 {
1522 enum ib_flow_spec_type type;
1523 u16 size;
1524 struct ib_flow_ipv4_filter val;
1525 struct ib_flow_ipv4_filter mask;
1526 };
1527
1528 struct ib_flow_tcp_udp_filter {
1529 __be16 dst_port;
1530 __be16 src_port;
1531 };
1532
1533 struct ib_flow_spec_tcp_udp {
1534 enum ib_flow_spec_type type;
1535 u16 size;
1536 struct ib_flow_tcp_udp_filter val;
1537 struct ib_flow_tcp_udp_filter mask;
1538 };
1539
1540 union ib_flow_spec {
1541 struct {
1542 enum ib_flow_spec_type type;
1543 u16 size;
1544 };
1545 struct ib_flow_spec_eth eth;
1546 struct ib_flow_spec_ib ib;
1547 struct ib_flow_spec_ipv4 ipv4;
1548 struct ib_flow_spec_tcp_udp tcp_udp;
1549 };
1550
1551 struct ib_flow_attr {
1552 enum ib_flow_attr_type type;
1553 u16 size;
1554 u16 priority;
1555 u32 flags;
1556 u8 num_of_specs;
1557 u8 port;
1558 /* Following are the optional layers according to user request
1559 * struct ib_flow_spec_xxx
1560 * struct ib_flow_spec_yyy
1561 */
1562 };
1563
1564 struct ib_flow {
1565 struct ib_qp *qp;
1566 struct ib_uobject *uobject;
1567 };
1568
1569 struct ib_mad_hdr;
1570 struct ib_grh;
1571
1572 enum ib_process_mad_flags {
1573 IB_MAD_IGNORE_MKEY = 1,
1574 IB_MAD_IGNORE_BKEY = 2,
1575 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1576 };
1577
1578 enum ib_mad_result {
1579 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
1580 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
1581 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
1582 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
1583 };
1584
1585 #define IB_DEVICE_NAME_MAX 64
1586
1587 struct ib_cache {
1588 rwlock_t lock;
1589 struct ib_event_handler event_handler;
1590 struct ib_pkey_cache **pkey_cache;
1591 struct ib_gid_table **gid_cache;
1592 u8 *lmc_cache;
1593 };
1594
1595 struct ib_dma_mapping_ops {
1596 int (*mapping_error)(struct ib_device *dev,
1597 u64 dma_addr);
1598 u64 (*map_single)(struct ib_device *dev,
1599 void *ptr, size_t size,
1600 enum dma_data_direction direction);
1601 void (*unmap_single)(struct ib_device *dev,
1602 u64 addr, size_t size,
1603 enum dma_data_direction direction);
1604 u64 (*map_page)(struct ib_device *dev,
1605 struct page *page, unsigned long offset,
1606 size_t size,
1607 enum dma_data_direction direction);
1608 void (*unmap_page)(struct ib_device *dev,
1609 u64 addr, size_t size,
1610 enum dma_data_direction direction);
1611 int (*map_sg)(struct ib_device *dev,
1612 struct scatterlist *sg, int nents,
1613 enum dma_data_direction direction);
1614 void (*unmap_sg)(struct ib_device *dev,
1615 struct scatterlist *sg, int nents,
1616 enum dma_data_direction direction);
1617 void (*sync_single_for_cpu)(struct ib_device *dev,
1618 u64 dma_handle,
1619 size_t size,
1620 enum dma_data_direction dir);
1621 void (*sync_single_for_device)(struct ib_device *dev,
1622 u64 dma_handle,
1623 size_t size,
1624 enum dma_data_direction dir);
1625 void *(*alloc_coherent)(struct ib_device *dev,
1626 size_t size,
1627 u64 *dma_handle,
1628 gfp_t flag);
1629 void (*free_coherent)(struct ib_device *dev,
1630 size_t size, void *cpu_addr,
1631 u64 dma_handle);
1632 };
1633
1634 struct iw_cm_verbs;
1635
1636 struct ib_port_immutable {
1637 int pkey_tbl_len;
1638 int gid_tbl_len;
1639 u32 core_cap_flags;
1640 u32 max_mad_size;
1641 };
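
/*
 * Illustrative sketch: a driver's get_port_immutable() hook (see the
 * struct ib_device definition below) typically advertises one of the
 * RDMA_CORE_PORT_* composites defined earlier.  The table lengths and MAD
 * size below are example values only.
 */
static inline void example_fill_port_immutable(struct ib_port_immutable *immutable)
{
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->pkey_tbl_len	  = 1;
	immutable->gid_tbl_len	  = 16;
	immutable->max_mad_size	  = 256;	/* IB-defined MADs are 256 bytes */
}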
1642
1643 struct ib_device {
1644 struct device *dma_device;
1645
1646 char name[IB_DEVICE_NAME_MAX];
1647
1648 struct list_head event_handler_list;
1649 spinlock_t event_handler_lock;
1650
1651 spinlock_t client_data_lock;
1652 struct list_head core_list;
1653 /* Access to the client_data_list is protected by the client_data_lock
1654 * spinlock and the lists_rwsem read-write semaphore */
1655 struct list_head client_data_list;
1656
1657 struct ib_cache cache;
1658 /**
1659 * port_immutable is indexed by port number
1660 */
1661 struct ib_port_immutable *port_immutable;
1662
1663 int num_comp_vectors;
1664
1665 struct iw_cm_verbs *iwcm;
1666
1667 int (*get_protocol_stats)(struct ib_device *device,
1668 union rdma_protocol_stats *stats);
1669 int (*query_device)(struct ib_device *device,
1670 struct ib_device_attr *device_attr,
1671 struct ib_udata *udata);
1672 int (*query_port)(struct ib_device *device,
1673 u8 port_num,
1674 struct ib_port_attr *port_attr);
1675 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1676 u8 port_num);
1677 /* When calling get_netdev, the HW vendor's driver should return the
1678 * net device of device @device at port @port_num or NULL if such
1679 * a net device doesn't exist. The vendor driver should call dev_hold
1680 * on this net device. The HW vendor's device driver must guarantee
1681 * that this function returns NULL before the net device reaches
1682 * NETDEV_UNREGISTER_FINAL state.
1683 */
1684 struct net_device *(*get_netdev)(struct ib_device *device,
1685 u8 port_num);
1686 int (*query_gid)(struct ib_device *device,
1687 u8 port_num, int index,
1688 union ib_gid *gid);
1689 /* When calling add_gid, the HW vendor's driver should
1690 * add the gid of device @device at gid index @index of
1691 * port @port_num to be @gid. Meta-info of that gid (for example,
1692 * the network device related to this gid) is available
1693 * at @attr. @context allows the HW vendor driver to store extra
1694 * information together with a GID entry. The HW vendor may allocate
1695 * memory to contain this information and store it in @context when a
1696 * new GID entry is written to. Params are consistent until the next
1697 * call of add_gid or delete_gid. The function should return 0 on
1698 * success or error otherwise. The function could be called
1699 * concurrently for different ports. This function is only called
1700 * when roce_gid_table is used.
1701 */
1702 int (*add_gid)(struct ib_device *device,
1703 u8 port_num,
1704 unsigned int index,
1705 const union ib_gid *gid,
1706 const struct ib_gid_attr *attr,
1707 void **context);
1708 /* When calling del_gid, the HW vendor's driver should delete the
1709 * gid of device @device at gid index @index of port @port_num.
1710 * Upon the deletion of a GID entry, the HW vendor must free any
1711 * allocated memory. The caller will clear @context afterwards.
1712 * This function is only called when roce_gid_table is used.
1713 */
1714 int (*del_gid)(struct ib_device *device,
1715 u8 port_num,
1716 unsigned int index,
1717 void **context);
1718 int (*query_pkey)(struct ib_device *device,
1719 u8 port_num, u16 index, u16 *pkey);
1720 int (*modify_device)(struct ib_device *device,
1721 int device_modify_mask,
1722 struct ib_device_modify *device_modify);
1723 int (*modify_port)(struct ib_device *device,
1724 u8 port_num, int port_modify_mask,
1725 struct ib_port_modify *port_modify);
1726 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1727 struct ib_udata *udata);
1728 int (*dealloc_ucontext)(struct ib_ucontext *context);
1729 int (*mmap)(struct ib_ucontext *context,
1730 struct vm_area_struct *vma);
1731 struct ib_pd * (*alloc_pd)(struct ib_device *device,
1732 struct ib_ucontext *context,
1733 struct ib_udata *udata);
1734 int (*dealloc_pd)(struct ib_pd *pd);
1735 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1736 struct ib_ah_attr *ah_attr);
1737 int (*modify_ah)(struct ib_ah *ah,
1738 struct ib_ah_attr *ah_attr);
1739 int (*query_ah)(struct ib_ah *ah,
1740 struct ib_ah_attr *ah_attr);
1741 int (*destroy_ah)(struct ib_ah *ah);
1742 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1743 struct ib_srq_init_attr *srq_init_attr,
1744 struct ib_udata *udata);
1745 int (*modify_srq)(struct ib_srq *srq,
1746 struct ib_srq_attr *srq_attr,
1747 enum ib_srq_attr_mask srq_attr_mask,
1748 struct ib_udata *udata);
1749 int (*query_srq)(struct ib_srq *srq,
1750 struct ib_srq_attr *srq_attr);
1751 int (*destroy_srq)(struct ib_srq *srq);
1752 int (*post_srq_recv)(struct ib_srq *srq,
1753 struct ib_recv_wr *recv_wr,
1754 struct ib_recv_wr **bad_recv_wr);
1755 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1756 struct ib_qp_init_attr *qp_init_attr,
1757 struct ib_udata *udata);
1758 int (*modify_qp)(struct ib_qp *qp,
1759 struct ib_qp_attr *qp_attr,
1760 int qp_attr_mask,
1761 struct ib_udata *udata);
1762 int (*query_qp)(struct ib_qp *qp,
1763 struct ib_qp_attr *qp_attr,
1764 int qp_attr_mask,
1765 struct ib_qp_init_attr *qp_init_attr);
1766 int (*destroy_qp)(struct ib_qp *qp);
1767 int (*post_send)(struct ib_qp *qp,
1768 struct ib_send_wr *send_wr,
1769 struct ib_send_wr **bad_send_wr);
1770 int (*post_recv)(struct ib_qp *qp,
1771 struct ib_recv_wr *recv_wr,
1772 struct ib_recv_wr **bad_recv_wr);
1773 struct ib_cq * (*create_cq)(struct ib_device *device,
1774 const struct ib_cq_init_attr *attr,
1775 struct ib_ucontext *context,
1776 struct ib_udata *udata);
1777 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1778 u16 cq_period);
1779 int (*destroy_cq)(struct ib_cq *cq);
1780 int (*resize_cq)(struct ib_cq *cq, int cqe,
1781 struct ib_udata *udata);
1782 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1783 struct ib_wc *wc);
1784 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1785 int (*req_notify_cq)(struct ib_cq *cq,
1786 enum ib_cq_notify_flags flags);
1787 int (*req_ncomp_notif)(struct ib_cq *cq,
1788 int wc_cnt);
1789 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1790 int mr_access_flags);
1791 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1792 u64 start, u64 length,
1793 u64 virt_addr,
1794 int mr_access_flags,
1795 struct ib_udata *udata);
1796 int (*rereg_user_mr)(struct ib_mr *mr,
1797 int flags,
1798 u64 start, u64 length,
1799 u64 virt_addr,
1800 int mr_access_flags,
1801 struct ib_pd *pd,
1802 struct ib_udata *udata);
1803 int (*dereg_mr)(struct ib_mr *mr);
1804 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
1805 enum ib_mr_type mr_type,
1806 u32 max_num_sg);
1807 int (*map_mr_sg)(struct ib_mr *mr,
1808 struct scatterlist *sg,
1809 int sg_nents);
1810 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1811 enum ib_mw_type type);
1812 int (*dealloc_mw)(struct ib_mw *mw);
1813 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1814 int mr_access_flags,
1815 struct ib_fmr_attr *fmr_attr);
1816 int (*map_phys_fmr)(struct ib_fmr *fmr,
1817 u64 *page_list, int list_len,
1818 u64 iova);
1819 int (*unmap_fmr)(struct list_head *fmr_list);
1820 int (*dealloc_fmr)(struct ib_fmr *fmr);
1821 int (*attach_mcast)(struct ib_qp *qp,
1822 union ib_gid *gid,
1823 u16 lid);
1824 int (*detach_mcast)(struct ib_qp *qp,
1825 union ib_gid *gid,
1826 u16 lid);
1827 int (*process_mad)(struct ib_device *device,
1828 int process_mad_flags,
1829 u8 port_num,
1830 const struct ib_wc *in_wc,
1831 const struct ib_grh *in_grh,
1832 const struct ib_mad_hdr *in_mad,
1833 size_t in_mad_size,
1834 struct ib_mad_hdr *out_mad,
1835 size_t *out_mad_size,
1836 u16 *out_mad_pkey_index);
1837 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1838 struct ib_ucontext *ucontext,
1839 struct ib_udata *udata);
1840 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1841 struct ib_flow * (*create_flow)(struct ib_qp *qp,
1842 struct ib_flow_attr
1843 *flow_attr,
1844 int domain);
1845 int (*destroy_flow)(struct ib_flow *flow_id);
1846 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1847 struct ib_mr_status *mr_status);
1848 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
1849 void (*drain_rq)(struct ib_qp *qp);
1850 void (*drain_sq)(struct ib_qp *qp);
1851
1852 struct ib_dma_mapping_ops *dma_ops;
1853
1854 struct module *owner;
1855 struct device dev;
1856 struct kobject *ports_parent;
1857 struct list_head port_list;
1858
1859 enum {
1860 IB_DEV_UNINITIALIZED,
1861 IB_DEV_REGISTERED,
1862 IB_DEV_UNREGISTERED
1863 } reg_state;
1864
1865 int uverbs_abi_ver;
1866 u64 uverbs_cmd_mask;
1867 u64 uverbs_ex_cmd_mask;
1868
1869 char node_desc[64];
1870 __be64 node_guid;
1871 u32 local_dma_lkey;
1872 u16 is_switch:1;
1873 u8 node_type;
1874 u8 phys_port_cnt;
1875 struct ib_device_attr attrs;
1876
1877 /**
1878 * The following mandatory functions are used only at device
1879 * registration. Keep functions such as these at the end of this
1880 * structure to avoid cache line misses when accessing struct ib_device
1881 * in fast paths.
1882 */
1883 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
1884 };
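
/*
 * Illustrative sketch related to the drain_rq/drain_sq hooks above: the
 * common drain helpers this change layers on top of them (ib_drain_qp(),
 * ib_drain_sq() and ib_drain_rq()) let a ULP wait for all posted work
 * requests to complete before destroying a QP.  example_teardown_qp() is
 * a name assumed for this sketch.
 */
static inline void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* flush and reap outstanding SQ and RQ work */
	ib_destroy_qp(qp);	/* declared later in this file */
}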
1885
1886 struct ib_client {
1887 char *name;
1888 void (*add) (struct ib_device *);
1889 void (*remove)(struct ib_device *, void *client_data);
1890
1891 /* Returns the net_dev belonging to this ib_client and matching the
1892 * given parameters.
1893 * @dev: An RDMA device that the net_dev uses for communication.
1894 * @port: A physical port number on the RDMA device.
1895 * @pkey: P_Key that the net_dev uses if applicable.
1896 * @gid: A GID that the net_dev uses to communicate.
1897 * @addr: An IP address the net_dev is configured with.
1898 * @client_data: The device's client data set by ib_set_client_data().
1899 *
1900 * An ib_client that implements a net_dev on top of RDMA devices
1901 * (such as IP over IB) should implement this callback, allowing the
1902 * rdma_cm module to find the right net_dev for a given request.
1903 *
1904 * The caller is responsible for calling dev_put on the returned
1905 * netdev. */
1906 struct net_device *(*get_net_dev_by_params)(
1907 struct ib_device *dev,
1908 u8 port,
1909 u16 pkey,
1910 const union ib_gid *gid,
1911 const struct sockaddr *addr,
1912 void *client_data);
1913 struct list_head list;
1914 };
1915
1916 struct ib_device *ib_alloc_device(size_t size);
1917 void ib_dealloc_device(struct ib_device *device);
1918
1919 int ib_register_device(struct ib_device *device,
1920 int (*port_callback)(struct ib_device *,
1921 u8, struct kobject *));
1922 void ib_unregister_device(struct ib_device *device);
1923
1924 int ib_register_client (struct ib_client *client);
1925 void ib_unregister_client(struct ib_client *client);
1926
1927 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1928 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1929 void *data);
1930
1931 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1932 {
1933 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1934 }
1935
1936 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1937 {
1938 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1939 }
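
/*
 * Illustrative sketch: a driver verb usually pulls its private command
 * structure out of udata with the helper above.  struct example_create_cmd
 * is an assumption made for this sketch, not a real uverbs structure.
 */
struct example_create_cmd {
	__u32 comp_mask;
	__u32 reserved;
};

static inline int example_parse_udata(struct ib_udata *udata,
				      struct example_create_cmd *cmd)
{
	if (udata->inlen < sizeof(*cmd))
		return -EINVAL;

	return ib_copy_from_udata(cmd, udata, sizeof(*cmd));
}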
1940
1941 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
1942 size_t offset,
1943 size_t len)
1944 {
1945 const void __user *p = udata->inbuf + offset;
1946 bool ret = false;
1947 u8 *buf;
1948
1949 if (len > USHRT_MAX)
1950 return false;
1951
1952 buf = kmalloc(len, GFP_KERNEL);
1953 if (!buf)
1954 return false;
1955
1956 if (copy_from_user(buf, p, len))
1957 goto free;
1958
1959 ret = !memchr_inv(buf, 0, len);
1960
1961 free:
1962 kfree(buf);
1963 return ret;
1964 }
1965
1966 /**
1967 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1968 * contains all required attributes and no attributes not allowed for
1969 * the given QP state transition.
1970 * @cur_state: Current QP state
1971 * @next_state: Next QP state
1972 * @type: QP type
1973 * @mask: Mask of supplied QP attributes
1974 * @ll : link layer of port
1975 *
1976 * This function is a helper function that a low-level driver's
1977 * modify_qp method can use to validate the consumer's input. It
1978 * checks that cur_state and next_state are valid QP states, that a
1979 * transition from cur_state to next_state is allowed by the IB spec,
1980 * and that the attribute mask supplied is allowed for the transition.
1981 */
1982 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1983 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1984 enum rdma_link_layer ll);
1985
1986 int ib_register_event_handler (struct ib_event_handler *event_handler);
1987 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1988 void ib_dispatch_event(struct ib_event *event);
1989
1990 int ib_query_port(struct ib_device *device,
1991 u8 port_num, struct ib_port_attr *port_attr);
1992
1993 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1994 u8 port_num);
1995
1996 /**
1997 * rdma_cap_ib_switch - Check if the device is IB switch
1998 * @device: Device to check
1999 *
2000 * The device driver is responsible for setting the is_switch bit
2001 * in the ib_device structure at init time.
2002 *
2003 * Return: true if the device is IB switch.
2004 */
2005 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2006 {
2007 return device->is_switch;
2008 }
2009
2010 /**
2011 * rdma_start_port - Return the first valid port number for the device
2012 * specified
2013 *
2014 * @device: Device to be checked
2015 *
2016 * Return start port number
2017 */
2018 static inline u8 rdma_start_port(const struct ib_device *device)
2019 {
2020 return rdma_cap_ib_switch(device) ? 0 : 1;
2021 }
2022
2023 /**
2024 * rdma_end_port - Return the last valid port number for the device
2025 * specified
2026 *
2027 * @device: Device to be checked
2028 *
2029 * Return last port number
2030 */
2031 static inline u8 rdma_end_port(const struct ib_device *device)
2032 {
2033 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2034 }
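
/*
 * Illustrative example: rdma_start_port()/rdma_end_port() let callers walk
 * the valid port range uniformly, whether the device is a switch (only
 * port 0) or a CA/router/RNIC (ports 1..phys_port_cnt).
 */
static inline void example_for_each_port(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); port++)
		pr_debug("%s: port %u is valid\n", device->name, port);
}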
2035
2036 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2037 {
2038 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2039 }
2040
2041 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2042 {
2043 return device->port_immutable[port_num].core_cap_flags &
2044 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2045 }
2046
2047 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2048 {
2049 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2050 }
2051
2052 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2053 {
2054 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2055 }
2056
2057 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2058 {
2059 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2060 }
2061
2062 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2063 {
2064 return rdma_protocol_ib(device, port_num) ||
2065 rdma_protocol_roce(device, port_num);
2066 }
2067
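/*
 * Example (illustrative sketch, not part of this header): picking a
 * human-readable name for the protocol spoken by a port, using the
 * rdma_protocol_*() helpers above.  The function name is hypothetical.
 */
static inline const char *example_port_protocol_name(const struct ib_device *device,
						     u8 port_num)
{
	if (rdma_protocol_ib(device, port_num))
		return "InfiniBand";
	if (rdma_protocol_roce_udp_encap(device, port_num))
		return "RoCE v2";
	if (rdma_protocol_roce_eth_encap(device, port_num))
		return "RoCE v1";
	if (rdma_protocol_iwarp(device, port_num))
		return "iWARP";
	return "unknown";
}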
2068 /**
2069 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2070 * Management Datagrams.
2071 * @device: Device to check
2072 * @port_num: Port number to check
2073 *
2074 * Management Datagrams (MAD) are a required part of the InfiniBand
2075 * specification and are supported on all InfiniBand devices. A slightly
2076 * extended version is also supported on OPA interfaces.
2077 *
2078 * Return: true if the port supports sending/receiving of MAD packets.
2079 */
2080 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2081 {
2082 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2083 }
2084
2085 /**
2086 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2087 * Management Datagrams.
2088 * @device: Device to check
2089 * @port_num: Port number to check
2090 *
2091 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2092 * datagrams with their own versions. These OPA MADs share many but not all of
2093 * the characteristics of InfiniBand MADs.
2094 *
2095 * OPA MADs differ in the following ways:
2096 *
2097 * 1) MADs are variable size up to 2K
2098 *    (IBTA-defined MADs remain fixed at 256 bytes)
2099 * 2) OPA SMPs must carry valid PKeys
2100 * 3) OPA SMP packets use a different format
2101 *
2102 * Return: true if the port supports OPA MAD packet formats.
2103 */
2104 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2105 {
2106 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2107 == RDMA_CORE_CAP_OPA_MAD;
2108 }
2109
2110 /**
2111 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2112 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2113 * @device: Device to check
2114 * @port_num: Port number to check
2115 *
2116 * Each InfiniBand node is required to provide a Subnet Management Agent
2117 * that the subnet manager can access. Prior to the fabric being fully
2118 * configured by the subnet manager, the SMA is accessed via a well known
2119 * interface called the Subnet Management Interface (SMI). This interface
2120 * uses directed route packets to communicate with the SM to get around the
2121 * chicken and egg problem of the SM needing to know what's on the fabric
2122 * in order to configure the fabric, and needing to configure the fabric in
2123 * order to send packets to the devices on the fabric. These directed
2124 * route packets do not need the fabric fully configured in order to reach
2125 * their destination. The SMI is the only method allowed to send
2126 * directed route packets on an InfiniBand fabric.
2127 *
2128 * Return: true if the port provides an SMI.
2129 */
2130 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2131 {
2132 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2133 }
2134
2135 /**
2136 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2137 * Communication Manager.
2138 * @device: Device to check
2139 * @port_num: Port number to check
2140 *
2141 * The InfiniBand Communication Manager is one of many pre-defined General
2142 * Service Agents (GSA) that are accessed via the General Service
2143 * Interface (GSI). Its role is to facilitate establishment of connections
2144 * between nodes as well as other management related tasks for established
2145 * connections.
2146 *
2147 * Return: true if the port supports an IB CM (this does not guarantee that
2148 * a CM is actually running however).
2149 */
2150 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2151 {
2152 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2153 }
2154
2155 /**
2156 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2157 * Communication Manager.
2158 * @device: Device to check
2159 * @port_num: Port number to check
2160 *
2161 * Similar to rdma_cap_ib_cm(), but specific to iWARP connections, which use
2162 * a different management protocol than InfiniBand.
2163 *
2164 * Return: true if the port supports an iWARP CM (this does not guarantee that
2165 * a CM is actually running however).
2166 */
2167 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2168 {
2169 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2170 }
2171
2172 /**
2173 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2174 * Subnet Administration.
2175 * @device: Device to check
2176 * @port_num: Port number to check
2177 *
2178 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2179 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
2180 * fabrics, devices should resolve routes to other hosts by contacting the
2181 * SA to query the proper route.
2182 *
2183 * Return: true if the port should act as a client to the fabric Subnet
2184 * Administration interface. This does not imply that the SA service is
2185 * running locally.
2186 */
2187 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2188 {
2189 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2190 }
2191
2192 /**
2193 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2194 * multicast.
2195 * @device: Device to check
2196 * @port_num: Port number to check
2197 *
2198 * InfiniBand multicast registration is more complex than normal IPv4 or
2199 * IPv6 multicast registration. Each Host Channel Adapter must register
2200 * with the Subnet Manager when it wishes to join a multicast group. It
2201 * should do so only once regardless of how many queue pairs it subscribes
2202 * to this group. And it should leave the group only after all queue pairs
2203 * attached to the group have been detached.
2204 *
2205 * Return: true if the port must undertake the additional administrative
2206 * overhead of registering/unregistering with the SM and tracking of the
2207 * total number of queue pairs attached to the multicast group.
2208 */
2209 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2210 {
2211 return rdma_cap_ib_sa(device, port_num);
2212 }
2213
2214 /**
2215 * rdma_cap_af_ib - Check if the port of a device supports
2216 * native InfiniBand addressing.
2217 * @device: Device to check
2218 * @port_num: Port number to check
2219 *
2220 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2221 * GID. RoCE uses a different mechanism, but still generates a GID via
2222 * a prescribed mechanism and port specific data.
2223 *
2224 * Return: true if the port uses a GID address to identify devices on the
2225 * network.
2226 */
2227 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2228 {
2229 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2230 }
2231
2232 /**
2233 * rdma_cap_eth_ah - Check if the port of a device supports
2234 * Ethernet address handles.
2235 * @device: Device to check
2236 * @port_num: Port number to check
2237 *
2238 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2239 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2240 * port. Normally, packet headers are generated by the sending host
2241 * adapter, but when sending connectionless datagrams, we must manually
2242 * inject the proper headers for the fabric we are communicating over.
2243 *
2244 * Return: true if we are running as a RoCE port and must force the
2245 * addition of a Global Route Header built from our Ethernet Address
2246 * Handle into our header list for connectionless packets.
2247 */
2248 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2249 {
2250 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2251 }
2252
2253 /**
2254 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2255 *
2256 * @device: Device
2257 * @port_num: Port number
2258 *
2259 * This MAD size includes the MAD headers and MAD payload. No other headers
2260 * are included.
2261 *
2262 * Return the max MAD size required by the Port. Will return 0 if the port
2263 * does not support MADs
2264 */
2265 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2266 {
2267 return device->port_immutable[port_num].max_mad_size;
2268 }
2269
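/*
 * Example (illustrative sketch, not part of this header): sizing a receive
 * buffer for MAD traffic on a port, bailing out when the port does not
 * support MADs.  The function name is hypothetical.
 */
static inline void *example_alloc_mad_buf(const struct ib_device *device,
					  u8 port_num, gfp_t gfp)
{
	size_t len = rdma_max_mad_size(device, port_num);

	if (!rdma_cap_ib_mad(device, port_num) || !len)
		return NULL;

	return kmalloc(len, gfp);
}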
2270 /**
2271 * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
2272 * @device: Device to check
2273 * @port_num: Port number to check
2274 *
2275 * RoCE GID table mechanism manages the various GIDs for a device.
2276 *
2277 * NOTE: if allocating the port's GID table has failed, this call will still
2278 * return true, but any RoCE GID table API will fail.
2279 *
2280 * Return: true if the port uses RoCE GID table mechanism in order to manage
2281 * its GIDs.
2282 */
2283 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2284 u8 port_num)
2285 {
2286 return rdma_protocol_roce(device, port_num) &&
2287 device->add_gid && device->del_gid;
2288 }
2289
2290 int ib_query_gid(struct ib_device *device,
2291 u8 port_num, int index, union ib_gid *gid,
2292 struct ib_gid_attr *attr);
2293
2294 int ib_query_pkey(struct ib_device *device,
2295 u8 port_num, u16 index, u16 *pkey);
2296
2297 int ib_modify_device(struct ib_device *device,
2298 int device_modify_mask,
2299 struct ib_device_modify *device_modify);
2300
2301 int ib_modify_port(struct ib_device *device,
2302 u8 port_num, int port_modify_mask,
2303 struct ib_port_modify *port_modify);
2304
2305 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2306 enum ib_gid_type gid_type, struct net_device *ndev,
2307 u8 *port_num, u16 *index);
2308
2309 int ib_find_pkey(struct ib_device *device,
2310 u8 port_num, u16 pkey, u16 *index);
2311
2312 struct ib_pd *ib_alloc_pd(struct ib_device *device);
2313
2314 void ib_dealloc_pd(struct ib_pd *pd);
2315
2316 /**
2317 * ib_create_ah - Creates an address handle for the given address vector.
2318 * @pd: The protection domain associated with the address handle.
2319 * @ah_attr: The attributes of the address vector.
2320 *
2321 * The address handle is used to reference a local or global destination
2322 * in all UD QP post sends.
2323 */
2324 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2325
2326 /**
2327 * ib_init_ah_from_wc - Initializes address handle attributes from a
2328 * work completion.
2329 * @device: Device on which the received message arrived.
2330 * @port_num: Port on which the received message arrived.
2331 * @wc: Work completion associated with the received message.
2332 * @grh: References the received global route header. This parameter is
2333 * ignored unless the work completion indicates that the GRH is valid.
2334 * @ah_attr: Returned attributes that can be used when creating an address
2335 * handle for replying to the message.
2336 */
2337 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2338 const struct ib_wc *wc, const struct ib_grh *grh,
2339 struct ib_ah_attr *ah_attr);
2340
2341 /**
2342 * ib_create_ah_from_wc - Creates an address handle associated with the
2343 * sender of the specified work completion.
2344 * @pd: The protection domain associated with the address handle.
2345 * @wc: Work completion information associated with a received message.
2346 * @grh: References the received global route header. This parameter is
2347 * ignored unless the work completion indicates that the GRH is valid.
2348 * @port_num: The outbound port number to associate with the address.
2349 *
2350 * The address handle is used to reference a local or global destination
2351 * in all UD QP post sends.
2352 */
2353 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2354 const struct ib_grh *grh, u8 port_num);
2355
2356 /**
2357 * ib_modify_ah - Modifies the address vector associated with an address
2358 * handle.
2359 * @ah: The address handle to modify.
2360 * @ah_attr: The new address vector attributes to associate with the
2361 * address handle.
2362 */
2363 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2364
2365 /**
2366 * ib_query_ah - Queries the address vector associated with an address
2367 * handle.
2368 * @ah: The address handle to query.
2369 * @ah_attr: The address vector attributes associated with the address
2370 * handle.
2371 */
2372 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2373
2374 /**
2375 * ib_destroy_ah - Destroys an address handle.
2376 * @ah: The address handle to destroy.
2377 */
2378 int ib_destroy_ah(struct ib_ah *ah);
2379
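/*
 * Example (illustrative sketch, not part of this header): creating an
 * address handle for replying to the sender of a received UD message and
 * destroying it afterwards.  The function name is hypothetical; IS_ERR()
 * and PTR_ERR() are assumed to be available via the existing includes.
 */
static inline int example_reply_via_ah(struct ib_pd *pd, const struct ib_wc *wc,
				       const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* ... post the reply on a UD QP using this AH ... */

	return ib_destroy_ah(ah);
}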
2380 /**
2381 * ib_create_srq - Creates a SRQ associated with the specified protection
2382 * domain.
2383 * @pd: The protection domain associated with the SRQ.
2384 * @srq_init_attr: A list of initial attributes required to create the
2385 * SRQ. If SRQ creation succeeds, then the attributes are updated to
2386 * the actual capabilities of the created SRQ.
2387 *
2388 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2389 * requested size of the SRQ, and set to the actual values allocated
2390 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
2391 * will always be at least as large as the requested values.
2392 */
2393 struct ib_srq *ib_create_srq(struct ib_pd *pd,
2394 struct ib_srq_init_attr *srq_init_attr);
2395
2396 /**
2397 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2398 * @srq: The SRQ to modify.
2399 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
2400 * the current values of selected SRQ attributes are returned.
2401 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2402 * are being modified.
2403 *
2404 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2405 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2406 * the number of receives queued drops below the limit.
2407 */
2408 int ib_modify_srq(struct ib_srq *srq,
2409 struct ib_srq_attr *srq_attr,
2410 enum ib_srq_attr_mask srq_attr_mask);
2411
2412 /**
2413 * ib_query_srq - Returns the attribute list and current values for the
2414 * specified SRQ.
2415 * @srq: The SRQ to query.
2416 * @srq_attr: The attributes of the specified SRQ.
2417 */
2418 int ib_query_srq(struct ib_srq *srq,
2419 struct ib_srq_attr *srq_attr);
2420
2421 /**
2422 * ib_destroy_srq - Destroys the specified SRQ.
2423 * @srq: The SRQ to destroy.
2424 */
2425 int ib_destroy_srq(struct ib_srq *srq);
2426
2427 /**
2428 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2429 * @srq: The SRQ to post the work request on.
2430 * @recv_wr: A list of work requests to post on the receive queue.
2431 * @bad_recv_wr: On an immediate failure, this parameter will reference
2432 * the work request that failed to be posted on the SRQ.
2433 */
2434 static inline int ib_post_srq_recv(struct ib_srq *srq,
2435 struct ib_recv_wr *recv_wr,
2436 struct ib_recv_wr **bad_recv_wr)
2437 {
2438 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2439 }
2440
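/*
 * Example (illustrative sketch, not part of this header): posting a single
 * receive buffer to an SRQ.  The DMA address and lkey are assumed to have
 * been set up beforehand; the function name is hypothetical.
 */
static inline int example_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
					    u32 length, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = length,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}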
2441 /**
2442 * ib_create_qp - Creates a QP associated with the specified protection
2443 * domain.
2444 * @pd: The protection domain associated with the QP.
2445 * @qp_init_attr: A list of initial attributes required to create the
2446 * QP. If QP creation succeeds, then the attributes are updated to
2447 * the actual capabilities of the created QP.
2448 */
2449 struct ib_qp *ib_create_qp(struct ib_pd *pd,
2450 struct ib_qp_init_attr *qp_init_attr);
2451
2452 /**
2453 * ib_modify_qp - Modifies the attributes for the specified QP and then
2454 * transitions the QP to the given state.
2455 * @qp: The QP to modify.
2456 * @qp_attr: On input, specifies the QP attributes to modify. On output,
2457 * the current values of selected QP attributes are returned.
2458 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2459 * are being modified.
2460 */
2461 int ib_modify_qp(struct ib_qp *qp,
2462 struct ib_qp_attr *qp_attr,
2463 int qp_attr_mask);
2464
2465 /**
2466 * ib_query_qp - Returns the attribute list and current values for the
2467 * specified QP.
2468 * @qp: The QP to query.
2469 * @qp_attr: The attributes of the specified QP.
2470 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2471 * @qp_init_attr: Additional attributes of the selected QP.
2472 *
2473 * The qp_attr_mask may be used to limit the query to gathering only the
2474 * selected attributes.
2475 */
2476 int ib_query_qp(struct ib_qp *qp,
2477 struct ib_qp_attr *qp_attr,
2478 int qp_attr_mask,
2479 struct ib_qp_init_attr *qp_init_attr);
2480
2481 /**
2482 * ib_destroy_qp - Destroys the specified QP.
2483 * @qp: The QP to destroy.
2484 */
2485 int ib_destroy_qp(struct ib_qp *qp);
2486
2487 /**
2488 * ib_open_qp - Obtain a reference to an existing sharable QP.
2489 * @xrcd: XRC domain
2490 * @qp_open_attr: Attributes identifying the QP to open.
2491 *
2492 * Returns a reference to a sharable QP.
2493 */
2494 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2495 struct ib_qp_open_attr *qp_open_attr);
2496
2497 /**
2498 * ib_close_qp - Release an external reference to a QP.
2499 * @qp: The QP handle to release
2500 *
2501 * The opened QP handle is released by the caller. The underlying
2502 * shared QP is not destroyed until all internal references are released.
2503 */
2504 int ib_close_qp(struct ib_qp *qp);
2505
2506 /**
2507 * ib_post_send - Posts a list of work requests to the send queue of
2508 * the specified QP.
2509 * @qp: The QP to post the work request on.
2510 * @send_wr: A list of work requests to post on the send queue.
2511 * @bad_send_wr: On an immediate failure, this parameter will reference
2512 * the work request that failed to be posted on the QP.
2513 *
2514 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2515 * error is returned, the QP state shall not be affected,
2516 * ib_post_send() will return an immediate error after queueing any
2517 * earlier work requests in the list.
2518 */
2519 static inline int ib_post_send(struct ib_qp *qp,
2520 struct ib_send_wr *send_wr,
2521 struct ib_send_wr **bad_send_wr)
2522 {
2523 return qp->device->post_send(qp, send_wr, bad_send_wr);
2524 }
2525
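/*
 * Example (illustrative sketch, not part of this header): posting a single
 * signaled SEND work request on a QP.  The DMA address and lkey are assumed
 * to have been set up beforehand; the function name is hypothetical.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 dma_addr,
					u32 length, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = length,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}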
2526 /**
2527 * ib_post_recv - Posts a list of work requests to the receive queue of
2528 * the specified QP.
2529 * @qp: The QP to post the work request on.
2530 * @recv_wr: A list of work requests to post on the receive queue.
2531 * @bad_recv_wr: On an immediate failure, this parameter will reference
2532 * the work request that failed to be posted on the QP.
2533 */
2534 static inline int ib_post_recv(struct ib_qp *qp,
2535 struct ib_recv_wr *recv_wr,
2536 struct ib_recv_wr **bad_recv_wr)
2537 {
2538 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2539 }
2540
2541 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
2542 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
2543 void ib_free_cq(struct ib_cq *cq);
2544 int ib_process_cq_direct(struct ib_cq *cq, int budget);
2545
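/*
 * Example (illustrative sketch, not part of this header): allocating a CQ
 * with the new completion queue API, polling it directly, and freeing it.
 * The CQ depth and polling budget are made-up values; IS_ERR()/PTR_ERR()
 * are assumed to be available via the existing includes.
 */
static inline int example_direct_poll_cq(struct ib_device *dev, void *ctx)
{
	struct ib_cq *cq;
	int completed;

	cq = ib_alloc_cq(dev, ctx, 128, 0, IB_POLL_DIRECT);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	completed = ib_process_cq_direct(cq, 16);	/* budget of 16 CQEs */

	ib_free_cq(cq);
	return completed;
}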
2546 /**
2547 * ib_create_cq - Creates a CQ on the specified device.
2548 * @device: The device on which to create the CQ.
2549 * @comp_handler: A user-specified callback that is invoked when a
2550 * completion event occurs on the CQ.
2551 * @event_handler: A user-specified callback that is invoked when an
2552 * asynchronous event not associated with a completion occurs on the CQ.
2553 * @cq_context: Context associated with the CQ returned to the user via
2554 * the associated completion and event handlers.
2555 * @cq_attr: The attributes with which the CQ should be created.
2556 *
2557 * Users can examine the cq structure to determine the actual CQ size.
2558 */
2559 struct ib_cq *ib_create_cq(struct ib_device *device,
2560 ib_comp_handler comp_handler,
2561 void (*event_handler)(struct ib_event *, void *),
2562 void *cq_context,
2563 const struct ib_cq_init_attr *cq_attr);
2564
2565 /**
2566 * ib_resize_cq - Modifies the capacity of the CQ.
2567 * @cq: The CQ to resize.
2568 * @cqe: The minimum size of the CQ.
2569 *
2570 * Users can examine the cq structure to determine the actual CQ size.
2571 */
2572 int ib_resize_cq(struct ib_cq *cq, int cqe);
2573
2574 /**
2575 * ib_modify_cq - Modifies moderation params of the CQ
2576 * @cq: The CQ to modify.
2577 * @cq_count: number of CQEs that will trigger an event
2578 * @cq_period: max period of time in usec before triggering an event
2579 *
2580 */
2581 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2582
2583 /**
2584 * ib_destroy_cq - Destroys the specified CQ.
2585 * @cq: The CQ to destroy.
2586 */
2587 int ib_destroy_cq(struct ib_cq *cq);
2588
2589 /**
2590 * ib_poll_cq - poll a CQ for completion(s)
2591 * @cq:the CQ being polled
2592 * @num_entries:maximum number of completions to return
2593 * @wc:array of at least @num_entries &struct ib_wc where completions
2594 * will be returned
2595 *
2596 * Poll a CQ for (possibly multiple) completions. If the return value
2597 * is < 0, an error occurred. If the return value is >= 0, it is the
2598 * number of completions returned. If the return value is
2599 * non-negative and < num_entries, then the CQ was emptied.
2600 */
2601 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2602 struct ib_wc *wc)
2603 {
2604 return cq->device->poll_cq(cq, num_entries, wc);
2605 }
2606
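/*
 * Example (illustrative sketch, not part of this header): draining all
 * currently available completions from a CQ one at a time.  The function
 * name is hypothetical.
 */
static inline void example_reap_completions(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			continue;	/* inspect wc.status for the flush/error reason */

		/* ... process the successful completion (wc.opcode, wc.wr_id) ... */
	}
}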
2607 /**
2608 * ib_peek_cq - Returns the number of unreaped completions currently
2609 * on the specified CQ.
2610 * @cq: The CQ to peek.
2611 * @wc_cnt: A minimum number of unreaped completions to check for.
2612 *
2613 * If the number of unreaped completions is greater than or equal to wc_cnt,
2614 * this function returns wc_cnt, otherwise, it returns the actual number of
2615 * unreaped completions.
2616 */
2617 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2618
2619 /**
2620 * ib_req_notify_cq - Request completion notification on a CQ.
2621 * @cq: The CQ to generate an event for.
2622 * @flags:
2623 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2624 * to request an event on the next solicited event or next work
2625 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2626 * may also be |ed in to request a hint about missed events, as
2627 * described below.
2628 *
2629 * Return Value:
2630 * < 0 means an error occurred while requesting notification
2631 * == 0 means notification was requested successfully, and if
2632 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2633 * were missed and it is safe to wait for another event. In
2634 * this case it is guaranteed that any work completions added
2635 * to the CQ since the last CQ poll will trigger a completion
2636 * notification event.
2637 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2638 * in. It means that the consumer must poll the CQ again to
2639 * make sure it is empty to avoid missing an event because of a
2640 * race between requesting notification and an entry being
2641 * added to the CQ. This return value means it is possible
2642 * (but not guaranteed) that a work completion has been added
2643 * to the CQ since the last poll without triggering a
2644 * completion notification event.
2645 */
2646 static inline int ib_req_notify_cq(struct ib_cq *cq,
2647 enum ib_cq_notify_flags flags)
2648 {
2649 return cq->device->req_notify_cq(cq, flags);
2650 }
2651
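/*
 * Example (illustrative sketch, not part of this header): the classic
 * "poll until empty, re-arm, poll again" pattern described above, which
 * closes the race between arming the CQ and a completion arriving.  The
 * function name is hypothetical.
 */
static inline void example_poll_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* ... process wc ... */
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}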
2652 /**
2653 * ib_req_ncomp_notif - Request completion notification when there are
2654 * at least the specified number of unreaped completions on the CQ.
2655 * @cq: The CQ to generate an event for.
2656 * @wc_cnt: The number of unreaped completions that should be on the
2657 * CQ before an event is generated.
2658 */
2659 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2660 {
2661 return cq->device->req_ncomp_notif ?
2662 cq->device->req_ncomp_notif(cq, wc_cnt) :
2663 -ENOSYS;
2664 }
2665
2666 /**
2667 * ib_get_dma_mr - Returns a memory region for system memory that is
2668 * usable for DMA.
2669 * @pd: The protection domain associated with the memory region.
2670 * @mr_access_flags: Specifies the memory access rights.
2671 *
2672 * Note that the ib_dma_*() functions defined below must be used
2673 * to create/destroy addresses used with the Lkey or Rkey returned
2674 * by ib_get_dma_mr().
2675 */
2676 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2677
2678 /**
2679 * ib_dma_mapping_error - check a DMA addr for error
2680 * @dev: The device for which the dma_addr was created
2681 * @dma_addr: The DMA address to check
2682 */
2683 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2684 {
2685 if (dev->dma_ops)
2686 return dev->dma_ops->mapping_error(dev, dma_addr);
2687 return dma_mapping_error(dev->dma_device, dma_addr);
2688 }
2689
2690 /**
2691 * ib_dma_map_single - Map a kernel virtual address to DMA address
2692 * @dev: The device for which the dma_addr is to be created
2693 * @cpu_addr: The kernel virtual address
2694 * @size: The size of the region in bytes
2695 * @direction: The direction of the DMA
2696 */
2697 static inline u64 ib_dma_map_single(struct ib_device *dev,
2698 void *cpu_addr, size_t size,
2699 enum dma_data_direction direction)
2700 {
2701 if (dev->dma_ops)
2702 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2703 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2704 }
2705
2706 /**
2707 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2708 * @dev: The device for which the DMA address was created
2709 * @addr: The DMA address
2710 * @size: The size of the region in bytes
2711 * @direction: The direction of the DMA
2712 */
2713 static inline void ib_dma_unmap_single(struct ib_device *dev,
2714 u64 addr, size_t size,
2715 enum dma_data_direction direction)
2716 {
2717 if (dev->dma_ops)
2718 dev->dma_ops->unmap_single(dev, addr, size, direction);
2719 else
2720 dma_unmap_single(dev->dma_device, addr, size, direction);
2721 }
2722
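/*
 * Example (illustrative sketch, not part of this header): mapping a kernel
 * buffer for device access, checking the mapping, and unmapping it again.
 * Real code keeps the mapping alive while work requests reference it; the
 * function name is hypothetical.
 */
static inline int example_dma_roundtrip(struct ib_device *dev, void *buf,
					size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... reference dma_addr from an ib_sge and post the work request ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}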
2723 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2724 void *cpu_addr, size_t size,
2725 enum dma_data_direction direction,
2726 struct dma_attrs *attrs)
2727 {
2728 return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2729 direction, attrs);
2730 }
2731
2732 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2733 u64 addr, size_t size,
2734 enum dma_data_direction direction,
2735 struct dma_attrs *attrs)
2736 {
2737 return dma_unmap_single_attrs(dev->dma_device, addr, size,
2738 direction, attrs);
2739 }
2740
2741 /**
2742 * ib_dma_map_page - Map a physical page to DMA address
2743 * @dev: The device for which the dma_addr is to be created
2744 * @page: The page to be mapped
2745 * @offset: The offset within the page
2746 * @size: The size of the region in bytes
2747 * @direction: The direction of the DMA
2748 */
2749 static inline u64 ib_dma_map_page(struct ib_device *dev,
2750 struct page *page,
2751 unsigned long offset,
2752 size_t size,
2753 enum dma_data_direction direction)
2754 {
2755 if (dev->dma_ops)
2756 return dev->dma_ops->map_page(dev, page, offset, size, direction);
2757 return dma_map_page(dev->dma_device, page, offset, size, direction);
2758 }
2759
2760 /**
2761 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
2762 * @dev: The device for which the DMA address was created
2763 * @addr: The DMA address
2764 * @size: The size of the region in bytes
2765 * @direction: The direction of the DMA
2766 */
2767 static inline void ib_dma_unmap_page(struct ib_device *dev,
2768 u64 addr, size_t size,
2769 enum dma_data_direction direction)
2770 {
2771 if (dev->dma_ops)
2772 dev->dma_ops->unmap_page(dev, addr, size, direction);
2773 else
2774 dma_unmap_page(dev->dma_device, addr, size, direction);
2775 }
2776
2777 /**
2778 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
2779 * @dev: The device for which the DMA addresses are to be created
2780 * @sg: The array of scatter/gather entries
2781 * @nents: The number of scatter/gather entries
2782 * @direction: The direction of the DMA
2783 */
2784 static inline int ib_dma_map_sg(struct ib_device *dev,
2785 struct scatterlist *sg, int nents,
2786 enum dma_data_direction direction)
2787 {
2788 if (dev->dma_ops)
2789 return dev->dma_ops->map_sg(dev, sg, nents, direction);
2790 return dma_map_sg(dev->dma_device, sg, nents, direction);
2791 }
2792
2793 /**
2794 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
2795 * @dev: The device for which the DMA addresses were created
2796 * @sg: The array of scatter/gather entries
2797 * @nents: The number of scatter/gather entries
2798 * @direction: The direction of the DMA
2799 */
2800 static inline void ib_dma_unmap_sg(struct ib_device *dev,
2801 struct scatterlist *sg, int nents,
2802 enum dma_data_direction direction)
2803 {
2804 if (dev->dma_ops)
2805 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2806 else
2807 dma_unmap_sg(dev->dma_device, sg, nents, direction);
2808 }
2809
2810 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
2811 struct scatterlist *sg, int nents,
2812 enum dma_data_direction direction,
2813 struct dma_attrs *attrs)
2814 {
2815 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2816 }
2817
2818 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
2819 struct scatterlist *sg, int nents,
2820 enum dma_data_direction direction,
2821 struct dma_attrs *attrs)
2822 {
2823 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2824 }
2825 /**
2826 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
2827 * @dev: The device for which the DMA addresses were created
2828 * @sg: The scatter/gather entry
2829 *
2830 * Note: this function is obsolete. To do: change all occurrences of
2831 * ib_sg_dma_address() into sg_dma_address().
2832 */
2833 static inline u64 ib_sg_dma_address(struct ib_device *dev,
2834 struct scatterlist *sg)
2835 {
2836 return sg_dma_address(sg);
2837 }
2838
2839 /**
2840 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
2841 * @dev: The device for which the DMA addresses were created
2842 * @sg: The scatter/gather entry
2843 *
2844 * Note: this function is obsolete. To do: change all occurrences of
2845 * ib_sg_dma_len() into sg_dma_len().
2846 */
2847 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2848 struct scatterlist *sg)
2849 {
2850 return sg_dma_len(sg);
2851 }
2852
2853 /**
2854 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
2855 * @dev: The device for which the DMA address was created
2856 * @addr: The DMA address
2857 * @size: The size of the region in bytes
2858 * @dir: The direction of the DMA
2859 */
2860 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2861 u64 addr,
2862 size_t size,
2863 enum dma_data_direction dir)
2864 {
2865 if (dev->dma_ops)
2866 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2867 else
2868 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2869 }
2870
2871 /**
2872 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
2873 * @dev: The device for which the DMA address was created
2874 * @addr: The DMA address
2875 * @size: The size of the region in bytes
2876 * @dir: The direction of the DMA
2877 */
2878 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2879 u64 addr,
2880 size_t size,
2881 enum dma_data_direction dir)
2882 {
2883 if (dev->dma_ops)
2884 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2885 else
2886 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2887 }
2888
2889 /**
2890 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2891 * @dev: The device for which the DMA address is requested
2892 * @size: The size of the region to allocate in bytes
2893 * @dma_handle: A pointer for returning the DMA address of the region
2894 * @flag: memory allocator flags
2895 */
2896 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2897 size_t size,
2898 u64 *dma_handle,
2899 gfp_t flag)
2900 {
2901 if (dev->dma_ops)
2902 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2903 else {
2904 dma_addr_t handle;
2905 void *ret;
2906
2907 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2908 *dma_handle = handle;
2909 return ret;
2910 }
2911 }
2912
2913 /**
2914 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
2915 * @dev: The device for which the DMA addresses were allocated
2916 * @size: The size of the region
2917 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
2918 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
2919 */
2920 static inline void ib_dma_free_coherent(struct ib_device *dev,
2921 size_t size, void *cpu_addr,
2922 u64 dma_handle)
2923 {
2924 if (dev->dma_ops)
2925 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2926 else
2927 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2928 }
2929
2930 /**
2931 * ib_dereg_mr - Deregisters a memory region and removes it from the
2932 * HCA translation table.
2933 * @mr: The memory region to deregister.
2934 *
2935 * This function can fail if the memory region has memory windows bound to it.
2936 */
2937 int ib_dereg_mr(struct ib_mr *mr);
2938
2939 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
2940 enum ib_mr_type mr_type,
2941 u32 max_num_sg);
2942
2943 /**
2944 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2945 * R_Key and L_Key.
2946 * @mr - struct ib_mr pointer to be updated.
2947 * @newkey - new key to be used.
2948 */
2949 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2950 {
2951 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2952 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2953 }
2954
2955 /**
2956 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2957 * for calculating a new rkey for type 2 memory windows.
2958 * @rkey - the rkey to increment.
2959 */
2960 static inline u32 ib_inc_rkey(u32 rkey)
2961 {
2962 const u32 mask = 0x000000ff;
2963 return ((rkey + 1) & mask) | (rkey & ~mask);
2964 }
2965
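/*
 * Example (illustrative sketch, not part of this header): refreshing the
 * key portion of a fast registration MR before it is registered again, so
 * that stale remote references using the old rkey will fault.  The function
 * name is hypothetical.
 */
static inline u32 example_refresh_mr_key(struct ib_mr *mr)
{
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);
	return mr->rkey;
}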
2966 /**
2967 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2968 * @pd: The protection domain associated with the unmapped region.
2969 * @mr_access_flags: Specifies the memory access rights.
2970 * @fmr_attr: Attributes of the unmapped region.
2971 *
2972 * A fast memory region must be mapped before it can be used as part of
2973 * a work request.
2974 */
2975 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2976 int mr_access_flags,
2977 struct ib_fmr_attr *fmr_attr);
2978
2979 /**
2980 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2981 * @fmr: The fast memory region to associate with the pages.
2982 * @page_list: An array of physical pages to map to the fast memory region.
2983 * @list_len: The number of pages in page_list.
2984 * @iova: The I/O virtual address to use with the mapped region.
2985 */
2986 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2987 u64 *page_list, int list_len,
2988 u64 iova)
2989 {
2990 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2991 }
2992
2993 /**
2994 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2995 * @fmr_list: A linked list of fast memory regions to unmap.
2996 */
2997 int ib_unmap_fmr(struct list_head *fmr_list);
2998
2999 /**
3000 * ib_dealloc_fmr - Deallocates a fast memory region.
3001 * @fmr: The fast memory region to deallocate.
3002 */
3003 int ib_dealloc_fmr(struct ib_fmr *fmr);
3004
3005 /**
3006 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3007 * @qp: QP to attach to the multicast group. The QP must be type
3008 * IB_QPT_UD.
3009 * @gid: Multicast group GID.
3010 * @lid: Multicast group LID in host byte order.
3011 *
3012 * In order to send and receive multicast packets, subnet
3013 * administration must have created the multicast group and configured
3014 * the fabric appropriately. The port associated with the specified
3015 * QP must also be a member of the multicast group.
3016 */
3017 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3018
3019 /**
3020 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3021 * @qp: QP to detach from the multicast group.
3022 * @gid: Multicast group GID.
3023 * @lid: Multicast group LID in host byte order.
3024 */
3025 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3026
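/*
 * Example (illustrative sketch, not part of this header): joining and
 * leaving a multicast group on a UD QP.  The MGID and MLID are assumed to
 * have been obtained from the SA; the function name is hypothetical.
 */
static inline int example_mcast_roundtrip(struct ib_qp *qp,
					  union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... send and receive multicast traffic ... */

	return ib_detach_mcast(qp, mgid, mlid);
}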
3027 /**
3028 * ib_alloc_xrcd - Allocates an XRC domain.
3029 * @device: The device on which to allocate the XRC domain.
3030 */
3031 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3032
3033 /**
3034 * ib_dealloc_xrcd - Deallocates an XRC domain.
3035 * @xrcd: The XRC domain to deallocate.
3036 */
3037 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3038
3039 struct ib_flow *ib_create_flow(struct ib_qp *qp,
3040 struct ib_flow_attr *flow_attr, int domain);
3041 int ib_destroy_flow(struct ib_flow *flow_id);
3042
3043 static inline int ib_check_mr_access(int flags)
3044 {
3045 /*
3046 * Local write permission is required if remote write or
3047 * remote atomic permission is also requested.
3048 */
3049 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3050 !(flags & IB_ACCESS_LOCAL_WRITE))
3051 return -EINVAL;
3052
3053 return 0;
3054 }
3055
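/*
 * Example (illustrative sketch, not part of this header): validating
 * consumer-supplied access flags before handing them to a registration
 * verb.  The function name is hypothetical; ERR_PTR() is assumed to be
 * available via the existing includes.
 */
static inline struct ib_mr *example_get_dma_mr(struct ib_pd *pd, int flags)
{
	if (ib_check_mr_access(flags))
		return ERR_PTR(-EINVAL);	/* e.g. remote write without local write */

	return ib_get_dma_mr(pd, flags);
}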
3056 /**
3057 * ib_check_mr_status: lightweight check of MR status.
3058 * This routine may provide status checks on a selected
3059 * ib_mr. First use is for signature status check.
3060 *
3061 * @mr: A memory region.
3062 * @check_mask: Bitmask of which checks to perform from
3063 * ib_mr_status_check enumeration.
3064 * @mr_status: The container of relevant status checks.
3065 * failed checks will be indicated in the status bitmask
3066 * and the relevant info shall be in the error item.
3067 */
3068 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3069 struct ib_mr_status *mr_status);
3070
3071 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3072 u16 pkey, const union ib_gid *gid,
3073 const struct sockaddr *addr);
3074
3075 int ib_map_mr_sg(struct ib_mr *mr,
3076 struct scatterlist *sg,
3077 int sg_nents,
3078 unsigned int page_size);
3079
3080 static inline int
3081 ib_map_mr_sg_zbva(struct ib_mr *mr,
3082 struct scatterlist *sg,
3083 int sg_nents,
3084 unsigned int page_size)
3085 {
3086 int n;
3087
3088 n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
3089 mr->iova = 0;
3090
3091 return n;
3092 }
3093
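/*
 * Example (illustrative sketch, not part of this header): the usual
 * registration sequence for the ib_map_mr_sg() API - DMA-map a scatterlist,
 * then map it into the MR at PAGE_SIZE granularity.  The function name and
 * the "all or nothing" error handling are hypothetical.
 */
static inline int example_map_mr(struct ib_device *dev, struct ib_mr *mr,
				 struct scatterlist *sg, int nents)
{
	int mapped, n;

	mapped = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;

	n = ib_map_mr_sg(mr, sg, mapped, PAGE_SIZE);
	if (n != mapped) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return n < 0 ? n : -EINVAL;
	}

	return 0;
}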
3094 int ib_sg_to_pages(struct ib_mr *mr,
3095 struct scatterlist *sgl,
3096 int sg_nents,
3097 int (*set_page)(struct ib_mr *, u64));
3098
3099 void ib_drain_rq(struct ib_qp *qp);
3100 void ib_drain_sq(struct ib_qp *qp);
3101 void ib_drain_qp(struct ib_qp *qp);
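/*
 * Example (illustrative sketch, not part of this header): the intended use
 * of the new drain helpers - quiesce a QP so that every posted work request
 * has completed or been flushed before the QP and its CQs are torn down.
 * The function name is hypothetical.
 */
static inline int example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* waits for all outstanding send and receive WRs */
	return ib_destroy_qp(qp);
}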
3102 #endif /* IB_VERBS_H */