/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};
const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
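
/*
 * Example (illustrative sketch, not part of the original file): a caller that
 * wants to log a path's static rate could combine the two helpers above; the
 * variable names are made up for the sketch.
 *
 *	enum ib_rate rate = IB_RATE_40_GBPS;
 *
 *	pr_info("static rate: x%d of 2.5 Gb/s (%d Mb/s)\n",
 *		ib_rate_to_mult(rate), ib_rate_to_mbps(rate));
 *
 * ib_rate_to_mult() only covers rates that are exact multiples of 2.5 Gb/s
 * and returns -1 otherwise, while ib_rate_to_mbps() also knows the FDR/EDR
 * style rates (14, 25, 56, 100 Gb/s and friends).
 */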
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
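
/*
 * Example (illustrative sketch, not part of the original file): typical ULP
 * setup and teardown of a protection domain. "my_setup" and the surrounding
 * context are assumptions for the sketch.
 *
 *	static int my_setup(struct ib_device *device)
 *	{
 *		struct ib_pd *pd;
 *
 *		pd = ib_alloc_pd(device);
 *		if (IS_ERR(pd))
 *			return PTR_ERR(pd);
 *
 *		// pd->local_dma_lkey can now be used as the lkey for
 *		// locally DMA-mapped buffers; when done:
 *		ib_dealloc_pd(pd);
 *		return 0;
 *	}
 */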
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;

		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);
static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	else
		return 6;
}

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}
struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}
static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in  src_in;
	struct sockaddr_in  dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			ret = ib_find_cached_gid_by_port(device, &dgid,
							 IB_GID_TYPE_IB,
							 port_num, NULL,
							 &gid_index);
			if (ret)
				return ret;
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
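
/*
 * Example (illustrative sketch, not part of the original file): a UD service
 * replying to a received datagram typically builds the return address handle
 * straight from the work completion and the received GRH, much like the MAD
 * layer does for incoming MADs:
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// post the response send WR using ah, wc->src_qp and the peer's
 *	// qkey, then:
 *	ib_destroy_ah(ah);
 */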
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
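
/*
 * Example (illustrative sketch, not part of the original file): creating a
 * basic (non-XRC) SRQ that several QPs can later share via their
 * qp_init_attr->srq pointer. The sizes are made-up values.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */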
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
		struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the callers is using the RDMA API calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device     = device;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd	    = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			qp = ERR_PTR(ret);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
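
/*
 * Example (illustrative sketch, not part of the original file): creating an
 * RC QP the way a kernel ULP usually does. The CQs and the capacities are
 * assumptions for the sketch; real callers size them from device attributes,
 * and "my_qp_event_handler" is a hypothetical callback.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler	= my_qp_event_handler,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap = {
 *			.max_send_wr	= 128,
 *			.max_recv_wr	= 128,
 *			.max_send_sge	= 2,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */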
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
			 },
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;

	if (*qp_attr_mask & IB_QP_AV) {
		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
			return -EINVAL;

		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
			return 0;

		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
					qp_attr->ah_attr.dmac);
		} else {
			union ib_gid		sgid;
			struct ib_gid_attr	sgid_attr;
			int			ifindex;
			int			hop_limit;

			ret = ib_query_gid(qp->device,
					   qp_attr->ah_attr.port_num,
					   qp_attr->ah_attr.grh.sgid_index,
					   &sgid, &sgid_attr);

			if (ret || !sgid_attr.ndev) {
				if (!ret)
					ret = -ENXIO;
				goto out;
			}

			ifindex = sgid_attr.ndev->ifindex;

			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
							   &qp_attr->ah_attr.grh.dgid,
							   qp_attr->ah_attr.dmac,
							   NULL, &ifindex, &hop_limit);

			dev_put(sgid_attr.ndev);

			qp_attr->ah_attr.grh.hop_limit = hop_limit;
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
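
/*
 * Example (illustrative sketch, not part of the original file): the first
 * transition of a freshly created RC QP, RESET -> INIT. The attribute mask
 * must contain every bit listed as required for this transition and QP type
 * in qp_state_table above; port 1 and pkey index 0 are made-up values.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */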
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
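
/*
 * Example (illustrative sketch, not part of the original file): creating a
 * completion queue with this low-level interface. "my_cq_handler" and
 * "my_ctx" are hypothetical; most modern kernel ULPs use ib_alloc_cq() from
 * the CQ API instead, which also provides the poll context required by the
 * drain helpers at the end of this file.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_cq_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */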
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);
/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);
/**
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl is not less
 *	than the created ib_rwq_ind_table object and the caller is responsible
 *	for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						     init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);
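
/*
 * Example (illustrative sketch, not part of the original file): building a
 * small RSS indirection table out of previously created receive WQs. The
 * table size must be a power of two; log_ind_tbl_size = 2 gives four
 * entries. "wqs" is an assumed array of struct ib_wq * created with
 * ib_create_wq().
 *
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
 *	if (IS_ERR(ind_tbl))
 *		return PTR_ERR(ind_tbl);
 *	// the table can now be referenced by qp_init_attr->rwq_ind_tbl.
 */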
/**
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @wq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;
	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it to the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must be aligned to page_size (or physically
 *   contiguous to the previous element). In case an sg element has a
 *   non contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of these
 *   constraints hold and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
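
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * fast-registration sequence built on ib_alloc_mr() and ib_map_mr_sg().
 * "sg"/"nents" are assumed to be a DMA-mapped scatterlist and "unmap" a
 * hypothetical error label in the caller.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		goto unmap;	// fewer elements mapped than requested
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr	 = mr;
 *	reg_wr.key	 = mr->rkey;
 *	reg_wr.access	 = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	// post reg_wr.wr on the QP before any WR that uses mr->rkey.
 */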
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}
/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}
/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);
/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);