/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/err.h>
35 #include <linux/vmalloc.h>
37 #include "ipath_verbs.h"
40 * ipath_post_srq_receive - post a receive on a shared receive queue
41 * @ibsrq: the SRQ to post the receive on
42 * @wr: the list of work requests to post
43 * @bad_wr: the first WR to cause a problem is put here
45 * This may be called from interrupt context.
47 int ipath_post_srq_receive(struct ib_srq
*ibsrq
, struct ib_recv_wr
*wr
,
48 struct ib_recv_wr
**bad_wr
)
50 struct ipath_srq
*srq
= to_isrq(ibsrq
);
51 struct ipath_ibdev
*dev
= to_idev(ibsrq
->device
);
55 for (; wr
; wr
= wr
->next
) {
56 struct ipath_rwqe
*wqe
;
60 if (wr
->num_sge
> srq
->rq
.max_sge
) {
66 spin_lock_irqsave(&srq
->rq
.lock
, flags
);
67 next
= srq
->rq
.head
+ 1;
68 if (next
>= srq
->rq
.size
)
70 if (next
== srq
->rq
.tail
) {
71 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
77 wqe
= get_rwqe_ptr(&srq
->rq
, srq
->rq
.head
);
78 wqe
->wr_id
= wr
->wr_id
;
79 wqe
->sg_list
[0].mr
= NULL
;
80 wqe
->sg_list
[0].vaddr
= NULL
;
81 wqe
->sg_list
[0].length
= 0;
82 wqe
->sg_list
[0].sge_length
= 0;
84 for (i
= 0, j
= 0; i
< wr
->num_sge
; i
++) {
86 if (to_ipd(srq
->ibsrq
.pd
)->user
&&
87 wr
->sg_list
[i
].lkey
== 0) {
88 spin_unlock_irqrestore(&srq
->rq
.lock
,
94 if (wr
->sg_list
[i
].length
== 0)
96 if (!ipath_lkey_ok(&dev
->lk_table
,
99 IB_ACCESS_LOCAL_WRITE
)) {
100 spin_unlock_irqrestore(&srq
->rq
.lock
,
106 wqe
->length
+= wr
->sg_list
[i
].length
;
111 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
120 * ipath_create_srq - create a shared receive queue
121 * @ibpd: the protection domain of the SRQ to create
122 * @attr: the attributes of the SRQ
123 * @udata: not used by the InfiniPath verbs driver
125 struct ib_srq
*ipath_create_srq(struct ib_pd
*ibpd
,
126 struct ib_srq_init_attr
*srq_init_attr
,
127 struct ib_udata
*udata
)
129 struct ipath_ibdev
*dev
= to_idev(ibpd
->device
);
130 struct ipath_srq
*srq
;
134 if (dev
->n_srqs_allocated
== ib_ipath_max_srqs
) {
135 ret
= ERR_PTR(-ENOMEM
);
139 if (srq_init_attr
->attr
.max_wr
== 0) {
140 ret
= ERR_PTR(-EINVAL
);
144 if ((srq_init_attr
->attr
.max_sge
> ib_ipath_max_srq_sges
) ||
145 (srq_init_attr
->attr
.max_wr
> ib_ipath_max_srq_wrs
)) {
146 ret
= ERR_PTR(-EINVAL
);
150 srq
= kmalloc(sizeof(*srq
), GFP_KERNEL
);
152 ret
= ERR_PTR(-ENOMEM
);
157 * Need to use vmalloc() if we want to support large #s of entries.
159 srq
->rq
.size
= srq_init_attr
->attr
.max_wr
+ 1;
160 sz
= sizeof(struct ipath_sge
) * srq_init_attr
->attr
.max_sge
+
161 sizeof(struct ipath_rwqe
);
162 srq
->rq
.wq
= vmalloc(srq
->rq
.size
* sz
);
165 ret
= ERR_PTR(-ENOMEM
);
170 * ib_create_srq() will initialize srq->ibsrq.
172 spin_lock_init(&srq
->rq
.lock
);
175 srq
->rq
.max_sge
= srq_init_attr
->attr
.max_sge
;
176 srq
->limit
= srq_init_attr
->attr
.srq_limit
;
180 dev
->n_srqs_allocated
++;
187 * ipath_modify_srq - modify a shared receive queue
188 * @ibsrq: the SRQ to modify
189 * @attr: the new attributes of the SRQ
190 * @attr_mask: indicates which attributes to modify
191 * @udata: user data for ipathverbs.so
193 int ipath_modify_srq(struct ib_srq
*ibsrq
, struct ib_srq_attr
*attr
,
194 enum ib_srq_attr_mask attr_mask
,
195 struct ib_udata
*udata
)
197 struct ipath_srq
*srq
= to_isrq(ibsrq
);
201 if (attr_mask
& IB_SRQ_MAX_WR
)
202 if ((attr
->max_wr
> ib_ipath_max_srq_wrs
) ||
203 (attr
->max_sge
> srq
->rq
.max_sge
)) {
208 if (attr_mask
& IB_SRQ_LIMIT
)
209 if (attr
->srq_limit
>= srq
->rq
.size
) {
214 if (attr_mask
& IB_SRQ_MAX_WR
) {
215 struct ipath_rwqe
*wq
, *p
;
218 sz
= sizeof(struct ipath_rwqe
) +
219 attr
->max_sge
* sizeof(struct ipath_sge
);
220 size
= attr
->max_wr
+ 1;
221 wq
= vmalloc(size
* sz
);
227 spin_lock_irqsave(&srq
->rq
.lock
, flags
);
228 if (srq
->rq
.head
< srq
->rq
.tail
)
229 n
= srq
->rq
.size
+ srq
->rq
.head
- srq
->rq
.tail
;
231 n
= srq
->rq
.head
- srq
->rq
.tail
;
232 if (size
<= n
|| size
<= srq
->limit
) {
233 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
240 while (srq
->rq
.tail
!= srq
->rq
.head
) {
241 struct ipath_rwqe
*wqe
;
244 wqe
= get_rwqe_ptr(&srq
->rq
, srq
->rq
.tail
);
245 p
->wr_id
= wqe
->wr_id
;
246 p
->length
= wqe
->length
;
247 p
->num_sge
= wqe
->num_sge
;
248 for (i
= 0; i
< wqe
->num_sge
; i
++)
249 p
->sg_list
[i
] = wqe
->sg_list
[i
];
251 p
= (struct ipath_rwqe
*)((char *) p
+ sz
);
252 if (++srq
->rq
.tail
>= srq
->rq
.size
)
260 srq
->rq
.max_sge
= attr
->max_sge
;
261 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
264 if (attr_mask
& IB_SRQ_LIMIT
) {
265 spin_lock_irqsave(&srq
->rq
.lock
, flags
);
266 srq
->limit
= attr
->srq_limit
;
267 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
275 int ipath_query_srq(struct ib_srq
*ibsrq
, struct ib_srq_attr
*attr
)
277 struct ipath_srq
*srq
= to_isrq(ibsrq
);
279 attr
->max_wr
= srq
->rq
.size
- 1;
280 attr
->max_sge
= srq
->rq
.max_sge
;
281 attr
->srq_limit
= srq
->limit
;
286 * ipath_destroy_srq - destroy a shared receive queue
287 * @ibsrq: the SRQ to destroy
289 int ipath_destroy_srq(struct ib_srq
*ibsrq
)
291 struct ipath_srq
*srq
= to_isrq(ibsrq
);
292 struct ipath_ibdev
*dev
= to_idev(ibsrq
->device
);
294 dev
->n_srqs_allocated
--;