/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>

#include "mlx4_ib.h"
#include "user.h"

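/*
 * Return a pointer to receive WQE number n in the SRQ buffer.  The
 * buffer is either one direct allocation or a list of pages, so the
 * byte offset is split into a page index and an offset within that
 * page.
 */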
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
        int offset = n << srq->msrq.wqe_shift;

        if (srq->buf.nbufs == 1)
                return srq->buf.u.direct.buf + offset;
        else
                return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
                        (offset & (PAGE_SIZE - 1));
}

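/*
 * Translate a low-level mlx4 SRQ event into an ib_event and hand it
 * to the consumer's event handler, if one was registered.
 */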
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device      = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
                               "on SRQ %06x\n", type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}

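/*
 * Create an SRQ.  The WQE count is rounded up to a power of two (with
 * one extra entry, since one WQE always stays on the free list to
 * distinguish a full SRQ from an empty one), and each WQE is sized to
 * hold a next segment plus max_gs scatter entries.
 */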
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_srq *srq;
        struct mlx4_wqe_srq_next_seg *next;
        int desc_size;
        int buf_size;
        int err;
        int i;

        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
            init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
                return ERR_PTR(-EINVAL);

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        desc_size = max(32UL,
                        roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
                                           srq->msrq.max_gs *
                                           sizeof (struct mlx4_wqe_data_seg)));
        srq->msrq.wqe_shift = ilog2(desc_size);

        buf_size = srq->msrq.max * desc_size;

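        /*
         * Userspace consumers supply the WQE buffer and doorbell
         * record and we just map them in; for kernel consumers we
         * allocate both here and chain the WQEs into a free list.
         */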
        if (pd->uobject) {
                struct mlx4_ib_create_srq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_srq;
                }

                srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                                        buf_size, 0);
                if (IS_ERR(srq->umem)) {
                        err = PTR_ERR(srq->umem);
                        goto err_srq;
                }

                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
                                    ilog2(srq->umem->page_size), &srq->mtt);
                if (err)
                        goto err_buf;

                err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
                if (err)
                        goto err_mtt;

                err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
                                          ucmd.db_addr, &srq->db);
                if (err)
                        goto err_mtt;
        } else {
                err = mlx4_ib_db_alloc(dev, &srq->db, 0);
                if (err)
                        goto err_srq;

                *srq->db.db = 0;

                if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
                        err = -ENOMEM;
                        goto err_db;
                }

                srq->head    = 0;
                srq->tail    = srq->msrq.max - 1;
                srq->wqe_ctr = 0;

                for (i = 0; i < srq->msrq.max; ++i) {
                        next = get_wqe(srq, i);
                        next->next_wqe_index =
                                cpu_to_be16((i + 1) & (srq->msrq.max - 1));
                }

                err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
                                    &srq->mtt);
                if (err)
                        goto err_buf;

                err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
                if (err)
                        goto err_mtt;

                srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
                if (!srq->wrid) {
                        err = -ENOMEM;
                        goto err_mtt;
                }
        }

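        /*
         * With the buffer and doorbell record in place, create the
         * SRQ itself in the hardware.
         */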
        err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
                             srq->db.dma, &srq->msrq);
        if (err)
                goto err_wrid;

        srq->msrq.event = mlx4_ib_srq_event;

        if (pd->uobject)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_wrid;
                }

        init_attr->attr.max_wr = srq->msrq.max - 1;

        return &srq->ibsrq;

err_wrid:
        if (pd->uobject)
                mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
        else
                kfree(srq->wrid);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
        if (pd->uobject)
                ib_umem_release(srq->umem);
        else
                mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
        if (!pd->uobject)
                mlx4_ib_db_free(dev, &srq->db);

err_srq:
        kfree(srq);

        return ERR_PTR(err);
}

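/*
 * Modify an SRQ.  Only rearming the limit event (IB_SRQ_LIMIT) is
 * supported; resizing (IB_SRQ_MAX_WR) is rejected.
 */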
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}

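/*
 * Query an SRQ: fetch the current limit watermark from the hardware
 * and report the capacities recorded at creation time.
 */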
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        int ret;
        int limit_watermark;

        ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
        if (ret)
                return ret;

        srq_attr->srq_limit = limit_watermark;
        srq_attr->max_wr    = srq->msrq.max - 1;
        srq_attr->max_sge   = srq->msrq.max_gs;

        return 0;
}

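/*
 * Destroy an SRQ: free the hardware object and MTTs first, then
 * release the buffer and doorbell record according to whether the SRQ
 * belongs to a userspace or a kernel consumer.
 */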
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
        struct mlx4_ib_dev *dev = to_mdev(srq->device);
        struct mlx4_ib_srq *msrq = to_msrq(srq);

        mlx4_srq_free(dev->dev, &msrq->msrq);
        mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

        if (srq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
                ib_umem_release(msrq->umem);
        } else {
                kfree(msrq->wrid);
                mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
                              &msrq->buf);
                mlx4_ib_db_free(dev, &msrq->db);
        }

        kfree(msrq);

        return 0;
}

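/*
 * Return a completed WQE to the SRQ free list by linking it in after
 * the current tail.
 */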
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
        struct mlx4_wqe_srq_next_seg *next;

        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}

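/*
 * Post a chain of receive work requests: for each request, take the
 * WQE at head off the free list and fill in its scatter list, then
 * ring the doorbell once with the updated WQE counter.
 */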
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr)
{
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        struct mlx4_wqe_srq_next_seg *next;
        struct mlx4_wqe_data_seg *scat;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

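                /*
                 * Consume the WQE at head; its next_wqe_index link
                 * gives the new head of the free list.
                 */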
                srq->wrid[srq->head] = wr->wr_id;

                next = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat = (struct mlx4_wqe_data_seg *) (next + 1);

                for (i = 0; i < wr->num_sge; ++i) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
                }

                if (i < srq->msrq.max_gs) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
                        scat[i].addr       = 0;
                }
        }

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return err;
}