/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be32 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use an offset of 4
 * because in the Tavor case, posting a WQE may overwrite the first
 * four bytes of the previous WQE.  The offset avoids corrupting our
 * free list if the WQE has already completed and been put on the free
 * list when we post the next WQE.
 */
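/*
 * The value stored at this location is the index of the next free WQE,
 * or -1 for the last entry in the list; srq->first_free and
 * srq->last_free track the head and tail (see mthca_alloc_srq_buf()
 * and mthca_free_srq_wqe()).
 */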
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + 4);
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = long_log2(srq->max) + srq->wqe_shift;
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
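	/*
	 * Illustrative result for srq->max == 4: WQE 0 links to 1, 1 to
	 * 2, 2 to 3, and 3 ends the list with -1; mthca_alloc_srq()
	 * then sets first_free to 0 and last_free to 3.
	 */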
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);

	ds = min(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));
	srq->wqe_shift = long_log2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	atomic_set(&srq->refcount, 1);
	init_waitqueue_head(&srq->wait);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;
	srq->last       = get_wqe(srq, srq->max - 1);

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
296 | ||
297 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | |
298 | { | |
299 | struct mthca_mailbox *mailbox; | |
300 | int err; | |
301 | u8 status; | |
302 | ||
303 | mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); | |
304 | if (IS_ERR(mailbox)) { | |
305 | mthca_warn(dev, "No memory for mailbox to free SRQ.\n"); | |
306 | return; | |
307 | } | |
308 | ||
309 | err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); | |
310 | if (err) | |
311 | mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); | |
312 | else if (status) | |
313 | mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); | |
314 | ||
315 | spin_lock_irq(&dev->srq_table.lock); | |
316 | mthca_array_clear(&dev->srq_table.srq, | |
317 | srq->srqn & (dev->limits.num_srqs - 1)); | |
318 | spin_unlock_irq(&dev->srq_table.lock); | |
319 | ||
	atomic_dec(&srq->refcount);
	wait_event(srq->wait, !atomic_read(&srq->refcount));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	if (atomic_dec_and_test(&srq->refcount))
		wake_up(&srq->wait);
}

/*
 * This function must be called with IRQs disabled.
 */
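/*
 * mthca_free_srq_wqe() returns a completed receive WQE to the tail of
 * the free list; if the list was empty (first_free < 0), the returned
 * WQE becomes both head and tail.
 */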
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
414 | ||
415 | wqe = get_wqe(srq, ind); | |
416 | next_ind = *wqe_to_link(wqe); | |
417 | prev_wqe = srq->last; | |
418 | srq->last = wqe; | |
419 | ||
420 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
421 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | |
422 | /* flags field will always remain 0 */ | |
423 | ||
424 | wqe += sizeof (struct mthca_next_seg); | |
425 | ||
		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}
432 | ||
433 | for (i = 0; i < wr->num_sge; ++i) { | |
434 | ((struct mthca_data_seg *) wqe)->byte_count = | |
435 | cpu_to_be32(wr->sg_list[i].length); | |
436 | ((struct mthca_data_seg *) wqe)->lkey = | |
437 | cpu_to_be32(wr->sg_list[i].lkey); | |
438 | ((struct mthca_data_seg *) wqe)->addr = | |
439 | cpu_to_be64(wr->sg_list[i].addr); | |
440 | wqe += sizeof (struct mthca_data_seg); | |
441 | } | |
442 | ||
		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

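		/*
		 * Link the previous WQE to the one just built: write the
		 * next-address into prev_wqe, then (after a barrier so
		 * the link is visible first) set MTHCA_NEXT_DBD in its
		 * ee_nds field.
		 */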
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind] = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
481 | ||
482 | int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |
483 | struct ib_recv_wr **bad_wr) | |
484 | { | |
485 | struct mthca_dev *dev = to_mdev(ibsrq->device); | |
486 | struct mthca_srq *srq = to_msrq(ibsrq); | |
487 | unsigned long flags; | |
488 | int err = 0; | |
489 | int ind; | |
490 | int next_ind; | |
491 | int nreq; | |
492 | int i; | |
493 | void *wqe; | |
494 | ||
495 | spin_lock_irqsave(&srq->lock, flags); | |
496 | ||
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
506 | ||
507 | wqe = get_wqe(srq, ind); | |
508 | next_ind = *wqe_to_link(wqe); | |
509 | ||
510 | ((struct mthca_next_seg *) wqe)->nda_op = | |
511 | cpu_to_be32((next_ind << srq->wqe_shift) | 1); | |
512 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | |
513 | /* flags field will always remain 0 */ | |
514 | ||
515 | wqe += sizeof (struct mthca_next_seg); | |
516 | ||
		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
522 | ||
523 | for (i = 0; i < wr->num_sge; ++i) { | |
524 | ((struct mthca_data_seg *) wqe)->byte_count = | |
525 | cpu_to_be32(wr->sg_list[i].length); | |
526 | ((struct mthca_data_seg *) wqe)->lkey = | |
527 | cpu_to_be32(wr->sg_list[i].lkey); | |
528 | ((struct mthca_data_seg *) wqe)->addr = | |
529 | cpu_to_be64(wr->sg_list[i].addr); | |
530 | wqe += sizeof (struct mthca_data_seg); | |
531 | } | |
532 | ||
533 | if (i < srq->max_gs) { | |
534 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | |
535 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); | |
536 | ((struct mthca_data_seg *) wqe)->addr = 0; | |
537 | } | |
538 | ||
539 | srq->wrid[ind] = wr->wr_id; | |
540 | srq->first_free = next_ind; | |
541 | } | |
542 | ||
	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}