/* drivers/infiniband/hw/mlx4/cq.c */

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

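/*
 * Completion queue (CQ) support for the mlx4 IB driver: CQ creation,
 * resizing and teardown, completion polling and translation into
 * ib_wc entries, event dispatch, arming, and cleanup of stale entries
 * when a QP is reset.
 */
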
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_cq *ibcq;

        if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
                printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
                       "on CQ %06x\n", type, cq->cqn);
                return;
        }

        ibcq = &to_mibcq(cq)->ibcq;
        if (ibcq->event_handler) {
                event.device     = ibcq->device;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
        return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n);
}

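/*
 * CQE ownership: hardware toggles the CQE owner bit on each pass
 * around the ring.  Since the ring size is a power of two, the bit of
 * the consumer index just above the index mask (n & (cqe + 1)) also
 * flips once per wrap, so comparing the two tells us whether hardware
 * has written this entry since software last consumed it.
 */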
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
        struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

        return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx4_ib_cq *mcq = to_mcq(cq);
        struct mlx4_ib_dev *dev = to_mdev(cq->device);

        return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

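/*
 * Kernel CQ buffers are allocated by the mlx4 core and then described
 * to the HCA through an MTT (memory translation table) so the device
 * can DMA completion entries directly into the buffer.
 */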
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
        int err;

        err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
                             PAGE_SIZE * 2, &buf->buf);

        if (err)
                goto out;

        err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                            &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
                      &buf->buf);

out:
        return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
        mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
                               struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
                               u64 buf_addr, int cqe)
{
        int err;

        *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
                            ilog2((*umem)->page_size), &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        ib_umem_release(*umem);

        return err;
}

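/*
 * CQ creation takes one of two paths: for userspace CQs the buffer and
 * doorbell record live in user memory (pinned via ib_umem_get() and a
 * mapped doorbell page), while kernel CQs allocate both directly.
 * Either way the HCA is told about the buffer through mlx4_cq_alloc().
 */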
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq;
        struct mlx4_uar *uar;
        int err;

        if (entries < 1 || entries > dev->dev->caps.max_cqes)
                return ERR_PTR(-EINVAL);

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;

        if (context) {
                struct mlx4_ib_create_cq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_cq;
                }

                err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
                                          ucmd.buf_addr, entries);
                if (err)
                        goto err_cq;

                err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                          &cq->db);
                if (err)
                        goto err_mtt;

                uar = &to_mucontext(context)->uar;
        } else {
                err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                if (err)
                        goto err_cq;

                cq->mcq.set_ci_db  = cq->db.db;
                cq->mcq.arm_db     = cq->db.db + 1;
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db    = 0;

                err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
                        goto err_db;

                uar = &dev->priv_uar;
        }

        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
                            cq->db.dma, &cq->mcq, vector, 0);
        if (err)
                goto err_dbmap;

        cq->mcq.comp  = mlx4_ib_cq_comp;
        cq->mcq.event = mlx4_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_dbmap;
                }

        return &cq->ibcq;

err_dbmap:
        if (context)
                mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

        if (context)
                ib_umem_release(cq->umem);
        else
                mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
        if (!context)
                mlx4_db_free(dev->dev, &cq->db);

err_cq:
        kfree(cq);

        return ERR_PTR(err);
}

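/*
 * CQ resize works in two stages: a replacement buffer is allocated and
 * handed to the firmware with mlx4_cq_resize(), and hardware then
 * marks the switch-over point in the old buffer with a special CQE
 * whose opcode is MLX4_CQE_OPCODE_RESIZE.  Entries before that marker
 * still live in the old buffer; for kernel CQs they are copied across
 * by mlx4_ib_cq_resize_copy_cqes() below.
 */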
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                 int entries)
{
        int err;

        if (cq->resize_buf)
                return -EBUSY;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries, struct ib_udata *udata)
{
        struct mlx4_ib_resize_cq ucmd;
        int err;

        if (cq->resize_umem)
                return -EBUSY;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
                                  &cq->resize_umem, ucmd.buf_addr, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
        u32 i;

        i = cq->mcq.cons_index;
        while (get_sw_cqe(cq, i & cq->ibcq.cqe))
                ++i;

        return i - cq->mcq.cons_index;
}

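/*
 * Copy every CQE still outstanding in the old buffer into the resized
 * buffer, stopping at the MLX4_CQE_OPCODE_RESIZE marker that hardware
 * wrote when it switched over to the new buffer.
 */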
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
        struct mlx4_cqe *cqe;
        int i;

        i = cq->mcq.cons_index;
        cqe = get_cqe(cq, i & cq->ibcq.cqe);
        while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
                                        (i + 1) & cq->resize_buf->cqe),
                       get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
                cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
        }
        ++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_mtt mtt;
        int outst_cqe;
        int err;

        mutex_lock(&cq->resize_mutex);

        if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                err = 0;
                goto out;
        }

        if (ibcq->uobject) {
                err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
                if (err)
                        goto out;
        } else {
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
                        err = 0;
                        goto out;
                }

                err = mlx4_alloc_resize_buf(dev, cq, entries);
                if (err)
                        goto out;
        }

        mtt = cq->buf.mtt;

        err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
        if (err)
                goto err_buf;

        mlx4_mtt_cleanup(dev->dev, &mtt);
        if (ibcq->uobject) {
                cq->buf      = cq->resize_buf->buf;
                cq->ibcq.cqe = cq->resize_buf->cqe;
                ib_umem_release(cq->umem);
                cq->umem     = cq->resize_umem;

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                cq->resize_umem = NULL;
        } else {
                spin_lock_irq(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
                spin_unlock_irq(&cq->lock);
        }

        goto out;

err_buf:
        mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
        if (!ibcq->uobject)
                mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                                    cq->resize_buf->cqe);

        kfree(cq->resize_buf);
        cq->resize_buf = NULL;

        if (cq->resize_umem) {
                ib_umem_release(cq->resize_umem);
                cq->resize_umem = NULL;
        }

out:
        mutex_unlock(&cq->resize_mutex);
        return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
        struct mlx4_ib_cq *mcq = to_mcq(cq);

        mlx4_cq_free(dev->dev, &mcq->mcq);
        mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

        if (cq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
                ib_umem_release(mcq->umem);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
                mlx4_db_free(dev->dev, &mcq->db);
        }

        kfree(mcq);

        return 0;
}

static void dump_cqe(void *cqe)
{
        __be32 *buf = cqe;

        printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
               be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
               be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
               be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
{
        if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
                printk(KERN_DEBUG "local QP operation err "
                       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                       "opcode = %02x)\n",
                       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
                       cqe->vendor_err_syndrome,
                       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                dump_cqe(cqe);
        }

        switch (cqe->syndrome) {
        case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX4_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_syndrome;
}

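/*
 * A received packet's checksum is trusted only when the CQE status
 * flags report a well-formed IPv4 header (not fragmented, no IP
 * options, not IPv6), a TCP or UDP payload, and a verified checksum
 * of 0xffff.
 */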
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
        return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
                                      MLX4_CQE_STATUS_IPV4F |
                                      MLX4_CQE_STATUS_IPV4OPT |
                                      MLX4_CQE_STATUS_IPV6 |
                                      MLX4_CQE_STATUS_IPOK)) ==
                cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
                            MLX4_CQE_STATUS_IPOK)) &&
                (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
                                      MLX4_CQE_STATUS_TCP)) &&
                checksum == cpu_to_be16(0xffff);
}

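/*
 * Advance the CQ by one entry: claim the next software-owned CQE,
 * resolve it to a QP (cached in *cur_qp across calls to avoid a table
 * lookup per CQE), retire the matching work request from the send,
 * receive, or SRQ ring, and translate the CQE into an ib_wc.
 * Returns -EAGAIN when the CQ is empty.
 */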
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
{
        struct mlx4_cqe *cqe;
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
        int is_send;
        int is_error;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        ++cq->mcq.cons_index;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                     is_send)) {
                printk(KERN_WARNING "Completion for NOP opcode detected!\n");
                return -EINVAL;
        }

        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
                        struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }

                goto repoll;
        }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
                if (unlikely(!mqp)) {
                        printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
                               cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;

        if (is_send) {
                wq = &(*cur_qp)->sq;
                if (!(*cur_qp)->sq_signal_bits) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
                }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
                srq = to_msrq((*cur_qp)->ibqp.srq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq = &(*cur_qp)->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }

        if (unlikely(is_error)) {
                mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
                return 0;
        }

        wc->status = IB_WC_SUCCESS;

        if (is_send) {
                wc->wc_flags = 0;
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_OPCODE_RDMA_WRITE_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                        /* fall through */
                case MLX4_OPCODE_RDMA_WRITE:
                        wc->opcode   = IB_WC_RDMA_WRITE;
                        break;
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                        /* fall through */
                case MLX4_OPCODE_SEND:
                case MLX4_OPCODE_SEND_INVAL:
                        wc->opcode   = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
                        wc->opcode   = IB_WC_RDMA_READ;
                        wc->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MLX4_OPCODE_ATOMIC_CS:
                        wc->opcode   = IB_WC_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_ATOMIC_FA:
                        wc->opcode   = IB_WC_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode   = IB_WC_BIND_MW;
                        break;
                case MLX4_OPCODE_LSO:
                        wc->opcode   = IB_WC_LSO;
                        break;
                case MLX4_OPCODE_FMR:
                        wc->opcode   = IB_WC_FAST_REG_MR;
                        break;
                case MLX4_OPCODE_LOCAL_INVAL:
                        wc->opcode   = IB_WC_LOCAL_INV;
                        break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);

                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND_INVAL:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_INVALIDATE;
                        wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }

                wc->slid           = be16_to_cpu(cqe->rlid);
                /* SL is the top 4 bits; byte-swap before shifting */
                wc->sl             = be16_to_cpu(cqe->sl_vid) >> 12;
                g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp         = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
        }

        return 0;
}

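/*
 * Poll up to num_entries completions while holding the CQ lock, then
 * ring the consumer-index doorbell once for the whole batch.  A
 * typical consumer drains the CQ in a loop; a minimal sketch (where
 * handle_completion() is a hypothetical caller-side helper, not part
 * of this driver):
 *
 *      struct ib_wc wc[16];
 *      int i, n;
 *
 *      while ((n = ib_poll_cq(ibcq, 16, wc)) > 0)
 *              for (i = 0; i < n; ++i)
 *                      handle_completion(&wc[i]);
 */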
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        if (npolled)
                mlx4_cq_set_ci(&cq->mcq);

        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}

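/*
 * Request a completion event: write the current consumer index and the
 * solicited/unsolicited request type to the CQ's arm doorbell.  The CQ
 * generates at most one event per arming, so consumers re-arm from
 * their completion handler.
 */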
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx4_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->uar_map,
                    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

        return 0;
}

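/*
 * __mlx4_ib_cq_clean() expects cq->lock to be held; mlx4_ib_cq_clean()
 * below is the locking wrapper.  It drops every CQE belonging to the
 * given QP (returning SRQ WQEs where needed) by sliding the surviving
 * entries up toward the producer index and advancing the consumer
 * index past the freed slots.
 */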
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        u32 prod_index;
        int nfreed = 0;
        struct mlx4_cqe *cqe, *dest;
        u8 owner_bit;

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
                                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /*
                 * Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx4_cq_set_ci(&cq->mcq);
        }
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        spin_lock_irq(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}