/* drivers/infiniband/hw/mlx4/cq.c */

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

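/*
 * Dispatch callbacks from the mlx4 core driver to the handlers the
 * consumer registered on the ib_cq.  The only async event expected
 * here is CQ_ERROR.
 */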
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

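/*
 * CQE ring accessors.  The HCA supports 32- and 64-byte CQEs
 * (buf->entry_size); with 64-byte entries the fields the driver reads
 * sit in the second 32 bytes, hence the "cqe + 1" adjustments
 * (pointer arithmetic on struct mlx4_cqe, which is 32 bytes).
 *
 * Ownership: the hardware flips the owner bit on every pass around
 * the power-of-two ring, so a CQE is software-owned only while its
 * owner bit agrees with the wrap parity of the consumer index -- that
 * is the XOR test in get_sw_cqe().
 */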
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

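/*
 * Kernel-owned CQ buffers: allocate the ring, then build and populate
 * an MTT (memory translation table) so the HCA can DMA into it.
 * Userspace CQs instead pin user pages via mlx4_ib_get_cq_umem()
 * below and write the same kind of MTT.
 */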
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);

	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

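/*
 * CQ creation.  One slot beyond the consumer's request is reserved
 * and the total is rounded up to a power of two, so ibcq->cqe (the
 * usable capacity) is entries - 1: asking for 100 CQEs, for example,
 * yields a 128-slot ring and ibcq->cqe == 127.  For userspace CQs the
 * ring and doorbell record live in user memory supplied through
 * udata; for kernel CQs they are allocated here.
 */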
#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
	if (err)
		goto err_dbmap;

	if (context)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

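/*
 * Resize support.  These helpers only stage the new, differently
 * sized ring in cq->resize_buf (plus cq->resize_umem for userspace
 * CQs); the actual switch-over happens in mlx4_ib_resize_cq() or,
 * for kernel CQs, when a RESIZE CQE shows up in mlx4_ib_poll_one().
 */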
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

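/*
 * Count CQEs the hardware has completed but software has not yet
 * polled; a kernel CQ must not be shrunk below this number.
 */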
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

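/*
 * Migrate the not-yet-polled CQEs into the new ring, recomputing each
 * owner bit for the new ring size, until the RESIZE marker CQE
 * written by the hardware is reached.  Called under cq->lock.
 */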
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

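/*
 * Error completions: map the hardware syndrome carried in an error
 * CQE onto the matching ib_wc_status code.  Local QP operation errors
 * also dump the raw CQE at debug log level.
 */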
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

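/*
 * A receive is reported with IB_WC_IP_CSUM_OK only for well-formed
 * IPv4 packets: the status bits must show exactly IPv4 + IPOK (no
 * fragments, no IP options, not IPv6), the packet must be TCP or UDP,
 * and the 16-bit checksum field must be 0xffff.
 */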
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

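/*
 * For proxy QPs in SR-IOV (multi-function) mode the interesting
 * completion metadata is tunneled inside the receive buffer rather
 * than the CQE: sync the DMA buffer and extract the work-completion
 * fields from the mlx4_ib_proxy_sqp_hdr at the polled tail entry.
 */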
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index	= be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp	= be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}

	return 0;
}

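/*
 * Software-generated completions, used once the device has entered
 * its internal error state and can no longer produce CQEs: every
 * outstanding WQE on the QPs attached to this CQ is completed with
 * IB_WC_WR_FLUSH_ERR so that kernel ULPs can make forward progress.
 */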
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		/* Offset by *npolled so completions from earlier QPs
		 * in the list are not overwritten.
		 */
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

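/*
 * Consume one CQE: look up the QP (and SRQ, if any) it refers to,
 * advance the matching work queue, and translate the CQE into the
 * caller's ib_wc.  Returns -EAGAIN when no software-owned CQE is
 * pending.  *cur_qp caches the last QP so that back-to-back CQEs for
 * the same QP skip the lookup.
 */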
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
		if (unlikely(!msrq)) {
			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
				cq->mcq.cqn, srq_num);
			return -EINVAL;
		}
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq	  = &(*cur_qp)->rq;
		tail	  = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode    = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data(*cur_qp, cq, wc, tail,
						       cqe, is_eth);
		}

		wc->slid	   = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index	   = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_VLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

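/*
 * Ring the arm doorbell so the HCA raises an interrupt on the next
 * completion (or next solicited completion, per the flags).  The
 * event is one-shot, so a consumer typically re-arms and then drains
 * again to close the race with completions that arrived before the
 * arm, e.g. (a sketch of a generic verbs consumer, not code from this
 * driver; process() is a hypothetical placeholder):
 *
 *	ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(ibcq, 1, &wc) > 0)
 *		process(&wc);
 */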
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

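/*
 * Purge all CQEs belonging to one QP, compacting the remaining
 * entries in place; used while that QP is being reset.  The __
 * variant expects the caller to hold cq->lock.
 */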
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}