/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */

#include <linux/init.h>
#include <linux/hardirq.h>

#include <ib_pack.h>	/* IB_OPCODE_* used below; header path assumed for this tree */

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	u32 flags;
	u64 start;
	u32 logsize_usrpage;
	u32 error_eqn;		/* Tavor only */
	u32 comp_eqn;
	u32 pd;
	u32 lkey;
	u32 last_notified_index;
	u32 solicit_producer_index;
	u32 consumer_index;
	u32 producer_index;
	u32 cqn;
	u32 ci_db;		/* Arbel only */
	u32 state_db;		/* Arbel only */
	u32 reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
	u32 my_qpn;
	u32 my_ee;
	u32 rqpn;
	u16 sl_g_mlpath;
	u16 rlid;
	u32 imm_etype_pkey_eec;
	u32 byte_cnt;
	u32 wqe;
	u8  opcode;
	u8  is_send;
	u8  reserved;
	u8  owner;
};

struct mthca_err_cqe {
	u32 my_qpn;
	u32 reserved1[3];
	u8  syndrome;
	u8  reserved2;
	u16 db_cnt;
	u32 reserved3;
	u32 wqe;
	u8  opcode;
	u8  reserved4[2];
	u8  owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
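
/*
 * The CQ buffer is either one physically contiguous chunk (direct)
 * or an array of page-sized chunks (indirect); get_cqe() hides the
 * difference by computing the page and offset for a given entry
 * index.
 */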
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	if (cq->is_direct)
		return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
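
/*
 * The HW bit in the owner byte is set while the hardware still owns
 * a CQE and cleared once it has been written back, so cqe_sw()
 * returns NULL until the entry is ready for software to consume.
 */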
static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
{
	struct mthca_cqe *cqe = get_cqe(cq, i);

	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	u32 doorbell[2];

	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
		doorbell[1] = cpu_to_be32(incr - 1);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}
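
/*
 * Called when the hardware reports a completion event for a CQ:
 * look up the CQ by number and dispatch to the consumer's
 * completion handler.
 */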
void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
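
/*
 * Remove all CQEs belonging to a QP that is going away, compacting
 * the remaining entries so the consumer never sees completions for
 * the destroyed QP.
 */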
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
{
	struct mthca_cq *cq;
	struct mthca_cqe *cqe;
	int prod_index;
	int nfreed = 0;

	spin_lock_irq(&dev->cq_table.lock);
	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);
	spin_unlock_irq(&dev->cq_table.lock);

	if (!cq)
		return;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(cq, prod_index & cq->ibcq.cqe);
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while (prod_index > cq->cons_index) {
		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn))
			++nfreed;
		else if (nfreed)
			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
				       cq->ibcq.cqe),
			       cqe,
			       MTHCA_CQ_ENTRY_SIZE);
		--prod_index;
	}

	if (nfreed) {
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}
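
/*
 * Translate a hardware error syndrome into an ib_wc status.  On
 * Tavor-mode HCAs the CQE may also be patched and left in place so
 * the rest of the WQE chain gets flushed; see the comment above the
 * final test below.
 */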
static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			    struct mthca_qp *qp, int wqe_index, int is_send,
			    struct mthca_err_cqe *cqe,
			    struct ib_wc *entry, int *free_cqe)
{
	int err;
	int dbd;
	u32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status (and
	 * freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
	if (err)
		return err;

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 *
	 * This does not apply to mem-free HCAs: they don't use the
	 * doorbell count field, and so we should always free the CQE.
	 */
	if (mthca_is_memfree(dev) ||
	    !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return 0;

	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;

	return 0;
}
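
/*
 * Poll one CQE: find the QP it belongs to (caching the last QP seen
 * in *cur_qp), recover the work request ID from the WQE address,
 * update the work queue's tail/last_comp accounting and fill in the
 * ib_wc entry.  Returns -EAGAIN when no software-owned CQE is ready.
 */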
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp_num = (*cur_qp)->qpn;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else {
		wq = &(*cur_qp)->rq;
		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq->last_comp < wqe_index)
		wq->tail += wqe_index - wq->last_comp;
	else
		wq->tail += wqe_index + wq->max - wq->last_comp;

	wq->last_comp = wqe_index;

	if (0)
		mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
			  is_send ? "Send" : "Receive",
			  (*cur_qp)->qpn, wqe_index, wq->max);

	if (is_error) {
		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				       (struct mthca_err_cqe *) cqe,
				       entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid           = be16_to_cpu(cqe->rlid);
		entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}
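
/*
 * Drain up to num_entries completions under the CQ lock, ringing the
 * consumer index doorbell once for everything freed rather than once
 * per CQE.
 */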
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}
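
/*
 * Arm a Tavor-mode CQ: a single doorbell write requests the next
 * completion notification, either solicited-only or any completion.
 */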
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}
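
/*
 * Arming a mem-free (Arbel) CQ takes two steps: update the arm
 * doorbell record in host memory, then ring the doorbell via MMIO.
 * The sequence number carried in both writes lets the hardware tell
 * which arm request is the most recent.
 */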
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	u32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  (notify == IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  (notify == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}
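
/*
 * Free the CQ buffer, handling both the direct case (one coherent
 * allocation) and the indirect case (one allocation per page).
 */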
static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int i;
	int size;

	if (cq->is_direct)
		dma_free_coherent(&dev->pdev->dev,
				  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
				  cq->queue.direct.buf,
				  pci_unmap_addr(&cq->queue.direct,
						 mapping));
	else {
		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			if (cq->queue.page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  cq->queue.page_list[i].buf,
						  pci_unmap_addr(&cq->queue.page_list[i],
								 mapping));

		kfree(cq->queue.page_list);
	}
}
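
/*
 * Allocate the CQ buffer and register it as a physical memory
 * region.  CQs up to MTHCA_MAX_DIRECT_CQ_SIZE use a single coherent
 * allocation; larger CQs fall back to a list of pages.  Either way,
 * dma_list collects the chunk addresses handed to
 * mthca_mr_alloc_phys().
 */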
static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
			      struct mthca_cq *cq)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
		cq->is_direct = 1;
		npages        = 1;
		shift         = get_order(size) + PAGE_SHIFT;

		cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
							  size, &t, GFP_KERNEL);
		if (!cq->queue.direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&cq->queue.direct, mapping, t);

		memset(cq->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		cq->is_direct = 0;
		npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift         = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
					      GFP_KERNEL);
		if (!cq->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			cq->queue.page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			cq->queue.page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!cq->queue.page_list[i].buf)
				goto err_free;

			pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

			memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &cq->mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_free_cq_buf(dev, cq);

err_out:
	kfree(dma_list);

	return err;
}
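
/*
 * Create a CQ: allocate a CQN (and, on mem-free HCAs, its ICM and
 * doorbell records), build the CQ context in a mailbox and hand the
 * CQ over to the hardware with the SW2HW_CQ firmware command.  For
 * userspace CQs (ctx != NULL) the buffer is owned by the consumer,
 * so only kernel CQs allocate a buffer here.
 */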
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	int size = nent * MTHCA_CQ_ENTRY_SIZE;
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, size, cq);
		if (err)
			goto err_out_mailbox;

		for (i = 0; i < nent; ++i)
			set_cqe_hw(get_cqe(cq, i));
	}

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->start           = cpu_to_be64(0);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel) {
		mthca_free_mr(dev, &cq->mr);
		mthca_free_cq_buf(dev, cq);
	}

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}
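
/*
 * Destroy a CQ: reclaim it from the hardware with HW2SW_CQ, make
 * sure no completion handler can still be running (synchronize_irq
 * plus the refcount/wait_event pair), then free the resources in
 * the reverse order of mthca_init_cq().
 */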
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	might_sleep();

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		u32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

	if (cq->is_kernel) {
		mthca_free_mr(dev, &cq->mr);
		mthca_free_cq_buf(dev, cq);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}