/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "user.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	/* For 128-byte CQEs the 64-byte hardware CQE sits in the second half. */
	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	/* The entry is software-owned when it is valid and its owner bit
	 * matches the parity of the current pass around the ring.
	 */
	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE = 2,
};

static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid = be16_to_cpu(cqe->slid);
	wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET)
		return;

	switch (wc->sl & 0x3) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	/* dump is cleared below for flush and retry-exceeded errors, which
	 * can occur during normal teardown and recovery and would otherwise
	 * flood the log.
	 */
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	/* Convert the value returned by the device to host byte order in
	 * place, 4 or 8 bytes at a time.
	 */
	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	/* Walk the chain of posted send WQEs from the last polled entry up
	 * to the one this CQE completes.
	 */
	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		if (unlikely(!mqp)) {
			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
				     cq->mcq.cqn, qpn);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mkey_table.lock);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		if (unlikely(!mmkey)) {
			read_unlock(&dev->mdev->priv.mkey_table.lock);
			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
			return -EINVAL;
		}

		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mkey_table.lock);
		goto repoll;
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return soft_polled + npolled;
	else
		return err;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page,
		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
		    to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, struct mlx5_create_cq_mbox_in **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd;
	size_t ucmdlen;
	int page_shift;
	int npages;
	int ncont;
	int err;

	/* Accept the shorter command layout from older userspace that
	 * predates the reserved field.
	 */
	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
	(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

	*index = to_mucontext(context)->uuari.uars[0].index;

	return 0;

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	/* Mark every entry invalid so get_sw_cqe() won't mistake stale
	 * memory for a completed CQE before hardware writes one.
	 */
	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    struct mlx5_create_cq_mbox_in **cqb,
			    int *index, int *inlen)
{
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	*index = dev->mdev->priv.uuari.uars[0].index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_create_cq_mbox_in *cqb = NULL;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0)
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		/* for now choose 64 bytes till we have a proper interface */
		cqe_size = 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	cq->cqe_size = cqe_size;
	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;

	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
		cqb->ctx.cqe_sz_flags |= (1 << 1);

	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cqb->ctx.c_eqn = cpu_to_be16(eqn);
	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (context)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_modify_cq_mbox_in *in;
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;
	u32 fsel;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->cqn = cpu_to_be32(mcq->mcq.cqn);
	/* Both moderation fields are always updated together. */
	fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
	in->ctx.cq_period = cpu_to_be16(cq_period);
	in->ctx.cq_max_count = cpu_to_be16(cq_count);
	in->field_select = cpu_to_be32(fsel);
	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
	kfree(in);

	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	/* Copy software-owned CQEs from the old buffer into the new one,
	 * fixing up the ownership bit for the destination ring, until the
	 * resize marker CQE is reached.
	 */
	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_modify_cq_mbox_in *in;
	int err;
	int npas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1)
		return -EINVAL;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     in->pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);

	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
	in->ctx.page_offset = 0;
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
	in->cqn = cpu_to_be32(cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context, hence the GFP_ATOMIC allocation. */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	/* Notify the consumer if the CQ is armed for the next completion,
	 * or unconditionally when the injected completion carries an error.
	 */
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}