/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 1)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
                     int index, union ib_gid *sgid)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
        if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;

        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

        return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        memset(attr, 0, sizeof *attr);
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = ~0ull;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = 0;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = dev->attr.max_qp;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY;
        attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
        attr->max_sge_rd = 0;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = 0;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = (dev->attr.max_qp - 1);
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = 0;
        attr->max_pkeys = 1;
        return 0;
}

int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
                           dev->id, port);
                return -EINVAL;
        }
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = 3;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        props->active_width = IB_WIDTH_1X;
        props->active_speed = 4;
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                       struct ib_port_modify *props)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
                           dev->id, port);
                return -EINVAL;
        }
        return 0;
}

static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                if (len != mm->key.len || phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                               unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len || phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}

struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
                                          struct ib_udata *udata)
{
        int status;
        struct ocrdma_ucontext *ctx;
        struct ocrdma_alloc_ucontext_resp resp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return ERR_PTR(-EFAULT);
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
        ctx->dev = dev;
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va) {
                kfree(ctx);
                return ERR_PTR(-ENOMEM);
        }
        memset(ctx->ah_tbl.va, 0, map_len);
        ctx->ah_tbl.len = map_len;

        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = ctx->ah_tbl.pa;

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;
        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;
        resp.rsvd = 0;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return &ctx->ibucontext;

cpy_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        kfree(ctx);
        return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct pci_dev *pdev = uctx->dev->nic_info.pdev;

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
        kfree(uctx);
        return 0;
}

int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = ucontext->dev;
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status = 0;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                dev->nic_info.db_total_size)) &&
                (len <= dev->nic_info.db_page_size)) {
                /* doorbell mapping */
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                        dev->nic_info.dpp_unmapped_len)) &&
                (len <= dev->nic_info.dpp_unmapped_len)) {
                /* dpp area mapping */
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                /* queue memory mapping */
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len,
                                         vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
                                struct ib_ucontext *ib_ctx,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = pd->dev->nic_info.unmapped_db +
                        (pd->id * pd->dev->nic_info.db_page_size);
        db_page_size = pd->dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
                                (pd->id * OCRDMA_DPP_PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                         OCRDMA_DPP_PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        if (pd->dpp_enabled)
                ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
        return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        int status;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);
        pd->dev = dev;
        if (udata && context) {
                pd->dpp_enabled = (dev->nic_info.dev_family ==
                                OCRDMA_GEN2_FAMILY) ? true : false;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
        }
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                kfree(pd);
                return ERR_PTR(status);
        }
        atomic_set(&pd->use_cnt, 0);

        if (udata && context) {
                status = ocrdma_copy_pd_uresp(pd, context, udata);
                if (status)
                        goto err;
        }
        return &pd->ibpd;

err:
        ocrdma_dealloc_pd(&pd->ibpd);
        return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = pd->dev;
        int status;
        u64 usr_db;

        if (atomic_read(&pd->use_cnt)) {
                ocrdma_err("%s(%d) pd=0x%x is in use.\n",
                           __func__, dev->id, pd->id);
                status = -EFAULT;
                goto dealloc_err;
        }
        status = ocrdma_mbx_dealloc_pd(dev, pd);
        if (pd->uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                    (pd->id * OCRDMA_DPP_PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db,
                                        OCRDMA_DPP_PAGE_SIZE);
                usr_db = dev->nic_info.unmapped_db +
                    (pd->id * dev->nic_info.db_page_size);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
        }
        kfree(pd);
dealloc_err:
        return status;
}

static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
                                           int acc, u32 num_pbls,
                                           u32 addr_check)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = pd->dev;

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                ocrdma_err("%s(%d) leaving err, invalid access rights\n",
                           __func__, dev->id);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);
        mr->hwmr.dev = dev;
        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
        if (status) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }
        mr->pd = pd;
        atomic_inc(&pd->use_cnt);
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return mr;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        struct ocrdma_mr *mr;

        mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
        if (IS_ERR(mr))
                return ERR_CAST(mr);

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}

static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
                                mr->num_pbls, GFP_KERNEL);

        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                memset(va, 0, dma_len);
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}

static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct ib_umem_chunk *chunk;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        shift = ilog2(umem->page_size);

        list_for_each_entry(chunk, &umem->chunk_list, list) {
                /* get all the dma regions from the chunk. */
                for (i = 0; i < chunk->nmap; i++) {
                        pages = sg_dma_len(&chunk->page_list[i]) >> shift;
                        for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                                /* store the page address in pbe */
                                pbe->pa_lo =
                                    cpu_to_le32(sg_dma_address
                                                (&chunk->page_list[i]) +
                                                (umem->page_size * pg_cnt));
                                pbe->pa_hi =
                                    cpu_to_le32(upper_32_bits
                                                ((sg_dma_address
                                                  (&chunk->page_list[i]) +
                                                  umem->page_size * pg_cnt)));
                                pbe_cnt += 1;
                                total_num_pbes += 1;
                                pbe++;

                                /* if done building pbes, issue the mbx cmd. */
                                if (total_num_pbes == num_pbes)
                                        return;

                                /* if the given pbl is full storing the pbes,
                                 * move to next pbl.
                                 */
                                if (pbe_cnt ==
                                        (mr->hwmr.pbl_size / sizeof(u64))) {
                                        pbl_tbl++;
                                        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                                        pbe_cnt = 0;
                                }
                        }
                }
        }
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);
        dev = pd->dev;

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->hwmr.dev = dev;
        mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = mr->umem->page_size;
        mr->hwmr.fbo = mr->umem->offset;
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->pd = pd;
        atomic_inc(&pd->use_cnt);
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = mr->hwmr.dev;
        int status;

        status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        if (mr->hwmr.fr_mr == 0)
                ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        atomic_dec(&mr->pd->use_cnt);
        /* it could be user registered memory. */
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);
        return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
                                struct ib_ucontext *ib_ctx)
{
        int status;
        struct ocrdma_ucontext *uctx;
        struct ocrdma_create_cq_uresp uresp;

        uresp.cq_id = cq->id;
        uresp.page_size = cq->len;
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = cq->pa;
        uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
        uresp.db_page_size = cq->dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                ocrdma_err("%s(%d) copy error cqid=0x%x.\n",
                           __func__, cq->dev->id, cq->id);
                goto err;
        }
        uctx = get_ocrdma_ucontext(ib_ctx);
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
                               struct ib_ucontext *ib_ctx,
                               struct ib_udata *udata)
{
        struct ocrdma_cq *cq;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        } else
                ureq.dpp_cq = 0;
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        atomic_set(&cq->use_cnt, 0);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);
        cq->dev = dev;

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
        if (status) {
                kfree(cq);
                return ERR_PTR(status);
        }
        if (ib_ctx) {
                status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        cq->arm_needed = true;
        dev->cq_tbl[cq->id] = cq;

        return &cq->ibcq;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        kfree(cq);
        return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        int status = 0;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
                status = -EINVAL;
                return status;
        }
        ibcq->cqe = new_cnt;
        return status;
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
        int status;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = cq->dev;

        if (atomic_read(&cq->use_cnt))
                return -EINVAL;

        status = ocrdma_mbx_destroy_cq(dev, cq);

        if (cq->ucontext) {
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
                ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
                                dev->nic_info.db_page_size);
        }
        dev->cq_tbl[cq->id] = NULL;

        kfree(cq);
        return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}

static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type != IB_QPT_GSI &&
            attrs->qp_type != IB_QPT_RC &&
            attrs->qp_type != IB_QPT_UD) {
                ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
                           __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
                ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
                           __func__, dev->id, attrs->cap.max_send_wr);
                ocrdma_err("%s(%d) supported send_wr=0x%x\n",
                           __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                           __func__, dev->id, attrs->cap.max_recv_wr);
                ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
                           __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                ocrdma_err("%s(%d) unsupported inline data size=0x%x"
                           " requested\n", __func__, dev->id,
                           attrs->cap.max_inline_data);
                ocrdma_err("%s(%d) supported inline data size=0x%x\n",
                           __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
                           __func__, dev->id, attrs->cap.max_send_sge);
                ocrdma_err("%s(%d) supported send_sge=0x%x\n",
                           __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                           __func__, dev->id, attrs->cap.max_recv_sge);
                ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
                           __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
                ocrdma_err
                    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                     __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                ocrdma_err("%s(%d) GSI special QPs already created.\n",
                           __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
                    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
                    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                                   __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status = 0;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = qp->sq.len;
        uresp.sq_page_addr[0] = qp->sq.pa;
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = qp->rq.len;
                uresp.rq_page_addr[0] = qp->rq.pa;
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
                uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
                        OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
                uresp.db_shift = (qp->id < 128) ? 24 : 16;
        } else {
                uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
                uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
                uresp.db_shift = 16;
        }

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        ((qp->id < 128) ?
                        OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
            kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
            kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;

        return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
                                      struct ocrdma_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;
        spin_lock_init(&qp->q_lock);
        INIT_LIST_HEAD(&qp->sq_entry);
        INIT_LIST_HEAD(&qp->rq_entry);

        qp->qp_type = attrs->qp_type;
        qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
}

static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
{
        atomic_inc(&pd->use_cnt);
        atomic_inc(&qp->sq_cq->use_cnt);
        atomic_inc(&qp->rq_cq->use_cnt);
        if (qp->srq)
                atomic_inc(&qp->srq->use_cnt);
        qp->ibqp.qp_num = qp->id;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
                                   struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type == IB_QPT_GSI) {
                dev->gsi_qp_created = 1;
                dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
                dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
        }
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *udata)
{
        int status;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev = pd->dev;
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;

        status = ocrdma_check_qp_params(ibpd, dev, attrs);
        if (status)
                goto gen_err;

        memset(&ureq, 0, sizeof(ureq));
        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        }
        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                status = -ENOMEM;
                goto gen_err;
        }
        qp->dev = dev;
        ocrdma_set_qp_init_params(qp, pd, attrs);

        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
                                      ureq.dpp_cq_id,
                                      &dpp_offset, &dpp_credit_lmt);
        if (status)
                goto mbx_err;

        /* user space QP's wr_id table are managed in library */
        if (udata == NULL) {
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
                                  OCRDMA_QP_FAST_REG);
                status = ocrdma_alloc_wr_id_tbl(qp);
                if (status)
                        goto map_err;
        }

        status = ocrdma_add_qpn_map(dev, qp);
        if (status)
                goto map_err;
        ocrdma_set_qp_db(dev, qp, pd);
        if (udata) {
                status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
                                              dpp_credit_lmt,
                                              (attrs->srq != NULL));
                if (status)
                        goto cpy_err;
        }
        ocrdma_store_gsi_qp_cq(dev, attrs);
        ocrdma_set_qp_use_cnt(qp, pd);
        mutex_unlock(&dev->dev_lock);
        return &qp->ibqp;

cpy_err:
        ocrdma_del_qpn_map(dev, qp);
map_err:
        ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
        mutex_unlock(&dev->dev_lock);
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
        ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
        return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask)
{
        int status = 0;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = qp->dev;
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
        /* if the new and previous states are the same, hw doesn't need to
         * know about it.
         */
        if (status < 0)
                return status;
        status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
        return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata)
{
        unsigned long flags;
        int status = -EINVAL;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps, new_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = qp->dev;

        /* synchronize with multiple contexts trying to change or
         * retrieve the qp state.
         */
        mutex_lock(&dev->dev_lock);
        /* synchronize with wqe, rqe posting and cqe processing contexts */
        spin_lock_irqsave(&qp->q_lock, flags);
        old_qps = get_ibqp_state(qp->state);
        if (attr_mask & IB_QP_STATE)
                new_qps = attr->qp_state;
        else
                new_qps = old_qps;
        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
                ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
                           "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
                           __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
                           old_qps, new_qps);
                goto param_err;
        }

        status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
        if (status > 0)
                status = 0;
param_err:
        mutex_unlock(&dev->dev_lock);
        return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
        switch (mtu) {
        case 256:
                return IB_MTU_256;
        case 512:
                return IB_MTU_512;
        case 1024:
                return IB_MTU_1024;
        case 2048:
                return IB_MTU_2048;
        case 4096:
                return IB_MTU_4096;
        default:
                return IB_MTU_1024;
        }
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
        int ib_qp_acc_flags = 0;

        if (qp_cap_flags & OCRDMA_QP_INB_WR)
                ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
        if (qp_cap_flags & OCRDMA_QP_INB_RD)
                ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
        return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
                    struct ib_qp_attr *qp_attr,
                    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
        int status;
        u32 qp_state;
        struct ocrdma_qp_params params;
        struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
        struct ocrdma_dev *dev = qp->dev;

        memset(&params, 0, sizeof(params));
        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_query_qp(dev, qp, &params);
        mutex_unlock(&dev->dev_lock);
        if (status)
                goto mbx_err;
        qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
        qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
        qp_attr->path_mtu =
                ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
                                OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
                                OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
        qp_attr->path_mig_state = IB_MIG_MIGRATED;
        qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
        qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
        qp_attr->dest_qp_num =
            params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

        qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
        qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
        qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
        qp_attr->cap.max_send_sge = qp->sq.max_sges;
        qp_attr->cap.max_recv_sge = qp->rq.max_sges;
        qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
        qp_init_attr->cap = qp_attr->cap;
        memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
               sizeof(params.dgid));
        qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
            OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
        qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
        qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
                                          OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
                                          OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
        qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
                                              OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
                                              OCRDMA_QP_PARAMS_TCLASS_SHIFT;

        qp_attr->ah_attr.ah_flags = IB_AH_GRH;
        qp_attr->ah_attr.port_num = 1;
        qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
                               OCRDMA_QP_PARAMS_SL_MASK) >>
                               OCRDMA_QP_PARAMS_SL_SHIFT;
        qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
                            OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
                                OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
        qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
                              OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
                                OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
        qp_attr->retry_cnt =
            (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
            OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
        qp_attr->min_rnr_timer = 0;
        qp_attr->pkey_index = 0;
        qp_attr->port_num = 1;
        qp_attr->ah_attr.src_path_bits = 0;
        qp_attr->ah_attr.static_rate = 0;
        qp_attr->alt_pkey_index = 0;
        qp_attr->alt_port_num = 0;
        qp_attr->alt_timeout = 0;
        memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
        qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
                    OCRDMA_QP_PARAMS_STATE_SHIFT;
        qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
        qp_attr->max_dest_rd_atomic =
            params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
        qp_attr->max_rd_atomic =
            params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
        qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
                                OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
        return status;
}

static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
        int i = idx / 32;
        unsigned int mask = (1 << (idx % 32));

        if (srq->idx_bit_fields[i] & mask)
                srq->idx_bit_fields[i] &= ~mask;
        else
                srq->idx_bit_fields[i] |= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
        int free_cnt;
        if (q->head >= q->tail)
                free_cnt = (q->max_cnt - q->head) + q->tail;
        else
                free_cnt = q->tail - q->head;
        return free_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
        return (qp->sq.tail == qp->sq.head &&
                ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
        return (qp->rq.tail == qp->rq.head) ? 1 : 0;
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
        return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
                                      u32 idx)
{
        return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
        q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
        q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
        unsigned long cq_flags;
        unsigned long flags;
        int discard_cnt = 0;
        u32 cur_getp, stop_getp;
        struct ocrdma_cqe *cqe;
        u32 qpn = 0;

        spin_lock_irqsave(&cq->cq_lock, cq_flags);

        /* traverse through the CQEs in the hw CQ,
         * find the matching CQE for a given qp,
         * mark the matching one discarded by clearing qpn.
         * ring the doorbell in the poll_cq() as
         * we don't complete out of order cqe.
         */

        cur_getp = cq->getp;
        /* find up to where we reap the cq. */
        stop_getp = cur_getp;
        do {
                if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
                        break;

                cqe = cq->va + cur_getp;
                /* exit if (a) we are done reaping the whole hw cq, or
                 * (b) qp_xq becomes empty.
                 */
                qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
                /* if previously discarded cqe found, skip that too. */
                /* check for matching qp */
                if (qpn == 0 || qpn != qp->id)
                        goto skip_cqe;

                /* mark cqe discarded so that it is not picked up later
                 * in the poll_cq().
                 */
                discard_cnt += 1;
                cqe->cmn.qpn = 0;
                if (is_cqe_for_sq(cqe))
                        ocrdma_hwq_inc_tail(&qp->sq);
                else {
                        if (qp->srq) {
                                spin_lock_irqsave(&qp->srq->q_lock, flags);
                                ocrdma_hwq_inc_tail(&qp->srq->rq);
                                ocrdma_srq_toggle_bit(qp->srq, cur_getp);
                                spin_unlock_irqrestore(&qp->srq->q_lock, flags);

                        } else
                                ocrdma_hwq_inc_tail(&qp->rq);
                }
skip_cqe:
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
        } while (cur_getp != stop_getp);
        spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
        int found = false;
        unsigned long flags;
        struct ocrdma_dev *dev = qp->dev;
        /* sync with any active CQ poll */

        spin_lock_irqsave(&dev->flush_q_lock, flags);
        found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
        if (found)
                list_del(&qp->sq_entry);
        if (!qp->srq) {
                found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
                if (found)
                        list_del(&qp->rq_entry);
        }
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
        int status;
        struct ocrdma_pd *pd;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        struct ib_qp_attr attrs;
        int attr_mask = IB_QP_STATE;
        unsigned long flags;

        qp = get_ocrdma_qp(ibqp);
        dev = qp->dev;

        attrs.qp_state = IB_QPS_ERR;
        pd = qp->pd;

        /* change the QP state to ERROR */
        _ocrdma_modify_qp(ibqp, &attrs, attr_mask);

        /* ensure that CQEs for a newly created QP (whose id may be the same
         * as one just being destroyed) don't get discarded until the old
         * CQEs are discarded.
         */
        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_destroy_qp(dev, qp);

        /*
         * acquire CQ lock while destroy is in progress, in order to
         * protect against processing in-flight CQEs for this QP.
         */
        spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
        if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
                spin_lock(&qp->rq_cq->cq_lock);

        ocrdma_del_qpn_map(dev, qp);

        if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
                spin_unlock(&qp->rq_cq->cq_lock);
        spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

        if (!pd->uctx) {
                ocrdma_discard_cqes(qp, qp->sq_cq);
                ocrdma_discard_cqes(qp, qp->rq_cq);
        }
        mutex_unlock(&dev->dev_lock);

        if (pd->uctx) {
                ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
                if (!qp->srq)
                        ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
        }

        ocrdma_del_flush_qp(qp);

        atomic_dec(&qp->pd->use_cnt);
        atomic_dec(&qp->sq_cq->use_cnt);
        atomic_dec(&qp->rq_cq->use_cnt);
        if (qp->srq)
                atomic_dec(&qp->srq->use_cnt);
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
        return status;
}

static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
{
        int status;
        struct ocrdma_create_srq_uresp uresp;

        uresp.rq_dbid = srq->rq.dbid;
        uresp.num_rq_pages = 1;
        uresp.rq_page_addr[0] = srq->rq.pa;
        uresp.rq_page_size = srq->rq.len;
        uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
                (srq->pd->id * srq->dev->nic_info.db_page_size);
        uresp.db_page_size = srq->dev->nic_info.db_page_size;
        uresp.num_rqe_allocated = srq->rq.max_cnt;
        if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
                uresp.db_shift = 24;
        } else {
                uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
                uresp.db_shift = 16;
        }

        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status)
                return status;
        status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
                                 uresp.rq_page_size);
        if (status)
                return status;
        return status;
}

1520 | struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, | |
1521 | struct ib_srq_init_attr *init_attr, | |
1522 | struct ib_udata *udata) | |
1523 | { | |
1524 | int status = -ENOMEM; | |
1525 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | |
1526 | struct ocrdma_dev *dev = pd->dev; | |
1527 | struct ocrdma_srq *srq; | |
1528 | ||
1529 | if (init_attr->attr.max_sge > dev->attr.max_recv_sge) | |
1530 | return ERR_PTR(-EINVAL); | |
1531 | if (init_attr->attr.max_wr > dev->attr.max_rqe) | |
1532 | return ERR_PTR(-EINVAL); | |
1533 | ||
1534 | srq = kzalloc(sizeof(*srq), GFP_KERNEL); | |
1535 | if (!srq) | |
1536 | return ERR_PTR(status); | |
1537 | ||
1538 | spin_lock_init(&srq->q_lock); | |
1539 | srq->dev = dev; | |
1540 | srq->pd = pd; | |
1541 | srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); | |
1542 | status = ocrdma_mbx_create_srq(srq, init_attr, pd); | |
1543 | if (status) | |
1544 | goto err; | |
1545 | ||
1546 | if (udata == NULL) { | |
1547 | srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, | |
1548 | GFP_KERNEL); | |
1549 | if (srq->rqe_wr_id_tbl == NULL) | |
1550 | goto arm_err; | |
1551 | ||
1552 | srq->bit_fields_len = (srq->rq.max_cnt / 32) + | |
1553 | (srq->rq.max_cnt % 32 ? 1 : 0); | |
1554 | srq->idx_bit_fields = | |
1555 | kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL); | |
1556 | if (srq->idx_bit_fields == NULL) | |
1557 | goto arm_err; | |
1558 | memset(srq->idx_bit_fields, 0xff, | |
1559 | srq->bit_fields_len * sizeof(u32)); | |
1560 | } | |
1561 | ||
1562 | if (init_attr->attr.srq_limit) { | |
1563 | status = ocrdma_mbx_modify_srq(srq, &init_attr->attr); | |
1564 | if (status) | |
1565 | goto arm_err; | |
1566 | } | |
1567 | ||
1568 | atomic_set(&srq->use_cnt, 0); | |
1569 | if (udata) { | |
1570 | status = ocrdma_copy_srq_uresp(srq, udata); | |
1571 | if (status) | |
1572 | goto arm_err; | |
1573 | } | |
1574 | ||
1575 | atomic_inc(&pd->use_cnt); | |
1576 | return &srq->ibsrq; | |
1577 | ||
1578 | arm_err: | |
1579 | ocrdma_mbx_destroy_srq(dev, srq); | |
1580 | err: | |
1581 | kfree(srq->rqe_wr_id_tbl); | |
1582 | kfree(srq->idx_bit_fields); | |
1583 | kfree(srq); | |
1584 | return ERR_PTR(status); | |
1585 | } | |

int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
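	/* Resizing the SRQ (IB_SRQ_MAX_WR) is not supported; only the
	 * SRQ limit can be modified.
	 */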
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	return ocrdma_mbx_query_srq(srq, srq_attr);
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;
	if (atomic_read(&srq->use_cnt)) {
		ocrdma_err("%s(%d) err, srq=0x%x in use\n",
			   __func__, dev->id, srq->id);
		return -EAGAIN;
	}

	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);

	atomic_dec(&srq->pd->use_cnt);
	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */
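
/* Build the extended UD header (AH id, destination QPN, qkey) that
 * follows the base WQE header for UD and GSI sends.
 */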
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);

	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = wr->wr.ud.remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
}

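/* Copy the ib_sge list into the WQE's hardware SGE slots and
 * accumulate the total payload length into the WQE header.
 */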
static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

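/* For IB_SEND_INLINE, copy the payload directly into the WQE;
 * otherwise build a gather list of SGEs. Either way, encode the
 * resulting WQE size (in strides) into the header control word.
 */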
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    struct ib_send_wr *wr, u32 wqe_size)
{
	if (wr->send_flags & IB_SEND_INLINE) {
		if (wr->sg_list[0].length > qp->max_inline_data) {
			ocrdma_err("%s() supported_len=0x%x,"
				   " unsupported len req=0x%x\n", __func__,
				   qp->max_inline_data, wr->sg_list[0].length);
			return -EINVAL;
		}
		memcpy(sge,
		       (void *)(unsigned long)wr->sg_list[0].addr,
		       wr->sg_list[0].length);
		hdr->total_len = wr->sg_list[0].length;
		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     struct ib_send_wr *wr)
{
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
}

static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

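/* RDMA READ WQEs carry the remote address/rkey in an extended SGE
 * that precedes the local scatter list; no inline data is possible.
 */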
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
		sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
}

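/* Ring the SQ doorbell. The low bits carry the queue id; the value
 * in the upper bits appears to be the count of newly posted WQEs,
 * which is always one per call here.
 */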
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << 16);

	iowrite32(val, qp->sq_db);
}

int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return -EINVAL;
	}

	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
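			/* fall through: SEND_WITH_IMM shares the SEND path */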
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
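			/* fall through: WRITE_WITH_IMM shares the WRITE path */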
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
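			/* fall through: READ_WITH_INV shares the READ path */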
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
				    OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
					OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp));

	iowrite32(val, qp->rq_db);
}

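/* Build a receive WQE: a control word carrying size and flags, the
 * caller's tag (used by SRQs to match out-of-order completions), and
 * the SGE list; finally convert the RQE to little endian for the
 * adapter.
 */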
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
			     u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
		   OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

/* CQEs for an SRQ's RQEs can arrive out of order. The index gives
 * the entry in the shadow table where the wr_id is stored; the
 * adapter returns this tag/index in the CQE so that a completion can
 * be matched back to its RQE.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}

	BUG_ON(row == srq->bit_fields_len);
	return indx;
}

static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}

static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

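/* Reconstruct a work completion for a send-queue WQE from the WQE
 * itself: recover the opcode from the (byte-swapped) control word
 * and look up the wr_id saved at post time.
 */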
static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);

	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		ocrdma_err("%s() invalid opcode received = 0x%x\n",
			   __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

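/* Overwrite the status field of a CQE with WR_FLUSH_ERR so that
 * pending work requests complete as flushed; UD receive CQEs keep
 * their status in a different field than other CQE types.
 */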
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
				 OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
					 OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
					 OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}

static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);

	/* if WQEs or RQEs are still pending for which CQEs need to be
	 * returned, expand this CQE into further flush CQEs.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

static bool ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				   struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static bool ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				   struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	/* when the hw SQ is empty but the RQ is not, keep the CQE so
	 * that we get the CQ event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when the same CQ serves both the RQ and the SQ, it is
		 * safe to return flush CQEs for the RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further CQEs as this CQE is used
			 * for triggering a CQ event on the buddy CQ of the
			 * RQ. When the QP is destroyed, this CQE will be
			 * removed from the CQ's hardware queue.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;	/* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}

	wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
	if (tail != wqe_idx)
		expand = true;	/* coalesced CQE, can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
{
	int status;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
		OCRDMA_CQE_PKEY_MASK;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT);
	return status;
}

static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}

static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;

	/* when the hw RQ is empty but the SQ is not, keep the CQE so
	 * that we get the CQ event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
			  OCRDMA_CQE_UD_STATUS_SHIFT;
	else
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

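/* Advance CQE consumption state: devices that report phase changes
 * flip the expected valid-phase bit on queue wrap; others require
 * the driver to clear the valid bit of each consumed CQE.
 */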
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}

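/* Walk the hardware CQ from the current get pointer, translating
 * each valid CQE into an ib_wc. "expand" re-reads the same CQE to
 * emit additional flush completions; "stop" leaves the CQE in place
 * so that the buddy CQ still gets its event.
 */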
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = cq->dev;
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;
	if (polled_hw_cqes || expand || stop) {
		ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
				  polled_hw_cqes);
	}
	return i;
}

/* insert error CQEs if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = NULL;
	unsigned long flags;
	struct ocrdma_dev *dev;
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;

	cq = get_ocrdma_cq(ibcq);
	dev = cq->dev;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* the adapter returns a single error CQE when a QP moves
		 * to the error state. So, for every pending WQE and RQE
		 * of this QP's SQ and RQ that uses this CQ, insert an
		 * error CQE with wc_status set to FLUSHED.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq;
	unsigned long flags;
	struct ocrdma_dev *dev;
	u16 cq_id;
	u16 cur_getp;
	struct ocrdma_cqe *cqe;

	cq = get_ocrdma_cq(ibcq);
	cq_id = cq->id;
	dev = cq->dev;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		cq->armed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		cq->solicited = true;

	cur_getp = cq->getp;
	cqe = cq->va + cur_getp;

	/* check whether any valid cqe exists; if not, it is safe to arm.
	 * If a cqe has not yet been consumed, let it be consumed first
	 * and then arm, to avoid false interrupts.
	 */
	if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
		cq->arm_needed = false;
		ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return 0;
}