RDMA/ocrdma: For ERX2 irrespective of Qid, num_posted offset is 24
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1/*******************************************************************
2 * This file is part of the Emulex RoCE Device Driver for *
3 * RoCE (RDMA over Converged Ethernet) adapters. *
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *
20 * Contact Information:
21 * linux-drivers@emulex.com
22 *
23 * Emulex
24 * 3333 Susan Street
25 * Costa Mesa, CA 92626
26 *******************************************************************/
27
28#include <linux/dma-mapping.h>
29#include <rdma/ib_verbs.h>
30#include <rdma/ib_user_verbs.h>
31#include <rdma/iw_cm.h>
32#include <rdma/ib_umem.h>
33#include <rdma/ib_addr.h>
34
35#include "ocrdma.h"
36#include "ocrdma_hw.h"
37#include "ocrdma_verbs.h"
38#include "ocrdma_abi.h"
39
40int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
41{
42 if (index > 1)
43 return -EINVAL;
44
45 *pkey = 0xffff;
46 return 0;
47}
48
49int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
50 int index, union ib_gid *sgid)
51{
52 struct ocrdma_dev *dev;
53
54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid));
 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL;
58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
60
61 return 0;
62}
63
64int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
65{
66 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
67
68 memset(attr, 0, sizeof *attr);
69 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
70 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
71 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
72 attr->max_mr_size = ~0ull;
73 attr->page_size_cap = 0xffff000;
74 attr->vendor_id = dev->nic_info.pdev->vendor;
75 attr->vendor_part_id = dev->nic_info.pdev->device;
76 attr->hw_ver = 0;
77 attr->max_qp = dev->attr.max_qp;
 78 attr->max_ah = OCRDMA_MAX_AH;
79 attr->max_qp_wr = dev->attr.max_wqe;
80
81 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
82 IB_DEVICE_RC_RNR_NAK_GEN |
83 IB_DEVICE_SHUTDOWN_PORT |
84 IB_DEVICE_SYS_IMAGE_GUID |
85 IB_DEVICE_LOCAL_DMA_LKEY;
 86 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
 87 attr->max_sge_rd = dev->attr.max_rdma_sge;
88 attr->max_cq = dev->attr.max_cq;
89 attr->max_cqe = dev->attr.max_cqe;
90 attr->max_mr = dev->attr.max_mr;
91 attr->max_mw = 0;
92 attr->max_pd = dev->attr.max_pd;
93 attr->atomic_cap = 0;
94 attr->max_fmr = 0;
95 attr->max_map_per_fmr = 0;
96 attr->max_qp_rd_atom =
97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
98 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
 99 attr->max_srq = dev->attr.max_srq;
 100 attr->max_srq_sge = dev->attr.max_srq_sge;
101 attr->max_srq_wr = dev->attr.max_rqe;
102 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
103 attr->max_fast_reg_page_list_len = 0;
104 attr->max_pkeys = 1;
105 return 0;
106}
107
108int ocrdma_query_port(struct ib_device *ibdev,
109 u8 port, struct ib_port_attr *props)
110{
111 enum ib_port_state port_state;
112 struct ocrdma_dev *dev;
113 struct net_device *netdev;
114
115 dev = get_ocrdma_dev(ibdev);
116 if (port > 1) {
117 pr_err("%s(%d) invalid_port=0x%x\n", __func__,
118 dev->id, port);
119 return -EINVAL;
120 }
121 netdev = dev->nic_info.netdev;
122 if (netif_running(netdev) && netif_oper_up(netdev)) {
123 port_state = IB_PORT_ACTIVE;
124 props->phys_state = 5;
125 } else {
126 port_state = IB_PORT_DOWN;
127 props->phys_state = 3;
128 }
129 props->max_mtu = IB_MTU_4096;
130 props->active_mtu = iboe_get_mtu(netdev->mtu);
131 props->lid = 0;
132 props->lmc = 0;
133 props->sm_lid = 0;
134 props->sm_sl = 0;
135 props->state = port_state;
136 props->port_cap_flags =
137 IB_PORT_CM_SUP |
138 IB_PORT_REINIT_SUP |
139 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
140 props->gid_tbl_len = OCRDMA_MAX_SGID;
141 props->pkey_tbl_len = 1;
142 props->bad_pkey_cntr = 0;
143 props->qkey_viol_cntr = 0;
144 props->active_width = IB_WIDTH_1X;
145 props->active_speed = 4;
146 props->max_msg_sz = 0x80000000;
147 props->max_vl_num = 4;
148 return 0;
149}
150
151int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
152 struct ib_port_modify *props)
153{
154 struct ocrdma_dev *dev;
155
156 dev = get_ocrdma_dev(ibdev);
157 if (port > 1) {
 158 pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
159 return -EINVAL;
160 }
161 return 0;
162}
163
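/* Remember a physical address range that this user context is allowed to
 * mmap(); ocrdma_mmap() later validates requests against this list via
 * ocrdma_search_mmap().
 */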
164static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
165 unsigned long len)
166{
167 struct ocrdma_mm *mm;
168
169 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
170 if (mm == NULL)
171 return -ENOMEM;
172 mm->key.phy_addr = phy_addr;
173 mm->key.len = len;
174 INIT_LIST_HEAD(&mm->entry);
175
176 mutex_lock(&uctx->mm_list_lock);
177 list_add_tail(&mm->entry, &uctx->mm_head);
178 mutex_unlock(&uctx->mm_list_lock);
179 return 0;
180}
181
182static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
183 unsigned long len)
184{
185 struct ocrdma_mm *mm, *tmp;
186
187 mutex_lock(&uctx->mm_list_lock);
188 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
 189 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
190 continue;
191
192 list_del(&mm->entry);
193 kfree(mm);
194 break;
195 }
196 mutex_unlock(&uctx->mm_list_lock);
197}
198
199static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
200 unsigned long len)
201{
202 bool found = false;
203 struct ocrdma_mm *mm;
204
205 mutex_lock(&uctx->mm_list_lock);
206 list_for_each_entry(mm, &uctx->mm_head, entry) {
 207 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
208 continue;
209
210 found = true;
211 break;
212 }
213 mutex_unlock(&uctx->mm_list_lock);
214 return found;
215}
216
217struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
218 struct ib_udata *udata)
219{
220 int status;
221 struct ocrdma_ucontext *ctx;
222 struct ocrdma_alloc_ucontext_resp resp;
223 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
224 struct pci_dev *pdev = dev->nic_info.pdev;
225 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
226
227 if (!udata)
228 return ERR_PTR(-EFAULT);
229 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
230 if (!ctx)
231 return ERR_PTR(-ENOMEM);
232 INIT_LIST_HEAD(&ctx->mm_head);
233 mutex_init(&ctx->mm_list_lock);
234
235 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
236 &ctx->ah_tbl.pa, GFP_KERNEL);
237 if (!ctx->ah_tbl.va) {
238 kfree(ctx);
239 return ERR_PTR(-ENOMEM);
240 }
241 memset(ctx->ah_tbl.va, 0, map_len);
242 ctx->ah_tbl.len = map_len;
243
 244 memset(&resp, 0, sizeof(resp));
245 resp.ah_tbl_len = ctx->ah_tbl.len;
246 resp.ah_tbl_page = ctx->ah_tbl.pa;
247
248 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
249 if (status)
250 goto map_err;
251 resp.dev_id = dev->id;
252 resp.max_inline_data = dev->attr.max_inline_data;
253 resp.wqe_size = dev->attr.wqe_size;
254 resp.rqe_size = dev->attr.rqe_size;
255 resp.dpp_wqe_size = dev->attr.wqe_size;
256
257 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
258 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
259 if (status)
260 goto cpy_err;
261 return &ctx->ibucontext;
262
263cpy_err:
264 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
265map_err:
266 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
267 ctx->ah_tbl.pa);
268 kfree(ctx);
269 return ERR_PTR(status);
270}
271
272int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
273{
274 struct ocrdma_mm *mm, *tmp;
275 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
276 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
277 struct pci_dev *pdev = dev->nic_info.pdev;
278
279 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
280 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
281 uctx->ah_tbl.pa);
282
283 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
284 list_del(&mm->entry);
285 kfree(mm);
286 }
287 kfree(uctx);
288 return 0;
289}
290
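/* Map doorbell pages (non-cached), DPP buffers (write-combined) or queue
 * memory (cacheable) into user space; only ranges previously registered
 * with ocrdma_add_mmap() for this context are allowed.
 */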
291int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
292{
293 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
 294 struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
295 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
296 u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
297 unsigned long len = (vma->vm_end - vma->vm_start);
298 int status = 0;
299 bool found;
300
301 if (vma->vm_start & (PAGE_SIZE - 1))
302 return -EINVAL;
303 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
304 if (!found)
305 return -EINVAL;
306
307 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
308 dev->nic_info.db_total_size)) &&
309 (len <= dev->nic_info.db_page_size)) {
310 if (vma->vm_flags & VM_READ)
311 return -EPERM;
312
313 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
314 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
315 len, vma->vm_page_prot);
316 } else if (dev->nic_info.dpp_unmapped_len &&
317 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
318 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
319 dev->nic_info.dpp_unmapped_len)) &&
320 (len <= dev->nic_info.dpp_unmapped_len)) {
321 if (vma->vm_flags & VM_READ)
322 return -EPERM;
323
324 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
325 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
326 len, vma->vm_page_prot);
327 } else {
328 status = remap_pfn_range(vma, vma->vm_start,
329 vma->vm_pgoff, len, vma->vm_page_prot);
330 }
331 return status;
332}
333
 334static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
335 struct ib_ucontext *ib_ctx,
336 struct ib_udata *udata)
337{
338 int status;
339 u64 db_page_addr;
 340 u64 dpp_page_addr = 0;
341 u32 db_page_size;
342 struct ocrdma_alloc_pd_uresp rsp;
343 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
344
 345 memset(&rsp, 0, sizeof(rsp));
346 rsp.id = pd->id;
347 rsp.dpp_enabled = pd->dpp_enabled;
348 db_page_addr = dev->nic_info.unmapped_db +
349 (pd->id * dev->nic_info.db_page_size);
350 db_page_size = dev->nic_info.db_page_size;
351
352 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
353 if (status)
354 return status;
355
356 if (pd->dpp_enabled) {
 357 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
 358 (pd->id * PAGE_SIZE);
 359 status = ocrdma_add_mmap(uctx, dpp_page_addr,
 360 PAGE_SIZE);
361 if (status)
362 goto dpp_map_err;
363 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
364 rsp.dpp_page_addr_lo = dpp_page_addr;
365 }
366
367 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
368 if (status)
369 goto ucopy_err;
370
371 pd->uctx = uctx;
372 return 0;
373
374ucopy_err:
 375 if (pd->dpp_enabled)
 376 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
377dpp_map_err:
378 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
379 return status;
380}
381
382struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
383 struct ib_ucontext *context,
384 struct ib_udata *udata)
385{
386 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
387 struct ocrdma_pd *pd;
388 int status;
389
390 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
391 if (!pd)
392 return ERR_PTR(-ENOMEM);
 393 if (udata && context) {
394 pd->dpp_enabled =
395 (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY);
396 pd->num_dpp_qp =
397 pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
398 }
 399retry:
400 status = ocrdma_mbx_alloc_pd(dev, pd);
401 if (status) {
 402 /* try for pd without dpp */
403 if (pd->dpp_enabled) {
404 pd->dpp_enabled = false;
405 pd->num_dpp_qp = 0;
406 goto retry;
407 } else {
408 kfree(pd);
409 return ERR_PTR(status);
410 }
 411 }
412
413 if (udata && context) {
 414 status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
415 if (status)
416 goto err;
417 }
418 return &pd->ibpd;
419
420err:
421 status = ocrdma_mbx_dealloc_pd(dev, pd);
422 kfree(pd);
423 return ERR_PTR(status);
424}
425
426int ocrdma_dealloc_pd(struct ib_pd *ibpd)
427{
428 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 429 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
430 int status;
431 u64 usr_db;
432
433 status = ocrdma_mbx_dealloc_pd(dev, pd);
434 if (pd->uctx) {
435 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
 436 (pd->id * PAGE_SIZE);
 437 if (pd->dpp_enabled)
 438 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
439 usr_db = dev->nic_info.unmapped_db +
440 (pd->id * dev->nic_info.db_page_size);
441 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
442 }
443 kfree(pd);
444 return status;
445}
446
447static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
448 u32 pdid, int acc, u32 num_pbls, u32 addr_check)
449{
450 int status;
 451
452 mr->hwmr.fr_mr = 0;
453 mr->hwmr.local_rd = 1;
454 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
455 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
456 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
457 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
458 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
459 mr->hwmr.num_pbls = num_pbls;
460
461 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
462 if (status)
463 return status;
464
465 mr->ibmr.lkey = mr->hwmr.lkey;
466 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
467 mr->ibmr.rkey = mr->hwmr.lkey;
 468 return 0;
469}
470
471struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
472{
 473 int status;
 474 struct ocrdma_mr *mr;
475 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
476 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
477
478 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
479 pr_err("%s err, invalid access rights\n", __func__);
480 return ERR_PTR(-EINVAL);
481 }
 482
483 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
484 if (!mr)
485 return ERR_PTR(-ENOMEM);
486
 487 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
488 OCRDMA_ADDR_CHECK_DISABLE);
489 if (status) {
490 kfree(mr);
491 return ERR_PTR(status);
492 }
493
494 return &mr->ibmr;
495}
496
497static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
498 struct ocrdma_hw_mr *mr)
499{
500 struct pci_dev *pdev = dev->nic_info.pdev;
501 int i = 0;
502
503 if (mr->pbl_table) {
504 for (i = 0; i < mr->num_pbls; i++) {
505 if (!mr->pbl_table[i].va)
506 continue;
507 dma_free_coherent(&pdev->dev, mr->pbl_size,
508 mr->pbl_table[i].va,
509 mr->pbl_table[i].pa);
510 }
511 kfree(mr->pbl_table);
512 mr->pbl_table = NULL;
513 }
514}
515
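/* Pick the smallest PBL size (a power-of-two multiple of OCRDMA_MIN_HPAGE_SIZE)
 * for which the required number of PBLs stays below the device limit, and
 * record num_pbes/num_pbls/pbl_size in the hardware MR.
 */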
516static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
517 u32 num_pbes)
518{
519 u32 num_pbls = 0;
520 u32 idx = 0;
521 int status = 0;
522 u32 pbl_size;
523
524 do {
525 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
526 if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
527 status = -EFAULT;
528 break;
529 }
530 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
531 num_pbls = num_pbls / (pbl_size / sizeof(u64));
532 idx++;
 533 } while (num_pbls >= dev->attr.max_num_mr_pbl);
534
535 mr->hwmr.num_pbes = num_pbes;
536 mr->hwmr.num_pbls = num_pbls;
537 mr->hwmr.pbl_size = pbl_size;
538 return status;
539}
540
541static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
542{
543 int status = 0;
544 int i;
545 u32 dma_len = mr->pbl_size;
546 struct pci_dev *pdev = dev->nic_info.pdev;
547 void *va;
548 dma_addr_t pa;
549
550 mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
551 mr->num_pbls, GFP_KERNEL);
552
553 if (!mr->pbl_table)
554 return -ENOMEM;
555
556 for (i = 0; i < mr->num_pbls; i++) {
557 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
558 if (!va) {
559 ocrdma_free_mr_pbl_tbl(dev, mr);
560 status = -ENOMEM;
561 break;
562 }
563 memset(va, 0, dma_len);
564 mr->pbl_table[i].va = va;
565 mr->pbl_table[i].pa = pa;
566 }
567 return status;
568}
569
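/* Walk the umem chunk list and write the DMA address of every page into the
 * PBEs, moving to the next PBL whenever the current one is full.
 */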
570static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
571 u32 num_pbes)
572{
573 struct ocrdma_pbe *pbe;
574 struct ib_umem_chunk *chunk;
575 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
576 struct ib_umem *umem = mr->umem;
577 int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
578
579 if (!mr->hwmr.num_pbes)
580 return;
581
582 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
583 pbe_cnt = 0;
584
585 shift = ilog2(umem->page_size);
586
587 list_for_each_entry(chunk, &umem->chunk_list, list) {
588 /* get all the dma regions from the chunk. */
589 for (i = 0; i < chunk->nmap; i++) {
590 pages = sg_dma_len(&chunk->page_list[i]) >> shift;
591 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
592 /* store the page address in pbe */
593 pbe->pa_lo =
594 cpu_to_le32(sg_dma_address
595 (&chunk->page_list[i]) +
596 (umem->page_size * pg_cnt));
597 pbe->pa_hi =
598 cpu_to_le32(upper_32_bits
599 ((sg_dma_address
600 (&chunk->page_list[i]) +
601 umem->page_size * pg_cnt)));
602 pbe_cnt += 1;
603 total_num_pbes += 1;
604 pbe++;
605
606 /* if done building pbes, issue the mbx cmd. */
607 if (total_num_pbes == num_pbes)
608 return;
609
610 /* if the given pbl is full storing the pbes,
611 * move to next pbl.
612 */
613 if (pbe_cnt ==
614 (mr->hwmr.pbl_size / sizeof(u64))) {
615 pbl_tbl++;
616 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
617 pbe_cnt = 0;
618 }
619 }
620 }
621 }
622}
623
624struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
625 u64 usr_addr, int acc, struct ib_udata *udata)
626{
627 int status = -ENOMEM;
 628 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
629 struct ocrdma_mr *mr;
630 struct ocrdma_pd *pd;
631 u32 num_pbes;
632
633 pd = get_ocrdma_pd(ibpd);
634
635 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
636 return ERR_PTR(-EINVAL);
637
638 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
639 if (!mr)
640 return ERR_PTR(status);
641 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
642 if (IS_ERR(mr->umem)) {
643 status = -EFAULT;
644 goto umem_err;
645 }
646 num_pbes = ib_umem_page_count(mr->umem);
 647 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
648 if (status)
649 goto umem_err;
650
651 mr->hwmr.pbe_size = mr->umem->page_size;
652 mr->hwmr.fbo = mr->umem->offset;
653 mr->hwmr.va = usr_addr;
654 mr->hwmr.len = len;
655 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
656 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
657 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
658 mr->hwmr.local_rd = 1;
659 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
660 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
661 if (status)
662 goto umem_err;
663 build_user_pbes(dev, mr, num_pbes);
664 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
665 if (status)
666 goto mbx_err;
667 mr->ibmr.lkey = mr->hwmr.lkey;
668 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
669 mr->ibmr.rkey = mr->hwmr.lkey;
670
671 return &mr->ibmr;
672
673mbx_err:
674 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
675umem_err:
676 kfree(mr);
677 return ERR_PTR(status);
678}
679
680int ocrdma_dereg_mr(struct ib_mr *ib_mr)
681{
682 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 683 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
684 int status;
685
686 status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
687
688 if (mr->hwmr.fr_mr == 0)
689 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
690
691 /* it could be user registered memory. */
692 if (mr->umem)
693 ib_umem_release(mr->umem);
694 kfree(mr);
695 return status;
696}
697
698static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
699 struct ib_udata *udata,
700 struct ib_ucontext *ib_ctx)
701{
702 int status;
703 struct ocrdma_ucontext *uctx;
704 struct ocrdma_create_cq_uresp uresp;
705
 706 memset(&uresp, 0, sizeof(uresp));
 707 uresp.cq_id = cq->id;
 708 uresp.page_size = PAGE_ALIGN(cq->len);
709 uresp.num_pages = 1;
710 uresp.max_hw_cqe = cq->max_hw_cqe;
711 uresp.page_addr[0] = cq->pa;
712 uresp.db_page_addr = dev->nic_info.unmapped_db;
713 uresp.db_page_size = dev->nic_info.db_page_size;
714 uresp.phase_change = cq->phase_change ? 1 : 0;
715 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
716 if (status) {
 717 pr_err("%s(%d) copy error cqid=0x%x.\n",
 718 __func__, dev->id, cq->id);
719 goto err;
720 }
721 uctx = get_ocrdma_ucontext(ib_ctx);
722 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
723 if (status)
724 goto err;
725 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
726 if (status) {
727 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
728 goto err;
729 }
730 cq->ucontext = uctx;
731err:
732 return status;
733}
734
735struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
736 struct ib_ucontext *ib_ctx,
737 struct ib_udata *udata)
738{
739 struct ocrdma_cq *cq;
740 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
741 int status;
742 struct ocrdma_create_cq_ureq ureq;
743
744 if (udata) {
745 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
746 return ERR_PTR(-EFAULT);
747 } else
748 ureq.dpp_cq = 0;
749 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
750 if (!cq)
751 return ERR_PTR(-ENOMEM);
752
753 spin_lock_init(&cq->cq_lock);
754 spin_lock_init(&cq->comp_handler_lock);
755 INIT_LIST_HEAD(&cq->sq_head);
756 INIT_LIST_HEAD(&cq->rq_head);
757
758 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
759 if (status) {
760 kfree(cq);
761 return ERR_PTR(status);
762 }
763 if (ib_ctx) {
 764 status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
765 if (status)
766 goto ctx_err;
767 }
768 cq->phase = OCRDMA_CQE_VALID;
769 cq->arm_needed = true;
770 dev->cq_tbl[cq->id] = cq;
771
772 return &cq->ibcq;
773
774ctx_err:
775 ocrdma_mbx_destroy_cq(dev, cq);
776 kfree(cq);
777 return ERR_PTR(status);
778}
779
780int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
781 struct ib_udata *udata)
782{
783 int status = 0;
784 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
785
786 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
787 status = -EINVAL;
788 return status;
789 }
790 ibcq->cqe = new_cnt;
791 return status;
792}
793
794int ocrdma_destroy_cq(struct ib_cq *ibcq)
795{
796 int status;
797 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 798 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
 799
800 status = ocrdma_mbx_destroy_cq(dev, cq);
801
802 if (cq->ucontext) {
803 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
804 PAGE_ALIGN(cq->len));
805 ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
806 dev->nic_info.db_page_size);
807 }
808 dev->cq_tbl[cq->id] = NULL;
809
810 kfree(cq);
811 return status;
812}
813
814static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
815{
816 int status = -EINVAL;
817
818 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
819 dev->qp_tbl[qp->id] = qp;
820 status = 0;
821 }
822 return status;
823}
824
825static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
826{
827 dev->qp_tbl[qp->id] = NULL;
828}
829
830static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
831 struct ib_qp_init_attr *attrs)
832{
833 if ((attrs->qp_type != IB_QPT_GSI) &&
834 (attrs->qp_type != IB_QPT_RC) &&
835 (attrs->qp_type != IB_QPT_UC) &&
836 (attrs->qp_type != IB_QPT_UD)) {
837 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
838 __func__, dev->id, attrs->qp_type);
839 return -EINVAL;
840 }
841 /* Skip the check for QP1 to support CM size of 128 */
842 if ((attrs->qp_type != IB_QPT_GSI) &&
843 (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
844 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
845 __func__, dev->id, attrs->cap.max_send_wr);
846 pr_err("%s(%d) supported send_wr=0x%x\n",
847 __func__, dev->id, dev->attr.max_wqe);
848 return -EINVAL;
849 }
850 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
851 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
852 __func__, dev->id, attrs->cap.max_recv_wr);
853 pr_err("%s(%d) supported recv_wr=0x%x\n",
854 __func__, dev->id, dev->attr.max_rqe);
855 return -EINVAL;
856 }
857 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
858 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
859 __func__, dev->id, attrs->cap.max_inline_data);
860 pr_err("%s(%d) supported inline data size=0x%x\n",
861 __func__, dev->id, dev->attr.max_inline_data);
862 return -EINVAL;
863 }
864 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
865 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
866 __func__, dev->id, attrs->cap.max_send_sge);
867 pr_err("%s(%d) supported send_sge=0x%x\n",
868 __func__, dev->id, dev->attr.max_send_sge);
869 return -EINVAL;
870 }
871 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
872 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
873 __func__, dev->id, attrs->cap.max_recv_sge);
874 pr_err("%s(%d) supported recv_sge=0x%x\n",
875 __func__, dev->id, dev->attr.max_recv_sge);
876 return -EINVAL;
877 }
878 /* unprivileged user space cannot create special QP */
879 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
 880 pr_err
881 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
882 __func__, dev->id, attrs->qp_type);
883 return -EINVAL;
884 }
885 /* allow creating only one GSI type of QP */
886 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
887 pr_err("%s(%d) GSI special QPs already created.\n",
888 __func__, dev->id);
889 return -EINVAL;
890 }
891 /* verify consumer QPs are not trying to use GSI QP's CQ */
892 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
893 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
 894 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
 895 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
 896 __func__, dev->id);
897 return -EINVAL;
898 }
899 }
900 return 0;
901}
902
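/* Build the create-QP response for user space: queue DMA addresses
 * (registered for later mmap), doorbell page and offsets, the doorbell
 * shift (24 on the GEN2 family, 16 otherwise) and DPP credits if enabled.
 */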
903static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
904 struct ib_udata *udata, int dpp_offset,
905 int dpp_credit_lmt, int srq)
906{
907 int status = 0;
908 u64 usr_db;
909 struct ocrdma_create_qp_uresp uresp;
910 struct ocrdma_dev *dev = qp->dev;
911 struct ocrdma_pd *pd = qp->pd;
912
913 memset(&uresp, 0, sizeof(uresp));
914 usr_db = dev->nic_info.unmapped_db +
915 (pd->id * dev->nic_info.db_page_size);
916 uresp.qp_id = qp->id;
917 uresp.sq_dbid = qp->sq.dbid;
918 uresp.num_sq_pages = 1;
 919 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
920 uresp.sq_page_addr[0] = qp->sq.pa;
921 uresp.num_wqe_allocated = qp->sq.max_cnt;
922 if (!srq) {
923 uresp.rq_dbid = qp->rq.dbid;
924 uresp.num_rq_pages = 1;
 925 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
926 uresp.rq_page_addr[0] = qp->rq.pa;
927 uresp.num_rqe_allocated = qp->rq.max_cnt;
928 }
929 uresp.db_page_addr = usr_db;
930 uresp.db_page_size = dev->nic_info.db_page_size;
931 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
932 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
933 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
934 uresp.db_shift = 24;
935 } else {
936 uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
937 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
938 uresp.db_shift = 16;
939 }
940
941 if (qp->dpp_enabled) {
942 uresp.dpp_credit = dpp_credit_lmt;
943 uresp.dpp_offset = dpp_offset;
944 }
945 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
946 if (status) {
 947 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
948 goto err;
949 }
950 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
951 uresp.sq_page_size);
952 if (status)
953 goto err;
954
955 if (!srq) {
956 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
957 uresp.rq_page_size);
958 if (status)
959 goto rq_map_err;
960 }
961 return status;
962rq_map_err:
963 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
964err:
965 return status;
966}
967
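/* Compute the kernel virtual addresses of the SQ/RQ doorbells inside this
 * PD's doorbell page; GEN2 devices use different register offsets.
 */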
968static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
969 struct ocrdma_pd *pd)
970{
971 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
972 qp->sq_db = dev->nic_info.db +
973 (pd->id * dev->nic_info.db_page_size) +
974 OCRDMA_DB_GEN2_SQ_OFFSET;
975 qp->rq_db = dev->nic_info.db +
976 (pd->id * dev->nic_info.db_page_size) +
 977 OCRDMA_DB_GEN2_RQ_OFFSET;
978 } else {
979 qp->sq_db = dev->nic_info.db +
980 (pd->id * dev->nic_info.db_page_size) +
981 OCRDMA_DB_SQ_OFFSET;
982 qp->rq_db = dev->nic_info.db +
983 (pd->id * dev->nic_info.db_page_size) +
984 OCRDMA_DB_RQ_OFFSET;
985 }
986}
987
988static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
989{
990 qp->wqe_wr_id_tbl =
991 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
992 GFP_KERNEL);
993 if (qp->wqe_wr_id_tbl == NULL)
994 return -ENOMEM;
995 qp->rqe_wr_id_tbl =
996 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
997 if (qp->rqe_wr_id_tbl == NULL)
998 return -ENOMEM;
999
1000 return 0;
1001}
1002
1003static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1004 struct ocrdma_pd *pd,
1005 struct ib_qp_init_attr *attrs)
1006{
1007 qp->pd = pd;
1008 spin_lock_init(&qp->q_lock);
1009 INIT_LIST_HEAD(&qp->sq_entry);
1010 INIT_LIST_HEAD(&qp->rq_entry);
1011
1012 qp->qp_type = attrs->qp_type;
1013 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1014 qp->max_inline_data = attrs->cap.max_inline_data;
1015 qp->sq.max_sges = attrs->cap.max_send_sge;
1016 qp->rq.max_sges = attrs->cap.max_recv_sge;
1017 qp->state = OCRDMA_QPS_RST;
1018}
1019
1020
1021static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1022 struct ib_qp_init_attr *attrs)
1023{
1024 if (attrs->qp_type == IB_QPT_GSI) {
1025 dev->gsi_qp_created = 1;
1026 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1027 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1028 }
1029}
1030
1031struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1032 struct ib_qp_init_attr *attrs,
1033 struct ib_udata *udata)
1034{
1035 int status;
1036 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1037 struct ocrdma_qp *qp;
 1038 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1039 struct ocrdma_create_qp_ureq ureq;
1040 u16 dpp_credit_lmt, dpp_offset;
1041
1042 status = ocrdma_check_qp_params(ibpd, dev, attrs);
1043 if (status)
1044 goto gen_err;
1045
1046 memset(&ureq, 0, sizeof(ureq));
1047 if (udata) {
1048 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1049 return ERR_PTR(-EFAULT);
1050 }
1051 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1052 if (!qp) {
1053 status = -ENOMEM;
1054 goto gen_err;
1055 }
1056 qp->dev = dev;
1057 ocrdma_set_qp_init_params(qp, pd, attrs);
1058 if (udata == NULL)
1059 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1060 OCRDMA_QP_FAST_REG);
1061
1062 mutex_lock(&dev->dev_lock);
1063 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1064 ureq.dpp_cq_id,
1065 &dpp_offset, &dpp_credit_lmt);
1066 if (status)
1067 goto mbx_err;
1068
 1069 /* user space QP's wr_id tables are managed in the library */
1070 if (udata == NULL) {
1071 status = ocrdma_alloc_wr_id_tbl(qp);
1072 if (status)
1073 goto map_err;
1074 }
1075
1076 status = ocrdma_add_qpn_map(dev, qp);
1077 if (status)
1078 goto map_err;
1079 ocrdma_set_qp_db(dev, qp, pd);
1080 if (udata) {
1081 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1082 dpp_credit_lmt,
1083 (attrs->srq != NULL));
1084 if (status)
1085 goto cpy_err;
1086 }
1087 ocrdma_store_gsi_qp_cq(dev, attrs);
 1088 qp->ibqp.qp_num = qp->id;
1089 mutex_unlock(&dev->dev_lock);
1090 return &qp->ibqp;
1091
1092cpy_err:
1093 ocrdma_del_qpn_map(dev, qp);
1094map_err:
1095 ocrdma_mbx_destroy_qp(dev, qp);
1096mbx_err:
1097 mutex_unlock(&dev->dev_lock);
1098 kfree(qp->wqe_wr_id_tbl);
1099 kfree(qp->rqe_wr_id_tbl);
1100 kfree(qp);
 1101 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1102gen_err:
1103 return ERR_PTR(status);
1104}
1105
1106
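/* Flush any cached RQ doorbell writes: the count of posted RQEs is placed
 * above the dbid at the device-specific num_posted shift returned by
 * ocrdma_get_num_posted_shift() (24 on ERX2/GEN2 regardless of QID).
 */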
1107static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
1108{
1109 if (qp->db_cache) {
1110 u32 val = qp->rq.dbid | (qp->db_cache <<
1111 ocrdma_get_num_posted_shift(qp));
1112 iowrite32(val, qp->rq_db);
1113 qp->db_cache = 0;
1114 }
1115}
1116
1117int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1118 int attr_mask)
1119{
1120 int status = 0;
1121 struct ocrdma_qp *qp;
1122 struct ocrdma_dev *dev;
1123 enum ib_qp_state old_qps;
1124
1125 qp = get_ocrdma_qp(ibqp);
1126 dev = qp->dev;
1127 if (attr_mask & IB_QP_STATE)
 1128 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1129 /* if new and previous states are same hw doesn't need to
1130 * know about it.
1131 */
1132 if (status < 0)
1133 return status;
1134 status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
1135 if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
1136 ocrdma_flush_rq_db(qp);
1137
1138 return status;
1139}
1140
1141int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1142 int attr_mask, struct ib_udata *udata)
1143{
1144 unsigned long flags;
1145 int status = -EINVAL;
1146 struct ocrdma_qp *qp;
1147 struct ocrdma_dev *dev;
1148 enum ib_qp_state old_qps, new_qps;
1149
1150 qp = get_ocrdma_qp(ibqp);
1151 dev = qp->dev;
1152
 1153 /* synchronize with multiple contexts trying to change/retrieve the qp state */
1154 mutex_lock(&dev->dev_lock);
 1155 /* synchronize with wqe, rqe posting and cqe processing contexts */
1156 spin_lock_irqsave(&qp->q_lock, flags);
1157 old_qps = get_ibqp_state(qp->state);
1158 if (attr_mask & IB_QP_STATE)
1159 new_qps = attr->qp_state;
1160 else
1161 new_qps = old_qps;
1162 spin_unlock_irqrestore(&qp->q_lock, flags);
1163
1164 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1165 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1166 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1167 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1168 old_qps, new_qps);
1169 goto param_err;
1170 }
1171
1172 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1173 if (status > 0)
1174 status = 0;
1175param_err:
1176 mutex_unlock(&dev->dev_lock);
1177 return status;
1178}
1179
1180static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1181{
1182 switch (mtu) {
1183 case 256:
1184 return IB_MTU_256;
1185 case 512:
1186 return IB_MTU_512;
1187 case 1024:
1188 return IB_MTU_1024;
1189 case 2048:
1190 return IB_MTU_2048;
1191 case 4096:
1192 return IB_MTU_4096;
1193 default:
1194 return IB_MTU_1024;
1195 }
1196}
1197
1198static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1199{
1200 int ib_qp_acc_flags = 0;
1201
1202 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1203 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1204 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1205 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1206 return ib_qp_acc_flags;
1207}
1208
1209int ocrdma_query_qp(struct ib_qp *ibqp,
1210 struct ib_qp_attr *qp_attr,
1211 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1212{
1213 int status;
1214 u32 qp_state;
1215 struct ocrdma_qp_params params;
1216 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1217 struct ocrdma_dev *dev = qp->dev;
1218
1219 memset(&params, 0, sizeof(params));
1220 mutex_lock(&dev->dev_lock);
1221 status = ocrdma_mbx_query_qp(dev, qp, &params);
1222 mutex_unlock(&dev->dev_lock);
1223 if (status)
1224 goto mbx_err;
1225 qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1226 qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1227 qp_attr->path_mtu =
1228 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1229 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1230 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1231 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1232 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1233 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1234 qp_attr->dest_qp_num =
1235 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1236
1237 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1238 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1239 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1240 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1241 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1242 qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
1243 qp_init_attr->cap = qp_attr->cap;
1244 memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1245 sizeof(params.dgid));
1246 qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1247 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1248 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1249 qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1250 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1251 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1252 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1253 OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
1254 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1255
1256 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1257 qp_attr->ah_attr.port_num = 1;
1258 qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1259 OCRDMA_QP_PARAMS_SL_MASK) >>
1260 OCRDMA_QP_PARAMS_SL_SHIFT;
1261 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1262 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1263 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1264 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1265 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1266 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1267 qp_attr->retry_cnt =
1268 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1269 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1270 qp_attr->min_rnr_timer = 0;
1271 qp_attr->pkey_index = 0;
1272 qp_attr->port_num = 1;
1273 qp_attr->ah_attr.src_path_bits = 0;
1274 qp_attr->ah_attr.static_rate = 0;
1275 qp_attr->alt_pkey_index = 0;
1276 qp_attr->alt_port_num = 0;
1277 qp_attr->alt_timeout = 0;
1278 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1279 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1280 OCRDMA_QP_PARAMS_STATE_SHIFT;
1281 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1282 qp_attr->max_dest_rd_atomic =
1283 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1284 qp_attr->max_rd_atomic =
1285 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1286 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1287 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1288mbx_err:
1289 return status;
1290}
1291
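/* Toggle the allocation bit for SRQ index 'idx' in the idx_bit_fields
 * bitmap (32 indexes per word).
 */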
1292static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1293{
1294 int i = idx / 32;
1295 unsigned int mask = (1 << (idx % 32));
1296
1297 if (srq->idx_bit_fields[i] & mask)
1298 srq->idx_bit_fields[i] &= ~mask;
1299 else
1300 srq->idx_bit_fields[i] |= mask;
1301}
1302
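/* Number of free entries in a circular hardware work queue, derived from
 * its head and tail indexes.
 */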
1303static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1304{
 1305 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1306}
1307
1308static int is_hw_sq_empty(struct ocrdma_qp *qp)
1309{
 1310 return (qp->sq.tail == qp->sq.head);
1311}
1312
1313static int is_hw_rq_empty(struct ocrdma_qp *qp)
1314{
 1315 return (qp->rq.tail == qp->rq.head);
1316}
1317
1318static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1319{
1320 return q->va + (q->head * q->entry_size);
1321}
1322
1323static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1324 u32 idx)
1325{
1326 return q->va + (idx * q->entry_size);
1327}
1328
1329static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1330{
1331 q->head = (q->head + 1) & q->max_wqe_idx;
1332}
1333
1334static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1335{
1336 q->tail = (q->tail + 1) & q->max_wqe_idx;
1337}
1338
1339/* discard the cqe for a given QP */
1340static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1341{
1342 unsigned long cq_flags;
1343 unsigned long flags;
1344 int discard_cnt = 0;
1345 u32 cur_getp, stop_getp;
1346 struct ocrdma_cqe *cqe;
1347 u32 qpn = 0;
1348
1349 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1350
1351 /* traverse through the CQEs in the hw CQ,
1352 * find the matching CQE for a given qp,
1353 * mark the matching one discarded by clearing qpn.
1354 * ring the doorbell in the poll_cq() as
1355 * we don't complete out of order cqe.
1356 */
1357
1358 cur_getp = cq->getp;
 1359 /* find up to where we reap the cq. */
1360 stop_getp = cur_getp;
1361 do {
1362 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1363 break;
1364
1365 cqe = cq->va + cur_getp;
1366 /* if (a) done reaping whole hw cq, or
1367 * (b) qp_xq becomes empty.
1368 * then exit
1369 */
1370 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1371 /* if previously discarded cqe found, skip that too. */
1372 /* check for matching qp */
1373 if (qpn == 0 || qpn != qp->id)
1374 goto skip_cqe;
1375
1376 /* mark cqe discarded so that it is not picked up later
1377 * in the poll_cq().
1378 */
1379 discard_cnt += 1;
1380 cqe->cmn.qpn = 0;
 1381 if (is_cqe_for_sq(cqe)) {
 1382 ocrdma_hwq_inc_tail(&qp->sq);
 1383 } else {
1384 if (qp->srq) {
1385 spin_lock_irqsave(&qp->srq->q_lock, flags);
1386 ocrdma_hwq_inc_tail(&qp->srq->rq);
1387 ocrdma_srq_toggle_bit(qp->srq, cur_getp);
1388 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1389
 1390 } else {
 1391 ocrdma_hwq_inc_tail(&qp->rq);
 1392 }
1393 }
1394skip_cqe:
1395 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1396 } while (cur_getp != stop_getp);
1397 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1398}
1399
 1400void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1401{
1402 int found = false;
1403 unsigned long flags;
1404 struct ocrdma_dev *dev = qp->dev;
1405 /* sync with any active CQ poll */
1406
1407 spin_lock_irqsave(&dev->flush_q_lock, flags);
1408 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1409 if (found)
1410 list_del(&qp->sq_entry);
1411 if (!qp->srq) {
1412 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1413 if (found)
1414 list_del(&qp->rq_entry);
1415 }
1416 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1417}
1418
1419int ocrdma_destroy_qp(struct ib_qp *ibqp)
1420{
1421 int status;
1422 struct ocrdma_pd *pd;
1423 struct ocrdma_qp *qp;
1424 struct ocrdma_dev *dev;
1425 struct ib_qp_attr attrs;
1426 int attr_mask = IB_QP_STATE;
 1427 unsigned long flags;
1428
1429 qp = get_ocrdma_qp(ibqp);
1430 dev = qp->dev;
1431
1432 attrs.qp_state = IB_QPS_ERR;
1433 pd = qp->pd;
1434
1435 /* change the QP state to ERROR */
1436 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1437
 1438 /* ensure that CQEs for a newly created QP (whose id may be the same as
 1439 * the one just getting destroyed) don't get discarded until the old
 1440 * CQEs are discarded.
1441 */
1442 mutex_lock(&dev->dev_lock);
1443 status = ocrdma_mbx_destroy_qp(dev, qp);
1444
1445 /*
1446 * acquire CQ lock while destroy is in progress, in order to
 1447 * protect against processing in-flight CQEs for this QP.
1448 */
 1449 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
 1450 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
 1451 spin_lock(&qp->rq_cq->cq_lock);
1452
1453 ocrdma_del_qpn_map(dev, qp);
1454
1455 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1456 spin_unlock(&qp->rq_cq->cq_lock);
1457 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1458
1459 if (!pd->uctx) {
1460 ocrdma_discard_cqes(qp, qp->sq_cq);
1461 ocrdma_discard_cqes(qp, qp->rq_cq);
1462 }
1463 mutex_unlock(&dev->dev_lock);
1464
1465 if (pd->uctx) {
1466 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1467 PAGE_ALIGN(qp->sq.len));
 1468 if (!qp->srq)
1469 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1470 PAGE_ALIGN(qp->rq.len));
1471 }
1472
1473 ocrdma_del_flush_qp(qp);
1474
1475 kfree(qp->wqe_wr_id_tbl);
1476 kfree(qp->rqe_wr_id_tbl);
1477 kfree(qp);
1478 return status;
1479}
1480
1481static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1482 struct ib_udata *udata)
1483{
1484 int status;
1485 struct ocrdma_create_srq_uresp uresp;
1486
 1487 memset(&uresp, 0, sizeof(uresp));
1488 uresp.rq_dbid = srq->rq.dbid;
1489 uresp.num_rq_pages = 1;
1490 uresp.rq_page_addr[0] = srq->rq.pa;
1491 uresp.rq_page_size = srq->rq.len;
1492 uresp.db_page_addr = dev->nic_info.unmapped_db +
1493 (srq->pd->id * dev->nic_info.db_page_size);
1494 uresp.db_page_size = dev->nic_info.db_page_size;
 1495 uresp.num_rqe_allocated = srq->rq.max_cnt;
 1496 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
 1497 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1498 uresp.db_shift = 24;
1499 } else {
1500 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1501 uresp.db_shift = 16;
1502 }
1503
1504 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1505 if (status)
1506 return status;
1507 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1508 uresp.rq_page_size);
1509 if (status)
1510 return status;
1511 return status;
1512}
1513
1514struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1515 struct ib_srq_init_attr *init_attr,
1516 struct ib_udata *udata)
1517{
1518 int status = -ENOMEM;
1519 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 1520 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1521 struct ocrdma_srq *srq;
1522
1523 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1524 return ERR_PTR(-EINVAL);
1525 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1526 return ERR_PTR(-EINVAL);
1527
1528 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1529 if (!srq)
1530 return ERR_PTR(status);
1531
1532 spin_lock_init(&srq->q_lock);
1533 srq->pd = pd;
1534 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
 1535 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1536 if (status)
1537 goto err;
1538
1539 if (udata == NULL) {
1540 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1541 GFP_KERNEL);
1542 if (srq->rqe_wr_id_tbl == NULL)
1543 goto arm_err;
1544
1545 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1546 (srq->rq.max_cnt % 32 ? 1 : 0);
1547 srq->idx_bit_fields =
1548 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1549 if (srq->idx_bit_fields == NULL)
1550 goto arm_err;
1551 memset(srq->idx_bit_fields, 0xff,
1552 srq->bit_fields_len * sizeof(u32));
1553 }
1554
1555 if (init_attr->attr.srq_limit) {
1556 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1557 if (status)
1558 goto arm_err;
1559 }
1560
 1561 if (udata) {
 1562 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1563 if (status)
1564 goto arm_err;
1565 }
1566
1567 return &srq->ibsrq;
1568
1569arm_err:
1570 ocrdma_mbx_destroy_srq(dev, srq);
1571err:
1572 kfree(srq->rqe_wr_id_tbl);
1573 kfree(srq->idx_bit_fields);
1574 kfree(srq);
1575 return ERR_PTR(status);
1576}
1577
1578int ocrdma_modify_srq(struct ib_srq *ibsrq,
1579 struct ib_srq_attr *srq_attr,
1580 enum ib_srq_attr_mask srq_attr_mask,
1581 struct ib_udata *udata)
1582{
1583 int status = 0;
1584 struct ocrdma_srq *srq;
1585
1586 srq = get_ocrdma_srq(ibsrq);
1587 if (srq_attr_mask & IB_SRQ_MAX_WR)
1588 status = -EINVAL;
1589 else
1590 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1591 return status;
1592}
1593
1594int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1595{
1596 int status;
1597 struct ocrdma_srq *srq;
1598
1599 srq = get_ocrdma_srq(ibsrq);
1600 status = ocrdma_mbx_query_srq(srq, srq_attr);
1601 return status;
1602}
1603
1604int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1605{
1606 int status;
1607 struct ocrdma_srq *srq;
 1608 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1609
1610 srq = get_ocrdma_srq(ibsrq);
1611
1612 status = ocrdma_mbx_destroy_srq(dev, srq);
1613
1614 if (srq->pd->uctx)
1615 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1616 PAGE_ALIGN(srq->rq.len));
 1617
1618 kfree(srq->idx_bit_fields);
1619 kfree(srq->rqe_wr_id_tbl);
1620 kfree(srq);
1621 return status;
1622}
1623
1624/* unprivileged verbs and their support functions. */
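/* Fill the extended UD header (destination QPN, qkey and AH id) that
 * follows the base WQE header for UD and GSI sends.
 */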
1625static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1626 struct ocrdma_hdr_wqe *hdr,
1627 struct ib_send_wr *wr)
1628{
1629 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1630 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1631 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1632
1633 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1634 if (qp->qp_type == IB_QPT_GSI)
1635 ud_hdr->qkey = qp->qkey;
1636 else
1637 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1638 ud_hdr->rsvd_ahid = ah->id;
1639}
1640
1641static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1642 struct ocrdma_sge *sge, int num_sge,
1643 struct ib_sge *sg_list)
1644{
1645 int i;
1646
1647 for (i = 0; i < num_sge; i++) {
1648 sge[i].lrkey = sg_list[i].lkey;
1649 sge[i].addr_lo = sg_list[i].addr;
1650 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1651 sge[i].len = sg_list[i].length;
1652 hdr->total_len += sg_list[i].length;
1653 }
1654 if (num_sge == 0)
1655 memset(sge, 0, sizeof(*sge));
1656}
1657
1658static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1659 struct ocrdma_hdr_wqe *hdr,
1660 struct ocrdma_sge *sge,
1661 struct ib_send_wr *wr, u32 wqe_size)
1662{
 1663 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
 1664 if (wr->sg_list[0].length > qp->max_inline_data) {
1665 pr_err("%s() supported_len=0x%x,\n"
 1666 " unsupported len req=0x%x\n", __func__,
1667 qp->max_inline_data, wr->sg_list[0].length);
1668 return -EINVAL;
1669 }
1670 memcpy(sge,
1671 (void *)(unsigned long)wr->sg_list[0].addr,
1672 wr->sg_list[0].length);
1673 hdr->total_len = wr->sg_list[0].length;
1674 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1675 if (0 == wr->sg_list[0].length)
1676 wqe_size += sizeof(struct ocrdma_sge);
1677 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1678 } else {
1679 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1680 if (wr->num_sge)
1681 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1682 else
1683 wqe_size += sizeof(struct ocrdma_sge);
1684 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1685 }
1686 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1687 return 0;
1688}
1689
1690static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1691 struct ib_send_wr *wr)
1692{
1693 int status;
1694 struct ocrdma_sge *sge;
1695 u32 wqe_size = sizeof(*hdr);
1696
1697 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1698 ocrdma_build_ud_hdr(qp, hdr, wr);
1699 sge = (struct ocrdma_sge *)(hdr + 2);
1700 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
 1701 } else {
 1702 sge = (struct ocrdma_sge *)(hdr + 1);
 1703 }
1704
1705 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1706 return status;
1707}
1708
1709static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1710 struct ib_send_wr *wr)
1711{
1712 int status;
1713 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1714 struct ocrdma_sge *sge = ext_rw + 1;
1715 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1716
1717 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1718 if (status)
1719 return status;
1720 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1721 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1722 ext_rw->lrkey = wr->wr.rdma.rkey;
1723 ext_rw->len = hdr->total_len;
1724 return 0;
1725}
1726
1727static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1728 struct ib_send_wr *wr)
1729{
1730 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1731 struct ocrdma_sge *sge = ext_rw + 1;
1732 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
1733 sizeof(struct ocrdma_hdr_wqe);
1734
1735 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1736 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1737 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
1738 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1739
1740 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1741 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1742 ext_rw->lrkey = wr->wr.rdma.rkey;
1743 ext_rw->len = hdr->total_len;
1744}
1745
1746static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
1747 struct ocrdma_hw_mr *hwmr)
1748{
1749 int i;
1750 u64 buf_addr = 0;
1751 int num_pbes;
1752 struct ocrdma_pbe *pbe;
1753
1754 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1755 num_pbes = 0;
1756
1757 /* go through the OS phy regions & fill hw pbe entries into pbls. */
1758 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
1759 /* number of pbes can be more for one OS buf, when
1760 * buffers are of different sizes.
1761 * split the ib_buf to one or more pbes.
1762 */
1763 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
1764 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
1765 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
1766 num_pbes += 1;
1767 pbe++;
1768
1769 /* if the pbl is full storing the pbes,
1770 * move to next pbl.
1771 */
1772 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
1773 pbl_tbl++;
1774 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1775 }
1776 }
1777 return;
1778}
1779
1780static int get_encoded_page_size(int pg_sz)
1781{
1782 /* Max size is 256M 4096 << 16 */
1783 int i = 0;
1784 for (; i < 17; i++)
1785 if (pg_sz == (4096 << i))
1786 break;
1787 return i;
1788}
1789
1790
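/* Build a FAST_REG_MR WQE: validate the page list against device limits,
 * encode the access flags, iova, fbo and page size, and copy the caller's
 * page list into the MR's PBL table.
 */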
1791static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1792 struct ib_send_wr *wr)
1793{
1794 u64 fbo;
1795 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
1796 struct ocrdma_mr *mr;
1797 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
1798
1799 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
1800
1801 if ((wr->wr.fast_reg.page_list_len >
1802 qp->dev->attr.max_pages_per_frmr) ||
1803 (wr->wr.fast_reg.length > 0xffffffffULL))
1804 return -EINVAL;
1805
1806 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
1807 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1808
1809 if (wr->wr.fast_reg.page_list_len == 0)
1810 BUG();
1811 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
1812 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
1813 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
1814 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
1815 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
1816 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
1817 hdr->lkey = wr->wr.fast_reg.rkey;
1818 hdr->total_len = wr->wr.fast_reg.length;
1819
1820 fbo = wr->wr.fast_reg.iova_start -
1821 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
1822
1823 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
1824 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
1825 fast_reg->fbo_hi = upper_32_bits(fbo);
1826 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
1827 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
1828 fast_reg->size_sge =
1829 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
1830 mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) &
1831 (OCRDMA_MAX_STAG - 1)];
1832 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
1833 return 0;
1834}
1835
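/* Ring the SQ doorbell: the SQ id together with a post count of one,
 * so the adapter starts processing the newly posted WQE.
 */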
1836static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
1837{
1838 u32 val = qp->sq.dbid | (1 << 16);
1839
1840 iowrite32(val, qp->sq_db);
1841}
1842
1843int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1844 struct ib_send_wr **bad_wr)
1845{
1846 int status = 0;
1847 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1848 struct ocrdma_hdr_wqe *hdr;
1849 unsigned long flags;
1850
1851 spin_lock_irqsave(&qp->q_lock, flags);
1852 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
1853 spin_unlock_irqrestore(&qp->q_lock, flags);
f6ddcf71 1854 *bad_wr = wr;
1855 return -EINVAL;
1856 }
1857
1858 while (wr) {
1859 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1860 wr->num_sge > qp->sq.max_sges) {
f6ddcf71 1861 *bad_wr = wr;
1862 status = -ENOMEM;
1863 break;
1864 }
1865 hdr = ocrdma_hwq_head(&qp->sq);
1866 hdr->cw = 0;
1867 if (wr->send_flags & IB_SEND_SIGNALED)
1868 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1869 if (wr->send_flags & IB_SEND_FENCE)
1870 hdr->cw |=
1871 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
1872 if (wr->send_flags & IB_SEND_SOLICITED)
1873 hdr->cw |=
1874 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
1875 hdr->total_len = 0;
1876 switch (wr->opcode) {
1877 case IB_WR_SEND_WITH_IMM:
1878 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1879 hdr->immdt = ntohl(wr->ex.imm_data);
1880 case IB_WR_SEND:
1881 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1882 ocrdma_build_send(qp, hdr, wr);
1883 break;
1884 case IB_WR_SEND_WITH_INV:
1885 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
1886 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1887 hdr->lkey = wr->ex.invalidate_rkey;
1888 status = ocrdma_build_send(qp, hdr, wr);
1889 break;
1890 case IB_WR_RDMA_WRITE_WITH_IMM:
1891 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1892 hdr->immdt = ntohl(wr->ex.imm_data);
1893 case IB_WR_RDMA_WRITE:
1894 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
1895 status = ocrdma_build_write(qp, hdr, wr);
1896 break;
1897 case IB_WR_RDMA_READ_WITH_INV:
1898 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
1899 case IB_WR_RDMA_READ:
1900 ocrdma_build_read(qp, hdr, wr);
1901 break;
1902 case IB_WR_LOCAL_INV:
1903 hdr->cw |=
1904 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
1905 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
1906 sizeof(struct ocrdma_sge)) /
1907 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
1908 hdr->lkey = wr->ex.invalidate_rkey;
1909 break;
1910 case IB_WR_FAST_REG_MR:
1911 status = ocrdma_build_fr(qp, hdr, wr);
1912 break;
1913 default:
1914 status = -EINVAL;
1915 break;
1916 }
1917 if (status) {
1918 *bad_wr = wr;
1919 break;
1920 }
1921 if (wr->send_flags & IB_SEND_SIGNALED)
1922 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
1923 else
1924 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
1925 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
1926 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
1927 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
1928 /* make sure wqe is written before adapter can access it */
1929 wmb();
1930 /* inform hw to start processing it */
1931 ocrdma_ring_sq_db(qp);
1932
1933 /* update pointer, counter for next wr */
1934 ocrdma_hwq_inc_head(&qp->sq);
1935 wr = wr->next;
1936 }
1937 spin_unlock_irqrestore(&qp->q_lock, flags);
1938 return status;
1939}
1940
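/* Ring the RQ doorbell: the RQ id with one posted RQE encoded at the
 * shift returned by ocrdma_get_num_posted_shift(). While the QP is
 * still in INIT the write is deferred and counted in db_cache.
 */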
1941static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
1942{
df176ea0 1943 u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
fe2caefc 1944
1945 if (qp->state != OCRDMA_QPS_INIT)
1946 iowrite32(val, qp->rq_db);
1947 else
1948 qp->db_cache++;
1949}
1950
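/* Build a receive WQE: encode the stride-based size, mark it signaled,
 * record the tag (SRQ shadow index, 0 for ordinary QPs), copy the SGEs
 * from the ib_recv_wr and convert the WQE to little-endian.
 */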
1951static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
1952 u16 tag)
1953{
1954 u32 wqe_size = 0;
1955 struct ocrdma_sge *sge;
1956 if (wr->num_sge)
1957 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
1958 else
1959 wqe_size = sizeof(*sge) + sizeof(*rqe);
1960
1961 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
1962 OCRDMA_WQE_SIZE_SHIFT);
1963 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1964 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1965 rqe->total_len = 0;
1966 rqe->rsvd_tag = tag;
1967 sge = (struct ocrdma_sge *)(rqe + 1);
1968 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
1969 ocrdma_cpu_to_le32(rqe, wqe_size);
1970}
1971
1972int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1973 struct ib_recv_wr **bad_wr)
1974{
1975 int status = 0;
1976 unsigned long flags;
1977 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1978 struct ocrdma_hdr_wqe *rqe;
1979
1980 spin_lock_irqsave(&qp->q_lock, flags);
1981 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
1982 spin_unlock_irqrestore(&qp->q_lock, flags);
1983 *bad_wr = wr;
1984 return -EINVAL;
1985 }
1986 while (wr) {
1987 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
1988 wr->num_sge > qp->rq.max_sges) {
1989 *bad_wr = wr;
1990 status = -ENOMEM;
1991 break;
1992 }
1993 rqe = ocrdma_hwq_head(&qp->rq);
1994 ocrdma_build_rqe(rqe, wr, 0);
1995
1996 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
1997 /* make sure rqe is written before adapter can access it */
1998 wmb();
1999
2000 /* inform hw to start processing it */
2001 ocrdma_ring_rq_db(qp);
2002
2003 /* update pointer, counter for next wr */
2004 ocrdma_hwq_inc_head(&qp->rq);
2005 wr = wr->next;
2006 }
2007 spin_unlock_irqrestore(&qp->q_lock, flags);
2008 return status;
2009}
2010
 2011/* cqes for an srq's rqes can arrive out of order.
 2012 * the returned index selects the shadow table entry in which to
 2013 * store the wr_id; the tag/index is echoed back in the cqe so the
 2014 * corresponding rqe can be referenced later.
 2015 */
2016static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2017{
2018 int row = 0;
2019 int indx = 0;
2020
2021 for (row = 0; row < srq->bit_fields_len; row++) {
2022 if (srq->idx_bit_fields[row]) {
2023 indx = ffs(srq->idx_bit_fields[row]);
2024 indx = (row * 32) + (indx - 1);
2025 if (indx >= srq->rq.max_cnt)
2026 BUG();
2027 ocrdma_srq_toggle_bit(srq, indx);
2028 break;
2029 }
2030 }
2031
2032 if (row == srq->bit_fields_len)
2033 BUG();
2034 return indx;
2035}
2036
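/* Ring the SRQ doorbell (at the GEN2 SRQ offset) with the SRQ id and
 * one posted RQE.
 */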
2037static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2038{
2039 u32 val = srq->rq.dbid | (1 << 16);
2040
2041 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2042}
2043
2044int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2045 struct ib_recv_wr **bad_wr)
2046{
2047 int status = 0;
2048 unsigned long flags;
2049 struct ocrdma_srq *srq;
2050 struct ocrdma_hdr_wqe *rqe;
2051 u16 tag;
2052
2053 srq = get_ocrdma_srq(ibsrq);
2054
2055 spin_lock_irqsave(&srq->q_lock, flags);
2056 while (wr) {
2057 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2058 wr->num_sge > srq->rq.max_sges) {
2059 status = -ENOMEM;
2060 *bad_wr = wr;
2061 break;
2062 }
2063 tag = ocrdma_srq_get_idx(srq);
2064 rqe = ocrdma_hwq_head(&srq->rq);
2065 ocrdma_build_rqe(rqe, wr, tag);
2066
2067 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2068 /* make sure rqe is written before adapter can perform DMA */
2069 wmb();
2070 /* inform hw to start processing it */
2071 ocrdma_ring_srq_db(srq);
2072 /* update pointer, counter for next wr */
2073 ocrdma_hwq_inc_head(&srq->rq);
2074 wr = wr->next;
2075 }
2076 spin_unlock_irqrestore(&srq->q_lock, flags);
2077 return status;
2078}
2079
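/* Translate an adapter CQE error status into the matching ib_wc_status;
 * unknown values map to IB_WC_GENERAL_ERR.
 */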
2080static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2081{
f99b1649 2082 enum ib_wc_status ibwc_status;
2083
2084 switch (status) {
2085 case OCRDMA_CQE_GENERAL_ERR:
2086 ibwc_status = IB_WC_GENERAL_ERR;
2087 break;
2088 case OCRDMA_CQE_LOC_LEN_ERR:
2089 ibwc_status = IB_WC_LOC_LEN_ERR;
2090 break;
2091 case OCRDMA_CQE_LOC_QP_OP_ERR:
2092 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2093 break;
2094 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2095 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2096 break;
2097 case OCRDMA_CQE_LOC_PROT_ERR:
2098 ibwc_status = IB_WC_LOC_PROT_ERR;
2099 break;
2100 case OCRDMA_CQE_WR_FLUSH_ERR:
2101 ibwc_status = IB_WC_WR_FLUSH_ERR;
2102 break;
2103 case OCRDMA_CQE_MW_BIND_ERR:
2104 ibwc_status = IB_WC_MW_BIND_ERR;
2105 break;
2106 case OCRDMA_CQE_BAD_RESP_ERR:
2107 ibwc_status = IB_WC_BAD_RESP_ERR;
2108 break;
2109 case OCRDMA_CQE_LOC_ACCESS_ERR:
2110 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2111 break;
2112 case OCRDMA_CQE_REM_INV_REQ_ERR:
2113 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2114 break;
2115 case OCRDMA_CQE_REM_ACCESS_ERR:
2116 ibwc_status = IB_WC_REM_ACCESS_ERR;
2117 break;
2118 case OCRDMA_CQE_REM_OP_ERR:
2119 ibwc_status = IB_WC_REM_OP_ERR;
2120 break;
2121 case OCRDMA_CQE_RETRY_EXC_ERR:
2122 ibwc_status = IB_WC_RETRY_EXC_ERR;
2123 break;
2124 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2125 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2126 break;
2127 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2128 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2129 break;
2130 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2131 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2132 break;
2133 case OCRDMA_CQE_REM_ABORT_ERR:
2134 ibwc_status = IB_WC_REM_ABORT_ERR;
2135 break;
2136 case OCRDMA_CQE_INV_EECN_ERR:
2137 ibwc_status = IB_WC_INV_EECN_ERR;
2138 break;
2139 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2140 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2141 break;
2142 case OCRDMA_CQE_FATAL_ERR:
2143 ibwc_status = IB_WC_FATAL_ERR;
2144 break;
2145 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2146 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2147 break;
2148 default:
2149 ibwc_status = IB_WC_GENERAL_ERR;
2150 break;
2151 };
2152 return ibwc_status;
2153}
2154
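/* Fill a work completion for the SQ WQE at wqe_idx: the wr_id comes from
 * the shadow table and the IB opcode is derived from the little-endian
 * WQE header; RDMA reads also report the transferred length.
 */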
2155static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2156 u32 wqe_idx)
2157{
2158 struct ocrdma_hdr_wqe *hdr;
2159 struct ocrdma_sge *rw;
2160 int opcode;
2161
2162 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2163
2164 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2165 /* Undo the hdr->cw swap */
2166 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2167 switch (opcode) {
2168 case OCRDMA_WRITE:
2169 ibwc->opcode = IB_WC_RDMA_WRITE;
2170 break;
2171 case OCRDMA_READ:
2172 rw = (struct ocrdma_sge *)(hdr + 1);
2173 ibwc->opcode = IB_WC_RDMA_READ;
2174 ibwc->byte_len = rw->len;
2175 break;
2176 case OCRDMA_SEND:
2177 ibwc->opcode = IB_WC_SEND;
2178 break;
2179 case OCRDMA_FR_MR:
2180 ibwc->opcode = IB_WC_FAST_REG_MR;
2181 break;
2182 case OCRDMA_LKEY_INV:
2183 ibwc->opcode = IB_WC_LOCAL_INV;
2184 break;
2185 default:
2186 ibwc->status = IB_WC_GENERAL_ERR;
2187 pr_err("%s() invalid opcode received = 0x%x\n",
2188 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2189 break;
2190 };
2191}
2192
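/* Overwrite the CQE status with WR_FLUSH_ERR, using the UD status
 * layout for receive CQEs of UD/GSI QPs and the regular layout
 * otherwise.
 */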
2193static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2194 struct ocrdma_cqe *cqe)
2195{
2196 if (is_cqe_for_sq(cqe)) {
2197 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2198 cqe->flags_status_srcqpn) &
2199 ~OCRDMA_CQE_STATUS_MASK);
2200 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2201 cqe->flags_status_srcqpn) |
2202 (OCRDMA_CQE_WR_FLUSH_ERR <<
2203 OCRDMA_CQE_STATUS_SHIFT));
2204 } else {
2205 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2206 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2207 cqe->flags_status_srcqpn) &
2208 ~OCRDMA_CQE_UD_STATUS_MASK);
2209 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2210 cqe->flags_status_srcqpn) |
2211 (OCRDMA_CQE_WR_FLUSH_ERR <<
2212 OCRDMA_CQE_UD_STATUS_SHIFT));
2213 } else {
2214 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2215 cqe->flags_status_srcqpn) &
2216 ~OCRDMA_CQE_STATUS_MASK);
2217 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2218 cqe->flags_status_srcqpn) |
2219 (OCRDMA_CQE_WR_FLUSH_ERR <<
2220 OCRDMA_CQE_STATUS_SHIFT));
2221 }
2222 }
2223}
2224
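/* Common error-completion path: map the hardware status to an
 * ib_wc_status, flush the QP and move it to the error state. Returns
 * true ("expand") while WQEs/RQEs are still pending, so the caller keeps
 * synthesizing flush completions from this CQE.
 */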
2225static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2226 struct ocrdma_qp *qp, int status)
2227{
2228 bool expand = false;
2229
2230 ibwc->byte_len = 0;
2231 ibwc->qp = &qp->ibqp;
2232 ibwc->status = ocrdma_to_ibwc_err(status);
2233
2234 ocrdma_flush_qp(qp);
057729cb 2235 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2236
 2237 /* if wqes/rqes are still pending for which cqes must be
 2238 * returned, trigger expanding this cqe into flush completions.
 2239 */
2240 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2241 expand = true;
2242 ocrdma_set_cqe_status_flushed(qp, cqe);
2243 }
2244 return expand;
2245}
2246
2247static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2248 struct ocrdma_qp *qp, int status)
2249{
2250 ibwc->opcode = IB_WC_RECV;
2251 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2252 ocrdma_hwq_inc_tail(&qp->rq);
2253
2254 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2255}
2256
2257static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2258 struct ocrdma_qp *qp, int status)
2259{
2260 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2261 ocrdma_hwq_inc_tail(&qp->sq);
2262
2263 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2264}
2265
2266
2267static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2268 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2269 bool *polled, bool *stop)
2270{
2271 bool expand;
2272 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2273 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2274
 2275 /* when the hw sq is empty but the rq is not, keep the cqe
 2276 * so that the cq event is generated again.
 2277 */
2278 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
 2279 /* when the rq and sq share the same cq, it is safe to
 2280 * return flush cqes for the RQEs.
 2281 */
2282 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2283 *polled = true;
2284 status = OCRDMA_CQE_WR_FLUSH_ERR;
2285 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2286 } else {
 2287 /* stop processing further cqes, as this cqe is needed to
 2288 * trigger the cq event on the RQ's buddy cq.
 2289 * When the QP is destroyed, this cqe will be removed
 2290 * from the cq's hardware queue.
 2291 */
2292 *polled = false;
2293 *stop = true;
2294 expand = false;
2295 }
2296 } else {
2297 *polled = true;
2298 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2299 }
2300 return expand;
2301}
2302
2303static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2304 struct ocrdma_cqe *cqe,
2305 struct ib_wc *ibwc, bool *polled)
2306{
2307 bool expand = false;
2308 int tail = qp->sq.tail;
2309 u32 wqe_idx;
2310
2311 if (!qp->wqe_wr_id_tbl[tail].signaled) {
2312 *polled = false; /* WC cannot be consumed yet */
2313 } else {
2314 ibwc->status = IB_WC_SUCCESS;
2315 ibwc->wc_flags = 0;
2316 ibwc->qp = &qp->ibqp;
2317 ocrdma_update_wc(qp, ibwc, tail);
2318 *polled = true;
fe2caefc 2319 }
2320 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2321 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2322 if (tail != wqe_idx)
2323 expand = true; /* Coalesced CQE can't be consumed yet */
2324
2325 ocrdma_hwq_inc_tail(&qp->sq);
2326 return expand;
2327}
2328
2329static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2330 struct ib_wc *ibwc, bool *polled, bool *stop)
2331{
2332 int status;
2333 bool expand;
2334
2335 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2336 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2337
2338 if (status == OCRDMA_CQE_SUCCESS)
2339 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2340 else
2341 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2342 return expand;
2343}
2344
2345static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2346{
2347 int status;
2348
2349 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2350 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2351 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2352 OCRDMA_CQE_SRCQP_MASK;
2353 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2354 OCRDMA_CQE_PKEY_MASK;
2355 ibwc->wc_flags = IB_WC_GRH;
2356 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2357 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2358 return status;
2359}
2360
2361static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2362 struct ocrdma_cqe *cqe,
2363 struct ocrdma_qp *qp)
2364{
2365 unsigned long flags;
2366 struct ocrdma_srq *srq;
2367 u32 wqe_idx;
2368
2369 srq = get_ocrdma_srq(qp->ibqp.srq);
2370 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2371 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2372 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2373 spin_lock_irqsave(&srq->q_lock, flags);
2374 ocrdma_srq_toggle_bit(srq, wqe_idx);
2375 spin_unlock_irqrestore(&srq->q_lock, flags);
2376 ocrdma_hwq_inc_tail(&srq->rq);
2377}
2378
2379static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2380 struct ib_wc *ibwc, bool *polled, bool *stop,
2381 int status)
2382{
2383 bool expand;
2384
 2385 /* when the hw rq is empty but the sq is not, keep the cqe
 2386 * so that the cq event is generated again.
 2387 */
2388 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2389 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2390 *polled = true;
2391 status = OCRDMA_CQE_WR_FLUSH_ERR;
2392 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2393 } else {
2394 *polled = false;
2395 *stop = true;
2396 expand = false;
2397 }
2398 } else {
2399 *polled = true;
fe2caefc 2400 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
a3698a9b 2401 }
2402 return expand;
2403}
2404
2405static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2406 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2407{
2408 ibwc->opcode = IB_WC_RECV;
2409 ibwc->qp = &qp->ibqp;
2410 ibwc->status = IB_WC_SUCCESS;
2411
2412 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2413 ocrdma_update_ud_rcqe(ibwc, cqe);
2414 else
2415 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2416
2417 if (is_cqe_imm(cqe)) {
2418 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2419 ibwc->wc_flags |= IB_WC_WITH_IMM;
2420 } else if (is_cqe_wr_imm(cqe)) {
2421 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2422 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2423 ibwc->wc_flags |= IB_WC_WITH_IMM;
2424 } else if (is_cqe_invalidated(cqe)) {
2425 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2426 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2427 }
f99b1649 2428 if (qp->ibqp.srq) {
fe2caefc 2429 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
f99b1649 2430 } else {
2431 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2432 ocrdma_hwq_inc_tail(&qp->rq);
2433 }
2434}
2435
2436static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2437 struct ib_wc *ibwc, bool *polled, bool *stop)
2438{
2439 int status;
2440 bool expand = false;
2441
2442 ibwc->wc_flags = 0;
f99b1649 2443 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2444 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2445 OCRDMA_CQE_UD_STATUS_MASK) >>
2446 OCRDMA_CQE_UD_STATUS_SHIFT;
f99b1649 2447 } else {
2448 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2449 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
f99b1649 2450 }
2451
2452 if (status == OCRDMA_CQE_SUCCESS) {
2453 *polled = true;
2454 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2455 } else {
2456 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2457 status);
2458 }
2459 return expand;
2460}
2461
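/* In phase-change mode flip the expected phase bit when the get pointer
 * wraps to 0; otherwise invalidate the consumed CQE in place by clearing
 * its flags/status word.
 */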
2462static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2463 u16 cur_getp)
2464{
2465 if (cq->phase_change) {
2466 if (cur_getp == 0)
2467 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
f99b1649 2468 } else {
2469 /* clear valid bit */
2470 cqe->flags_status_srcqpn = 0;
f99b1649 2471 }
2472}
2473
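/* Drain up to num_entries valid CQEs from the hardware CQ, dispatching
 * each to the SQ or RQ poll handler. Error or coalesced CQEs may expand
 * into several work completions; the CQ doorbell is updated with the
 * number of hardware CQEs consumed.
 */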
2474static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2475 struct ib_wc *ibwc)
2476{
2477 u16 qpn = 0;
2478 int i = 0;
2479 bool expand = false;
2480 int polled_hw_cqes = 0;
2481 struct ocrdma_qp *qp = NULL;
1afc0454 2482 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2483 struct ocrdma_cqe *cqe;
2484 u16 cur_getp; bool polled = false; bool stop = false;
2485
2486 cur_getp = cq->getp;
2487 while (num_entries) {
2488 cqe = cq->va + cur_getp;
2489 /* check whether valid cqe or not */
2490 if (!is_cqe_valid(cq, cqe))
2491 break;
2492 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2493 /* ignore discarded cqe */
2494 if (qpn == 0)
2495 goto skip_cqe;
2496 qp = dev->qp_tbl[qpn];
2497 BUG_ON(qp == NULL);
2498
2499 if (is_cqe_for_sq(cqe)) {
2500 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2501 &stop);
2502 } else {
2503 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2504 &stop);
2505 }
2506 if (expand)
2507 goto expand_cqe;
2508 if (stop)
2509 goto stop_cqe;
2510 /* clear qpn to avoid duplicate processing by discard_cqe() */
2511 cqe->cmn.qpn = 0;
2512skip_cqe:
2513 polled_hw_cqes += 1;
2514 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2515 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2516expand_cqe:
2517 if (polled) {
2518 num_entries -= 1;
2519 i += 1;
2520 ibwc = ibwc + 1;
2521 polled = false;
2522 }
2523 }
2524stop_cqe:
2525 cq->getp = cur_getp;
2526 if (polled_hw_cqes || expand || stop) {
2527 ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
2528 polled_hw_cqes);
2529 }
2530 return i;
2531}
2532
2533/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2534static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2535 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2536{
2537 int err_cqes = 0;
2538
2539 while (num_entries) {
2540 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2541 break;
2542 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2543 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2544 ocrdma_hwq_inc_tail(&qp->sq);
2545 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2546 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2547 ocrdma_hwq_inc_tail(&qp->rq);
f99b1649 2548 } else {
fe2caefc 2549 return err_cqes;
f99b1649 2550 }
2551 ibwc->byte_len = 0;
2552 ibwc->status = IB_WC_WR_FLUSH_ERR;
2553 ibwc = ibwc + 1;
2554 err_cqes += 1;
2555 num_entries -= 1;
2556 }
2557 return err_cqes;
2558}
2559
2560int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2561{
2562 int cqes_to_poll = num_entries;
2563 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2564 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2565 int num_os_cqe = 0, err_cqes = 0;
2566 struct ocrdma_qp *qp;
1afc0454 2567 unsigned long flags;
2568
2569 /* poll cqes from adapter CQ */
2570 spin_lock_irqsave(&cq->cq_lock, flags);
2571 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2572 spin_unlock_irqrestore(&cq->cq_lock, flags);
2573 cqes_to_poll -= num_os_cqe;
2574
2575 if (cqes_to_poll) {
2576 wc = wc + num_os_cqe;
 2577 /* the adapter returns a single error cqe when a qp moves
 2578 * to the error state, so insert error cqes with wc_status
 2579 * set to FLUSHED for the pending WQEs and RQEs of every
 2580 * QP whose SQ or RQ uses this CQ.
 2581 */
2582 spin_lock_irqsave(&dev->flush_q_lock, flags);
2583 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2584 if (cqes_to_poll == 0)
2585 break;
2586 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2587 cqes_to_poll -= err_cqes;
2588 num_os_cqe += err_cqes;
2589 wc = wc + err_cqes;
2590 }
2591 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2592 }
2593 return num_os_cqe;
2594}
2595
2596int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2597{
2598 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2599 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2600 u16 cq_id;
2601 u16 cur_getp;
2602 struct ocrdma_cqe *cqe;
1afc0454 2603 unsigned long flags;
fe2caefc 2604
fe2caefc 2605 cq_id = cq->id;
2606
2607 spin_lock_irqsave(&cq->cq_lock, flags);
2608 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2609 cq->armed = true;
2610 if (cq_flags & IB_CQ_SOLICITED)
2611 cq->solicited = true;
2612
2613 cur_getp = cq->getp;
2614 cqe = cq->va + cur_getp;
2615
 2616 /* check whether any valid cqe exists; if not, it is safe to
 2617 * arm. If a cqe is not yet consumed, let it be consumed first and
 2618 * arm afterwards to avoid spurious interrupts.
 2619 */
2620 if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
2621 cq->arm_needed = false;
2622 ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
2623 }
2624 spin_unlock_irqrestore(&cq->cq_lock, flags);
2625 return 0;
2626}
2627
2628struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2629{
2630 int status;
2631 struct ocrdma_mr *mr;
2632 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2633 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2634
2635 if (max_page_list_len > dev->attr.max_pages_per_frmr)
2636 return ERR_PTR(-EINVAL);
2637
2638 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2639 if (!mr)
2640 return ERR_PTR(-ENOMEM);
2641
2642 status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2643 if (status)
2644 goto pbl_err;
2645 mr->hwmr.fr_mr = 1;
2646 mr->hwmr.remote_rd = 0;
2647 mr->hwmr.remote_wr = 0;
2648 mr->hwmr.local_rd = 0;
2649 mr->hwmr.local_wr = 0;
2650 mr->hwmr.mw_bind = 0;
2651 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2652 if (status)
2653 goto pbl_err;
2654 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2655 if (status)
2656 goto mbx_err;
2657 mr->ibmr.rkey = mr->hwmr.lkey;
2658 mr->ibmr.lkey = mr->hwmr.lkey;
2659 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64) mr;
2660 return &mr->ibmr;
2661mbx_err:
2662 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2663pbl_err:
2664 kfree(mr);
2665 return ERR_PTR(-ENOMEM);
2666}
2667
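/* Allocate an ib_fast_reg_page_list with the u64 page array placed
 * immediately after the structure in a single allocation.
 */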
2668struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
2669 *ibdev,
2670 int page_list_len)
2671{
2672 struct ib_fast_reg_page_list *frmr_list;
2673 int size;
2674
2675 size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
2676 frmr_list = kzalloc(size, GFP_KERNEL);
2677 if (!frmr_list)
2678 return ERR_PTR(-ENOMEM);
2679 frmr_list->page_list = (u64 *)(frmr_list + 1);
2680 return frmr_list;
2681}
2682
2683void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
2684{
2685 kfree(page_list);
2686}