1 | /******************************************************************* |
2 | * This file is part of the Emulex RoCE Device Driver for * | |
3 | * RoCE (RDMA over Converged Ethernet) adapters. * | |
4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | |
5 | * EMULEX and SLI are trademarks of Emulex. * | |
6 | * www.emulex.com * | |
7 | * * | |
8 | * This program is free software; you can redistribute it and/or * | |
9 | * modify it under the terms of version 2 of the GNU General * | |
10 | * Public License as published by the Free Software Foundation. * | |
11 | * This program is distributed in the hope that it will be useful. * | |
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | |
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | |
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | |
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | |
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | |
17 | * more details, a copy of which can be found in the file COPYING * | |
18 | * included with this package. * | |
19 | * | |
20 | * Contact Information: | |
21 | * linux-drivers@emulex.com | |
22 | * | |
23 | * Emulex | |
24 | * 3333 Susan Street | |
25 | * Costa Mesa, CA 92626 | |
26 | *******************************************************************/ | |
27 | ||
28 | #include <linux/dma-mapping.h> | |
29 | #include <rdma/ib_verbs.h> | |
30 | #include <rdma/ib_user_verbs.h> | |
31 | #include <rdma/iw_cm.h> | |
32 | #include <rdma/ib_umem.h> | |
33 | #include <rdma/ib_addr.h> | |
34 | ||
35 | #include "ocrdma.h" | |
36 | #include "ocrdma_hw.h" | |
37 | #include "ocrdma_verbs.h" | |
38 | #include "ocrdma_abi.h" | |
39 | ||
40 | int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | |
41 | { | |
42 | if (index > 1) | |
43 | return -EINVAL; | |
44 | ||
45 | *pkey = 0xffff; | |
46 | return 0; | |
47 | } | |
48 | ||
49 | int ocrdma_query_gid(struct ib_device *ibdev, u8 port, | |
50 | int index, union ib_gid *sgid) | |
51 | { | |
52 | struct ocrdma_dev *dev; | |
53 | ||
54 | dev = get_ocrdma_dev(ibdev); | |
55 | memset(sgid, 0, sizeof(*sgid)); | |
56 | if (index > OCRDMA_MAX_SGID) | |
57 | return -EINVAL; |
58 | ||
59 | memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); | |
60 | ||
61 | return 0; | |
62 | } | |
63 | ||
64 | int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |
65 | { | |
66 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | |
67 | ||
68 | memset(attr, 0, sizeof *attr); | |
69 | memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], | |
70 | min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); | |
71 | ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); | |
72 | attr->max_mr_size = ~0ull; | |
73 | attr->page_size_cap = 0xffff000; | |
74 | attr->vendor_id = dev->nic_info.pdev->vendor; | |
75 | attr->vendor_part_id = dev->nic_info.pdev->device; | |
76 | attr->hw_ver = 0; | |
77 | attr->max_qp = dev->attr.max_qp; | |
78 | attr->max_ah = OCRDMA_MAX_AH; | |
79 | attr->max_qp_wr = dev->attr.max_wqe; |
80 | ||
81 | attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | | |
82 | IB_DEVICE_RC_RNR_NAK_GEN | | |
83 | IB_DEVICE_SHUTDOWN_PORT | | |
84 | IB_DEVICE_SYS_IMAGE_GUID | | |
85 | IB_DEVICE_LOCAL_DMA_LKEY | |
86 | IB_DEVICE_MEM_MGT_EXTENSIONS; | |
87 | attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); | |
88 | attr->max_sge_rd = 0; | |
89 | attr->max_cq = dev->attr.max_cq; |
90 | attr->max_cqe = dev->attr.max_cqe; | |
91 | attr->max_mr = dev->attr.max_mr; | |
92 | attr->max_mw = dev->attr.max_mw; | |
93 | attr->max_pd = dev->attr.max_pd; |
94 | attr->atomic_cap = 0; | |
95 | attr->max_fmr = 0; | |
96 | attr->max_map_per_fmr = 0; | |
97 | attr->max_qp_rd_atom = | |
98 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); | |
99 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; | |
100 | attr->max_srq = dev->attr.max_srq; | |
101 | attr->max_srq_sge = dev->attr.max_srq_sge; | |
102 | attr->max_srq_wr = dev->attr.max_rqe; |
103 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; | |
104 | attr->max_fast_reg_page_list_len = 0; | |
105 | attr->max_pkeys = 1; | |
106 | return 0; | |
107 | } | |
108 | ||
109 | static inline void get_link_speed_and_width(struct ocrdma_dev *dev, |
110 | u8 *ib_speed, u8 *ib_width) | |
111 | { | |
112 | int status; | |
113 | u8 speed; | |
114 | ||
115 | status = ocrdma_mbx_get_link_speed(dev, &speed); | |
116 | if (status) | |
117 | speed = OCRDMA_PHYS_LINK_SPEED_ZERO; | |
118 | ||
119 | switch (speed) { | |
120 | case OCRDMA_PHYS_LINK_SPEED_1GBPS: | |
121 | *ib_speed = IB_SPEED_SDR; | |
122 | *ib_width = IB_WIDTH_1X; | |
123 | break; | |
124 | ||
125 | case OCRDMA_PHYS_LINK_SPEED_10GBPS: | |
126 | *ib_speed = IB_SPEED_QDR; | |
127 | *ib_width = IB_WIDTH_1X; | |
128 | break; | |
129 | ||
130 | case OCRDMA_PHYS_LINK_SPEED_20GBPS: | |
131 | *ib_speed = IB_SPEED_DDR; | |
132 | *ib_width = IB_WIDTH_4X; | |
133 | break; | |
134 | ||
135 | case OCRDMA_PHYS_LINK_SPEED_40GBPS: | |
136 | *ib_speed = IB_SPEED_QDR; | |
137 | *ib_width = IB_WIDTH_4X; | |
138 | break; | |
139 | ||
140 | default: | |
141 | /* Unsupported */ | |
142 | *ib_speed = IB_SPEED_SDR; | |
143 | *ib_width = IB_WIDTH_1X; | |
144 | } | |
145 | } |
146 | ||
147 | int ocrdma_query_port(struct ib_device *ibdev, |
148 | u8 port, struct ib_port_attr *props) | |
149 | { | |
150 | enum ib_port_state port_state; | |
151 | struct ocrdma_dev *dev; | |
152 | struct net_device *netdev; | |
153 | ||
154 | dev = get_ocrdma_dev(ibdev); | |
155 | if (port > 1) { | |
156 | pr_err("%s(%d) invalid_port=0x%x\n", __func__, |
157 | dev->id, port); | |
158 | return -EINVAL; |
159 | } | |
160 | netdev = dev->nic_info.netdev; | |
161 | if (netif_running(netdev) && netif_oper_up(netdev)) { | |
162 | port_state = IB_PORT_ACTIVE; | |
163 | props->phys_state = 5; | |
164 | } else { | |
165 | port_state = IB_PORT_DOWN; | |
166 | props->phys_state = 3; | |
167 | } | |
168 | props->max_mtu = IB_MTU_4096; | |
169 | props->active_mtu = iboe_get_mtu(netdev->mtu); | |
170 | props->lid = 0; | |
171 | props->lmc = 0; | |
172 | props->sm_lid = 0; | |
173 | props->sm_sl = 0; | |
174 | props->state = port_state; | |
175 | props->port_cap_flags = | |
176 | IB_PORT_CM_SUP | | |
177 | IB_PORT_REINIT_SUP | | |
178 | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; | |
179 | props->gid_tbl_len = OCRDMA_MAX_SGID; |
180 | props->pkey_tbl_len = 1; | |
181 | props->bad_pkey_cntr = 0; | |
182 | props->qkey_viol_cntr = 0; | |
183 | get_link_speed_and_width(dev, &props->active_speed, |
184 | &props->active_width); | |
185 | props->max_msg_sz = 0x80000000; |
186 | props->max_vl_num = 4; | |
187 | return 0; | |
188 | } | |
189 | ||
190 | int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, | |
191 | struct ib_port_modify *props) | |
192 | { | |
193 | struct ocrdma_dev *dev; | |
194 | ||
195 | dev = get_ocrdma_dev(ibdev); | |
196 | if (port > 1) { | |
197 | pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port); | |
198 | return -EINVAL; |
199 | } | |
200 | return 0; | |
201 | } | |
202 | ||
203 | static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, | |
204 | unsigned long len) | |
205 | { | |
206 | struct ocrdma_mm *mm; | |
207 | ||
208 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | |
209 | if (mm == NULL) | |
210 | return -ENOMEM; | |
211 | mm->key.phy_addr = phy_addr; | |
212 | mm->key.len = len; | |
213 | INIT_LIST_HEAD(&mm->entry); | |
214 | ||
215 | mutex_lock(&uctx->mm_list_lock); | |
216 | list_add_tail(&mm->entry, &uctx->mm_head); | |
217 | mutex_unlock(&uctx->mm_list_lock); | |
218 | return 0; | |
219 | } | |
220 | ||
221 | static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, | |
222 | unsigned long len) | |
223 | { | |
224 | struct ocrdma_mm *mm, *tmp; | |
225 | ||
226 | mutex_lock(&uctx->mm_list_lock); | |
227 | list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { | |
228 | if (len != mm->key.len && phy_addr != mm->key.phy_addr) | |
229 | continue; |
230 | ||
231 | list_del(&mm->entry); | |
232 | kfree(mm); | |
233 | break; | |
234 | } | |
235 | mutex_unlock(&uctx->mm_list_lock); | |
236 | } | |
237 | ||
238 | static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, | |
239 | unsigned long len) | |
240 | { | |
241 | bool found = false; | |
242 | struct ocrdma_mm *mm; | |
243 | ||
244 | mutex_lock(&uctx->mm_list_lock); | |
245 | list_for_each_entry(mm, &uctx->mm_head, entry) { | |
246 | if (len != mm->key.len && phy_addr != mm->key.phy_addr) | |
247 | continue; |
248 | ||
249 | found = true; | |
250 | break; | |
251 | } | |
252 | mutex_unlock(&uctx->mm_list_lock); | |
253 | return found; | |
254 | } | |
255 | ||
256 | static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, |
257 | struct ocrdma_ucontext *uctx, | |
258 | struct ib_udata *udata) | |
259 | { | |
260 | struct ocrdma_pd *pd = NULL; | |
261 | int status = 0; | |
262 | ||
263 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | |
264 | if (!pd) | |
265 | return ERR_PTR(-ENOMEM); | |
266 | ||
267 | if (udata && uctx) { | |
268 | pd->dpp_enabled = | |
269 | ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; | |
270 | pd->num_dpp_qp = |
271 | pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; | |
272 | } | |
273 | ||
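| /* If the adapter rejects a DPP-enabled PD allocation, retry once without DPP. */ | |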
274 | retry: | |
275 | status = ocrdma_mbx_alloc_pd(dev, pd); | |
276 | if (status) { | |
277 | if (pd->dpp_enabled) { | |
278 | pd->dpp_enabled = false; | |
279 | pd->num_dpp_qp = 0; | |
280 | goto retry; | |
281 | } else { | |
282 | kfree(pd); | |
283 | return ERR_PTR(status); | |
284 | } | |
285 | } | |
286 | ||
287 | return pd; | |
288 | } | |
289 | ||
290 | static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, | |
291 | struct ocrdma_pd *pd) | |
292 | { | |
293 | return (uctx->cntxt_pd == pd ? true : false); | |
294 | } | |
295 | ||
296 | static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, | |
297 | struct ocrdma_pd *pd) | |
298 | { | |
299 | int status = 0; | |
300 | ||
301 | status = ocrdma_mbx_dealloc_pd(dev, pd); | |
302 | kfree(pd); | |
303 | return status; | |
304 | } | |
305 | ||
306 | static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev, | |
307 | struct ocrdma_ucontext *uctx, | |
308 | struct ib_udata *udata) | |
309 | { | |
310 | int status = 0; | |
311 | ||
312 | uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata); | |
313 | if (IS_ERR(uctx->cntxt_pd)) { | |
314 | status = PTR_ERR(uctx->cntxt_pd); | |
315 | uctx->cntxt_pd = NULL; | |
316 | goto err; | |
317 | } | |
318 | ||
319 | uctx->cntxt_pd->uctx = uctx; | |
320 | uctx->cntxt_pd->ibpd.device = &dev->ibdev; | |
321 | err: | |
322 | return status; | |
323 | } | |
324 | ||
325 | static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) | |
326 | { | |
327 | int status = 0; | |
328 | struct ocrdma_pd *pd = uctx->cntxt_pd; | |
329 | struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); | |
330 | ||
331 | BUG_ON(uctx->pd_in_use); | |
332 | uctx->cntxt_pd = NULL; | |
333 | status = _ocrdma_dealloc_pd(dev, pd); | |
334 | return status; | |
335 | } | |
336 | ||
337 | static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) | |
338 | { | |
339 | struct ocrdma_pd *pd = NULL; | |
340 | ||
341 | mutex_lock(&uctx->mm_list_lock); | |
342 | if (!uctx->pd_in_use) { | |
343 | uctx->pd_in_use = true; | |
344 | pd = uctx->cntxt_pd; | |
345 | } | |
346 | mutex_unlock(&uctx->mm_list_lock); | |
347 | ||
348 | return pd; | |
349 | } | |
350 | ||
351 | static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx) | |
352 | { | |
353 | mutex_lock(&uctx->mm_list_lock); | |
354 | uctx->pd_in_use = false; | |
355 | mutex_unlock(&uctx->mm_list_lock); | |
356 | } | |
357 | ||
358 | struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, |
359 | struct ib_udata *udata) | |
360 | { | |
361 | int status; | |
362 | struct ocrdma_ucontext *ctx; | |
363 | struct ocrdma_alloc_ucontext_resp resp; | |
364 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | |
365 | struct pci_dev *pdev = dev->nic_info.pdev; | |
366 | u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE); | |
367 | ||
368 | if (!udata) | |
369 | return ERR_PTR(-EFAULT); | |
370 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | |
371 | if (!ctx) | |
372 | return ERR_PTR(-ENOMEM); | |
373 | INIT_LIST_HEAD(&ctx->mm_head); |
374 | mutex_init(&ctx->mm_list_lock); | |
375 | ||
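| /* The AH table is a DMA-coherent buffer shared with user space via mmap; | |
| * its address and length are returned in the alloc_ucontext response below. */ | |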
376 | ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, | |
377 | &ctx->ah_tbl.pa, GFP_KERNEL); | |
378 | if (!ctx->ah_tbl.va) { | |
379 | kfree(ctx); | |
380 | return ERR_PTR(-ENOMEM); | |
381 | } | |
382 | memset(ctx->ah_tbl.va, 0, map_len); | |
383 | ctx->ah_tbl.len = map_len; | |
384 | ||
385 | memset(&resp, 0, sizeof(resp)); | |
386 | resp.ah_tbl_len = ctx->ah_tbl.len; |
387 | resp.ah_tbl_page = ctx->ah_tbl.pa; | |
388 | ||
389 | status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); | |
390 | if (status) | |
391 | goto map_err; | |
392 | |
393 | status = ocrdma_alloc_ucontext_pd(dev, ctx, udata); | |
394 | if (status) | |
395 | goto pd_err; | |
396 | ||
397 | resp.dev_id = dev->id; |
398 | resp.max_inline_data = dev->attr.max_inline_data; | |
399 | resp.wqe_size = dev->attr.wqe_size; | |
400 | resp.rqe_size = dev->attr.rqe_size; | |
401 | resp.dpp_wqe_size = dev->attr.wqe_size; | |
402 | |
403 | memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); | |
404 | status = ib_copy_to_udata(udata, &resp, sizeof(resp)); | |
405 | if (status) | |
406 | goto cpy_err; | |
407 | return &ctx->ibucontext; | |
408 | ||
409 | cpy_err: | |
410 | pd_err: | |
411 | ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); |
412 | map_err: | |
413 | dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va, | |
414 | ctx->ah_tbl.pa); | |
415 | kfree(ctx); | |
416 | return ERR_PTR(status); | |
417 | } | |
418 | ||
419 | int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) | |
420 | { | |
421 | int status = 0; | |
422 | struct ocrdma_mm *mm, *tmp; |
423 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); | |
424 | struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device); |
425 | struct pci_dev *pdev = dev->nic_info.pdev; | |
426 | ||
427 | status = ocrdma_dealloc_ucontext_pd(uctx); |
428 | ||
429 | ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len); |
430 | dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, | |
431 | uctx->ah_tbl.pa); | |
432 | ||
433 | list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { | |
434 | list_del(&mm->entry); | |
435 | kfree(mm); | |
436 | } | |
437 | kfree(uctx); | |
438 | return status; | |
439 | } |
440 | ||
441 | int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | |
442 | { | |
443 | struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context); | |
444 | struct ocrdma_dev *dev = get_ocrdma_dev(context->device); | |
445 | unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; |
446 | u64 unmapped_db = (u64) dev->nic_info.unmapped_db; | |
447 | unsigned long len = (vma->vm_end - vma->vm_start); | |
448 | int status = 0; | |
449 | bool found; | |
450 | ||
451 | if (vma->vm_start & (PAGE_SIZE - 1)) | |
452 | return -EINVAL; | |
453 | found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len); | |
454 | if (!found) | |
455 | return -EINVAL; | |
456 | ||
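| /* Doorbell pages are mapped non-cached, DPP regions write-combined, | |
| * and queue (CQ/QP) buffers with normal caching. */ | |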
457 | if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + | |
458 | dev->nic_info.db_total_size)) && | |
459 | (len <= dev->nic_info.db_page_size)) { | |
460 | if (vma->vm_flags & VM_READ) |
461 | return -EPERM; | |
462 | ||
463 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | |
464 | status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
465 | len, vma->vm_page_prot); | |
466 | } else if (dev->nic_info.dpp_unmapped_len && | |
467 | (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) && | |
468 | (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr + | |
469 | dev->nic_info.dpp_unmapped_len)) && | |
470 | (len <= dev->nic_info.dpp_unmapped_len)) { | |
471 | if (vma->vm_flags & VM_READ) |
472 | return -EPERM; | |
473 | ||
474 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
475 | status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | |
476 | len, vma->vm_page_prot); | |
477 | } else { | |
478 | status = remap_pfn_range(vma, vma->vm_start, |
479 | vma->vm_pgoff, len, vma->vm_page_prot); | |
480 | } | |
481 | return status; | |
482 | } | |
483 | ||
484 | static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd, | |
485 | struct ib_ucontext *ib_ctx, |
486 | struct ib_udata *udata) | |
487 | { | |
488 | int status; | |
489 | u64 db_page_addr; | |
490 | u64 dpp_page_addr = 0; | |
491 | u32 db_page_size; |
492 | struct ocrdma_alloc_pd_uresp rsp; | |
493 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); | |
494 | ||
495 | memset(&rsp, 0, sizeof(rsp)); | |
496 | rsp.id = pd->id; |
497 | rsp.dpp_enabled = pd->dpp_enabled; | |
498 | db_page_addr = ocrdma_get_db_addr(dev, pd->id); | |
499 | db_page_size = dev->nic_info.db_page_size; | |
500 | |
501 | status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); | |
502 | if (status) | |
503 | return status; | |
504 | ||
505 | if (pd->dpp_enabled) { | |
506 | dpp_page_addr = dev->nic_info.dpp_unmapped_addr + | |
507 | (pd->id * PAGE_SIZE); | |
508 | status = ocrdma_add_mmap(uctx, dpp_page_addr, | |
509 | PAGE_SIZE); | |
510 | if (status) |
511 | goto dpp_map_err; | |
512 | rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr); | |
513 | rsp.dpp_page_addr_lo = dpp_page_addr; | |
514 | } | |
515 | ||
516 | status = ib_copy_to_udata(udata, &rsp, sizeof(rsp)); | |
517 | if (status) | |
518 | goto ucopy_err; | |
519 | ||
520 | pd->uctx = uctx; | |
521 | return 0; | |
522 | ||
523 | ucopy_err: | |
524 | if (pd->dpp_enabled) | |
525 | ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE); | |
526 | dpp_map_err: |
527 | ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size); | |
528 | return status; | |
529 | } | |
530 | ||
531 | struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, | |
532 | struct ib_ucontext *context, | |
533 | struct ib_udata *udata) | |
534 | { | |
535 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | |
536 | struct ocrdma_pd *pd; | |
537 | struct ocrdma_ucontext *uctx = NULL; | |
538 | int status; | |
539 | u8 is_uctx_pd = false; | |
540 | ||
541 | if (udata && context) { | |
542 | uctx = get_ocrdma_ucontext(context); |
543 | pd = ocrdma_get_ucontext_pd(uctx); | |
544 | if (pd) { | |
545 | is_uctx_pd = true; | |
546 | goto pd_mapping; | |
547 | } | |
548 | } | |
549 | ||
550 | pd = _ocrdma_alloc_pd(dev, uctx, udata); |
551 | if (IS_ERR(pd)) { | |
552 | status = PTR_ERR(pd); | |
553 | goto exit; | |
554 | } | |
555 | ||
556 | pd_mapping: | |
557 | if (udata && context) { | |
558 | status = ocrdma_copy_pd_uresp(dev, pd, context, udata); | |
559 | if (status) |
560 | goto err; | |
561 | } | |
562 | return &pd->ibpd; | |
563 | ||
564 | err: | |
565 | if (is_uctx_pd) { |
566 | ocrdma_release_ucontext_pd(uctx); | |
567 | } else { | |
568 | status = ocrdma_mbx_dealloc_pd(dev, pd); | |
569 | kfree(pd); | |
570 | } | |
571 | exit: | |
572 | return ERR_PTR(status); |
573 | } | |
574 | ||
575 | int ocrdma_dealloc_pd(struct ib_pd *ibpd) | |
576 | { | |
577 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | |
578 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | |
579 | struct ocrdma_ucontext *uctx = NULL; |
580 | int status = 0; | |
581 | u64 usr_db; |
582 | ||
583 | uctx = pd->uctx; |
584 | if (uctx) { | |
585 | u64 dpp_db = dev->nic_info.dpp_unmapped_addr + | |
586 | (pd->id * PAGE_SIZE); | |
587 | if (pd->dpp_enabled) | |
588 | ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE); | |
589 | usr_db = ocrdma_get_db_addr(dev, pd->id); | |
590 | ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); | |
591 | |
592 | if (is_ucontext_pd(uctx, pd)) { | |
593 | ocrdma_release_ucontext_pd(uctx); | |
594 | return status; | |
595 | } | |
596 | } | |
597 | status = _ocrdma_dealloc_pd(dev, pd); | |
598 | return status; |
599 | } | |
600 | ||
601 | static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr, |
602 | u32 pdid, int acc, u32 num_pbls, u32 addr_check) | |
603 | { |
604 | int status; | |
605 | ||
606 | mr->hwmr.fr_mr = 0; |
607 | mr->hwmr.local_rd = 1; | |
608 | mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; | |
609 | mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | |
610 | mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; | |
611 | mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; | |
612 | mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; | |
613 | mr->hwmr.num_pbls = num_pbls; | |
614 | ||
615 | status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check); |
616 | if (status) | |
617 | return status; | |
618 | ||
619 | mr->ibmr.lkey = mr->hwmr.lkey; |
620 | if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | |
621 | mr->ibmr.rkey = mr->hwmr.lkey; | |
622 | return 0; | |
623 | } |
624 | ||
625 | struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc) | |
626 | { | |
627 | int status; | |
628 | struct ocrdma_mr *mr; | |
629 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); |
630 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | |
631 | ||
632 | if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { | |
633 | pr_err("%s err, invalid access rights\n", __func__); | |
634 | return ERR_PTR(-EINVAL); | |
635 | } | |
636 | ||
637 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
638 | if (!mr) | |
639 | return ERR_PTR(-ENOMEM); | |
640 | ||
641 | status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0, | |
642 | OCRDMA_ADDR_CHECK_DISABLE); |
643 | if (status) { | |
644 | kfree(mr); | |
645 | return ERR_PTR(status); | |
646 | } | |
647 | |
648 | return &mr->ibmr; | |
649 | } | |
650 | ||
651 | static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev, | |
652 | struct ocrdma_hw_mr *mr) | |
653 | { | |
654 | struct pci_dev *pdev = dev->nic_info.pdev; | |
655 | int i = 0; | |
656 | ||
657 | if (mr->pbl_table) { | |
658 | for (i = 0; i < mr->num_pbls; i++) { | |
659 | if (!mr->pbl_table[i].va) | |
660 | continue; | |
661 | dma_free_coherent(&pdev->dev, mr->pbl_size, | |
662 | mr->pbl_table[i].va, | |
663 | mr->pbl_table[i].pa); | |
664 | } | |
665 | kfree(mr->pbl_table); | |
666 | mr->pbl_table = NULL; | |
667 | } | |
668 | } | |
669 | ||
670 | static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr, |
671 | u32 num_pbes) | |
672 | { |
673 | u32 num_pbls = 0; | |
674 | u32 idx = 0; | |
675 | int status = 0; | |
676 | u32 pbl_size; | |
677 | ||
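| /* Grow the PBL size in powers of two from the minimum host page size until | |
| * the number of PBLs needed for num_pbes fits the adapter's max_num_mr_pbl. */ | |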
678 | do { | |
679 | pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx); | |
680 | if (pbl_size > MAX_OCRDMA_PBL_SIZE) { | |
681 | status = -EFAULT; | |
682 | break; | |
683 | } | |
684 | num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64))); | |
685 | num_pbls = num_pbls / (pbl_size / sizeof(u64)); | |
686 | idx++; | |
687 | } while (num_pbls >= dev->attr.max_num_mr_pbl); | |
688 | |
689 | mr->hwmr.num_pbes = num_pbes; | |
690 | mr->hwmr.num_pbls = num_pbls; | |
691 | mr->hwmr.pbl_size = pbl_size; | |
692 | return status; | |
693 | } | |
694 | ||
695 | static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) | |
696 | { | |
697 | int status = 0; | |
698 | int i; | |
699 | u32 dma_len = mr->pbl_size; | |
700 | struct pci_dev *pdev = dev->nic_info.pdev; | |
701 | void *va; | |
702 | dma_addr_t pa; | |
703 | ||
704 | mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) * | |
705 | mr->num_pbls, GFP_KERNEL); | |
706 | ||
707 | if (!mr->pbl_table) | |
708 | return -ENOMEM; | |
709 | ||
710 | for (i = 0; i < mr->num_pbls; i++) { | |
711 | va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); | |
712 | if (!va) { | |
713 | ocrdma_free_mr_pbl_tbl(dev, mr); | |
714 | status = -ENOMEM; | |
715 | break; | |
716 | } | |
717 | memset(va, 0, dma_len); | |
718 | mr->pbl_table[i].va = va; | |
719 | mr->pbl_table[i].pa = pa; | |
720 | } | |
721 | return status; | |
722 | } | |
723 | ||
724 | static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, | |
725 | u32 num_pbes) | |
726 | { | |
727 | struct ocrdma_pbe *pbe; | |
728 | struct ib_umem_chunk *chunk; | |
729 | struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; | |
730 | struct ib_umem *umem = mr->umem; | |
731 | int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; | |
732 | ||
733 | if (!mr->hwmr.num_pbes) | |
734 | return; | |
735 | ||
736 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | |
737 | pbe_cnt = 0; | |
738 | ||
739 | shift = ilog2(umem->page_size); | |
740 | ||
741 | list_for_each_entry(chunk, &umem->chunk_list, list) { | |
742 | /* get all the dma regions from the chunk. */ | |
743 | for (i = 0; i < chunk->nmap; i++) { | |
744 | pages = sg_dma_len(&chunk->page_list[i]) >> shift; | |
745 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { | |
746 | /* store the page address in pbe */ | |
747 | pbe->pa_lo = | |
748 | cpu_to_le32(sg_dma_address | |
749 | (&chunk->page_list[i]) + | |
750 | (umem->page_size * pg_cnt)); | |
751 | pbe->pa_hi = | |
752 | cpu_to_le32(upper_32_bits | |
753 | ((sg_dma_address | |
754 | (&chunk->page_list[i]) + | |
755 | umem->page_size * pg_cnt))); | |
756 | pbe_cnt += 1; | |
757 | total_num_pbes += 1; | |
758 | pbe++; | |
759 | ||
760 | /* if done building pbes, issue the mbx cmd. */ | |
761 | if (total_num_pbes == num_pbes) | |
762 | return; | |
763 | ||
764 | /* if the given pbl is full storing the pbes, | |
765 | * move to next pbl. | |
766 | */ | |
767 | if (pbe_cnt == | |
768 | (mr->hwmr.pbl_size / sizeof(u64))) { | |
769 | pbl_tbl++; | |
770 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | |
771 | pbe_cnt = 0; | |
772 | } | |
773 | } | |
774 | } | |
775 | } | |
776 | } | |
777 | ||
778 | struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, | |
779 | u64 usr_addr, int acc, struct ib_udata *udata) | |
780 | { | |
781 | int status = -ENOMEM; | |
782 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | |
783 | struct ocrdma_mr *mr; |
784 | struct ocrdma_pd *pd; | |
785 | u32 num_pbes; |
786 | ||
787 | pd = get_ocrdma_pd(ibpd); | |
788 | |
789 | if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) | |
790 | return ERR_PTR(-EINVAL); | |
791 | ||
792 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | |
793 | if (!mr) | |
794 | return ERR_PTR(status); | |
795 | mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); |
796 | if (IS_ERR(mr->umem)) { | |
797 | status = -EFAULT; | |
798 | goto umem_err; | |
799 | } | |
800 | num_pbes = ib_umem_page_count(mr->umem); | |
801 | status = ocrdma_get_pbl_info(dev, mr, num_pbes); | |
802 | if (status) |
803 | goto umem_err; | |
804 | ||
805 | mr->hwmr.pbe_size = mr->umem->page_size; | |
806 | mr->hwmr.fbo = mr->umem->offset; | |
807 | mr->hwmr.va = usr_addr; | |
808 | mr->hwmr.len = len; | |
809 | mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | |
810 | mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; | |
811 | mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; | |
812 | mr->hwmr.local_rd = 1; | |
813 | mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; | |
814 | status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); | |
815 | if (status) | |
816 | goto umem_err; | |
817 | build_user_pbes(dev, mr, num_pbes); | |
818 | status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); | |
819 | if (status) | |
820 | goto mbx_err; | |
821 | mr->ibmr.lkey = mr->hwmr.lkey; |
822 | if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | |
823 | mr->ibmr.rkey = mr->hwmr.lkey; | |
824 | ||
825 | return &mr->ibmr; | |
826 | ||
827 | mbx_err: | |
828 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | |
829 | umem_err: | |
830 | kfree(mr); | |
831 | return ERR_PTR(status); | |
832 | } | |
833 | ||
834 | int ocrdma_dereg_mr(struct ib_mr *ib_mr) | |
835 | { | |
836 | struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); | |
837 | struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); | |
838 | int status; |
839 | ||
840 | status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); | |
841 | ||
842 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | |
843 | ||
844 | /* it could be user registered memory. */ |
845 | if (mr->umem) | |
846 | ib_umem_release(mr->umem); | |
847 | kfree(mr); | |
848 | return status; | |
849 | } | |
850 | ||
851 | static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, |
852 | struct ib_udata *udata, | |
853 | struct ib_ucontext *ib_ctx) |
854 | { | |
855 | int status; | |
856 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); | |
857 | struct ocrdma_create_cq_uresp uresp; |
858 | ||
859 | memset(&uresp, 0, sizeof(uresp)); | |
860 | uresp.cq_id = cq->id; | |
861 | uresp.page_size = PAGE_ALIGN(cq->len); | |
862 | uresp.num_pages = 1; |
863 | uresp.max_hw_cqe = cq->max_hw_cqe; | |
864 | uresp.page_addr[0] = cq->pa; | |
865 | uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id); | |
866 | uresp.db_page_size = dev->nic_info.db_page_size; | |
867 | uresp.phase_change = cq->phase_change ? 1 : 0; |
868 | status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | |
869 | if (status) { | |
870 | pr_err("%s(%d) copy error cqid=0x%x.\n", | |
871 | __func__, dev->id, cq->id); | |
872 | goto err; |
873 | } | |
874 | status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); |
875 | if (status) | |
876 | goto err; | |
877 | status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size); | |
878 | if (status) { | |
879 | ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); | |
880 | goto err; | |
881 | } | |
882 | cq->ucontext = uctx; | |
883 | err: | |
884 | return status; | |
885 | } | |
886 | ||
887 | struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, | |
888 | struct ib_ucontext *ib_ctx, | |
889 | struct ib_udata *udata) | |
890 | { | |
891 | struct ocrdma_cq *cq; | |
892 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | |
893 | struct ocrdma_ucontext *uctx = NULL; |
894 | u16 pd_id = 0; | |
895 | int status; |
896 | struct ocrdma_create_cq_ureq ureq; | |
897 | ||
898 | if (udata) { | |
899 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) | |
900 | return ERR_PTR(-EFAULT); | |
901 | } else | |
902 | ureq.dpp_cq = 0; | |
903 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); | |
904 | if (!cq) | |
905 | return ERR_PTR(-ENOMEM); | |
906 | ||
907 | spin_lock_init(&cq->cq_lock); | |
908 | spin_lock_init(&cq->comp_handler_lock); | |
909 | INIT_LIST_HEAD(&cq->sq_head); |
910 | INIT_LIST_HEAD(&cq->rq_head); | |
911 | cq->first_arm = true; | |
912 | ||
913 | if (ib_ctx) { |
914 | uctx = get_ocrdma_ucontext(ib_ctx); | |
915 | pd_id = uctx->cntxt_pd->id; | |
916 | } | |
917 | ||
918 | status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id); | |
919 | if (status) { |
920 | kfree(cq); | |
921 | return ERR_PTR(status); | |
922 | } | |
923 | if (ib_ctx) { | |
924 | status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx); | |
925 | if (status) |
926 | goto ctx_err; | |
927 | } | |
928 | cq->phase = OCRDMA_CQE_VALID; | |
929 | dev->cq_tbl[cq->id] = cq; | |
930 | return &cq->ibcq; |
931 | ||
932 | ctx_err: | |
933 | ocrdma_mbx_destroy_cq(dev, cq); | |
934 | kfree(cq); | |
935 | return ERR_PTR(status); | |
936 | } | |
937 | ||
938 | int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, | |
939 | struct ib_udata *udata) | |
940 | { | |
941 | int status = 0; | |
942 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | |
943 | ||
944 | if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) { | |
945 | status = -EINVAL; | |
946 | return status; | |
947 | } | |
948 | ibcq->cqe = new_cnt; | |
949 | return status; | |
950 | } | |
951 | ||
952 | static void ocrdma_flush_cq(struct ocrdma_cq *cq) |
953 | { | |
954 | int cqe_cnt; | |
955 | int valid_count = 0; | |
956 | unsigned long flags; | |
957 | ||
958 | struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); | |
959 | struct ocrdma_cqe *cqe = NULL; | |
960 | ||
961 | cqe = cq->va; | |
962 | cqe_cnt = cq->cqe_cnt; | |
963 | ||
964 | /* The last irq might have scheduled a polling thread; | |
965 | * sync up with it before hard flushing. | |
966 | */ | |
967 | spin_lock_irqsave(&cq->cq_lock, flags); | |
968 | while (cqe_cnt) { | |
969 | if (is_cqe_valid(cq, cqe)) | |
970 | valid_count++; | |
971 | cqe++; | |
972 | cqe_cnt--; | |
973 | } | |
974 | ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count); | |
975 | spin_unlock_irqrestore(&cq->cq_lock, flags); | |
976 | } | |
977 | ||
978 | int ocrdma_destroy_cq(struct ib_cq *ibcq) |
979 | { | |
980 | int status; | |
981 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | |
982 | struct ocrdma_eq *eq = NULL; | |
983 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); | |
984 | int pdid = 0; | |
985 | u32 irq, indx; | |
986 | ||
987 | dev->cq_tbl[cq->id] = NULL; |
988 | indx = ocrdma_get_eq_table_index(dev, cq->eqn); | |
989 | if (indx == -EINVAL) | |
990 | BUG(); | |
991 | ||
992 | eq = &dev->eq_tbl[indx]; |
993 | irq = ocrdma_get_irq(dev, eq); | |
994 | synchronize_irq(irq); | |
995 | ocrdma_flush_cq(cq); | |
996 | ||
997 | status = ocrdma_mbx_destroy_cq(dev, cq); | |
998 | if (cq->ucontext) { | |
999 | pdid = cq->ucontext->cntxt_pd->id; | |
1000 | ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, |
1001 | PAGE_ALIGN(cq->len)); | |
1002 | ocrdma_del_mmap(cq->ucontext, |
1003 | ocrdma_get_db_addr(dev, pdid), | |
1004 | dev->nic_info.db_page_size); |
1005 | } | |
1006 | |
1007 | kfree(cq); | |
1008 | return status; | |
1009 | } | |
1010 | ||
1011 | static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) | |
1012 | { | |
1013 | int status = -EINVAL; | |
1014 | ||
1015 | if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { | |
1016 | dev->qp_tbl[qp->id] = qp; | |
1017 | status = 0; | |
1018 | } | |
1019 | return status; | |
1020 | } | |
1021 | ||
1022 | static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) | |
1023 | { | |
1024 | dev->qp_tbl[qp->id] = NULL; | |
1025 | } | |
1026 | ||
1027 | static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, | |
1028 | struct ib_qp_init_attr *attrs) | |
1029 | { | |
1030 | if ((attrs->qp_type != IB_QPT_GSI) && |
1031 | (attrs->qp_type != IB_QPT_RC) && | |
1032 | (attrs->qp_type != IB_QPT_UC) && | |
1033 | (attrs->qp_type != IB_QPT_UD)) { | |
1034 | pr_err("%s(%d) unsupported qp type=0x%x requested\n", |
1035 | __func__, dev->id, attrs->qp_type); | |
1036 | return -EINVAL; |
1037 | } | |
1038 | /* Skip the check for QP1 to support CM size of 128 */ |
1039 | if ((attrs->qp_type != IB_QPT_GSI) && | |
1040 | (attrs->cap.max_send_wr > dev->attr.max_wqe)) { | |
1041 | pr_err("%s(%d) unsupported send_wr=0x%x requested\n", |
1042 | __func__, dev->id, attrs->cap.max_send_wr); | |
1043 | pr_err("%s(%d) supported send_wr=0x%x\n", | |
1044 | __func__, dev->id, dev->attr.max_wqe); | |
1045 | return -EINVAL; |
1046 | } | |
1047 | if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { | |
1048 | pr_err("%s(%d) unsupported recv_wr=0x%x requested\n", |
1049 | __func__, dev->id, attrs->cap.max_recv_wr); | |
1050 | pr_err("%s(%d) supported recv_wr=0x%x\n", | |
1051 | __func__, dev->id, dev->attr.max_rqe); | |
1052 | return -EINVAL; |
1053 | } | |
1054 | if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { | |
1055 | pr_err("%s(%d) unsupported inline data size=0x%x requested\n", |
1056 | __func__, dev->id, attrs->cap.max_inline_data); | |
1057 | pr_err("%s(%d) supported inline data size=0x%x\n", | |
1058 | __func__, dev->id, dev->attr.max_inline_data); | |
1059 | return -EINVAL; |
1060 | } | |
1061 | if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { | |
1062 | pr_err("%s(%d) unsupported send_sge=0x%x requested\n", |
1063 | __func__, dev->id, attrs->cap.max_send_sge); | |
1064 | pr_err("%s(%d) supported send_sge=0x%x\n", | |
1065 | __func__, dev->id, dev->attr.max_send_sge); | |
1066 | return -EINVAL; |
1067 | } | |
1068 | if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { | |
1069 | pr_err("%s(%d) unsupported recv_sge=0x%x requested\n", |
1070 | __func__, dev->id, attrs->cap.max_recv_sge); | |
1071 | pr_err("%s(%d) supported recv_sge=0x%x\n", | |
1072 | __func__, dev->id, dev->attr.max_recv_sge); | |
1073 | return -EINVAL; |
1074 | } | |
1075 | /* unprivileged user space cannot create special QP */ | |
1076 | if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { | |
1077 | pr_err | |
1078 | ("%s(%d) Userspace can't create special QPs of type=0x%x\n", |
1079 | __func__, dev->id, attrs->qp_type); | |
1080 | return -EINVAL; | |
1081 | } | |
1082 | /* allow creating only one GSI type of QP */ | |
1083 | if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { | |
1084 | pr_err("%s(%d) GSI special QPs already created.\n", |
1085 | __func__, dev->id); | |
1086 | return -EINVAL; |
1087 | } | |
1088 | /* verify consumer QPs are not trying to use GSI QP's CQ */ | |
1089 | if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) { | |
1090 | if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) || | |
1091 | (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { | |
1092 | pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n", | |
1093 | __func__, dev->id); | |
1094 | return -EINVAL; |
1095 | } | |
1096 | } | |
1097 | return 0; | |
1098 | } | |
1099 | ||
1100 | static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, | |
1101 | struct ib_udata *udata, int dpp_offset, | |
1102 | int dpp_credit_lmt, int srq) | |
1103 | { | |
1104 | int status = 0; | |
1105 | u64 usr_db; | |
1106 | struct ocrdma_create_qp_uresp uresp; | |
1107 | struct ocrdma_dev *dev = qp->dev; | |
1108 | struct ocrdma_pd *pd = qp->pd; | |
1109 | ||
1110 | memset(&uresp, 0, sizeof(uresp)); | |
1111 | usr_db = dev->nic_info.unmapped_db + | |
1112 | (pd->id * dev->nic_info.db_page_size); | |
1113 | uresp.qp_id = qp->id; | |
1114 | uresp.sq_dbid = qp->sq.dbid; | |
1115 | uresp.num_sq_pages = 1; | |
1116 | uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); | |
1117 | uresp.sq_page_addr[0] = qp->sq.pa; |
1118 | uresp.num_wqe_allocated = qp->sq.max_cnt; | |
1119 | if (!srq) { | |
1120 | uresp.rq_dbid = qp->rq.dbid; | |
1121 | uresp.num_rq_pages = 1; | |
1122 | uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); | |
1123 | uresp.rq_page_addr[0] = qp->rq.pa; |
1124 | uresp.num_rqe_allocated = qp->rq.max_cnt; | |
1125 | } | |
1126 | uresp.db_page_addr = usr_db; | |
1127 | uresp.db_page_size = dev->nic_info.db_page_size; | |
1128 | uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; |
1129 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; | |
1130 | uresp.db_shift = OCRDMA_DB_RQ_SHIFT; | |
1131 | |
1132 | if (qp->dpp_enabled) { | |
1133 | uresp.dpp_credit = dpp_credit_lmt; | |
1134 | uresp.dpp_offset = dpp_offset; | |
1135 | } | |
1136 | status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | |
1137 | if (status) { | |
ef99c4c2 | 1138 | pr_err("%s(%d) user copy error.\n", __func__, dev->id); |
fe2caefc PP |
1139 | goto err; |
1140 | } | |
1141 | status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], | |
1142 | uresp.sq_page_size); | |
1143 | if (status) | |
1144 | goto err; | |
1145 | ||
1146 | if (!srq) { | |
1147 | status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0], | |
1148 | uresp.rq_page_size); | |
1149 | if (status) | |
1150 | goto rq_map_err; | |
1151 | } | |
1152 | return status; | |
1153 | rq_map_err: | |
1154 | ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size); | |
1155 | err: | |
1156 | return status; | |
1157 | } | |
1158 | ||
1159 | static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | |
1160 | struct ocrdma_pd *pd) | |
1161 | { | |
1162 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { | |
1163 | qp->sq_db = dev->nic_info.db + |
1164 | (pd->id * dev->nic_info.db_page_size) + | |
1165 | OCRDMA_DB_GEN2_SQ_OFFSET; | |
1166 | qp->rq_db = dev->nic_info.db + | |
1167 | (pd->id * dev->nic_info.db_page_size) + | |
1168 | OCRDMA_DB_GEN2_RQ_OFFSET; | |
1169 | } else { |
1170 | qp->sq_db = dev->nic_info.db + | |
1171 | (pd->id * dev->nic_info.db_page_size) + | |
1172 | OCRDMA_DB_SQ_OFFSET; | |
1173 | qp->rq_db = dev->nic_info.db + | |
1174 | (pd->id * dev->nic_info.db_page_size) + | |
1175 | OCRDMA_DB_RQ_OFFSET; | |
1176 | } | |
1177 | } | |
1178 | ||
1179 | static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) | |
1180 | { | |
1181 | qp->wqe_wr_id_tbl = | |
1182 | kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt, | |
1183 | GFP_KERNEL); | |
1184 | if (qp->wqe_wr_id_tbl == NULL) | |
1185 | return -ENOMEM; | |
1186 | qp->rqe_wr_id_tbl = | |
1187 | kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); | |
1188 | if (qp->rqe_wr_id_tbl == NULL) | |
1189 | return -ENOMEM; | |
1190 | ||
1191 | return 0; | |
1192 | } | |
1193 | ||
1194 | static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, | |
1195 | struct ocrdma_pd *pd, | |
1196 | struct ib_qp_init_attr *attrs) | |
1197 | { | |
1198 | qp->pd = pd; | |
1199 | spin_lock_init(&qp->q_lock); | |
1200 | INIT_LIST_HEAD(&qp->sq_entry); | |
1201 | INIT_LIST_HEAD(&qp->rq_entry); | |
1202 | ||
1203 | qp->qp_type = attrs->qp_type; | |
1204 | qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR; | |
1205 | qp->max_inline_data = attrs->cap.max_inline_data; | |
1206 | qp->sq.max_sges = attrs->cap.max_send_sge; | |
1207 | qp->rq.max_sges = attrs->cap.max_recv_sge; | |
1208 | qp->state = OCRDMA_QPS_RST; | |
1209 | qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; | |
1210 | } |
1211 | ||
1212 | static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, |
1213 | struct ib_qp_init_attr *attrs) | |
1214 | { | |
1215 | if (attrs->qp_type == IB_QPT_GSI) { | |
1216 | dev->gsi_qp_created = 1; | |
1217 | dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq); | |
1218 | dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq); | |
1219 | } | |
1220 | } | |
1221 | ||
1222 | struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, | |
1223 | struct ib_qp_init_attr *attrs, | |
1224 | struct ib_udata *udata) | |
1225 | { | |
1226 | int status; | |
1227 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | |
1228 | struct ocrdma_qp *qp; | |
1229 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | |
1230 | struct ocrdma_create_qp_ureq ureq; |
1231 | u16 dpp_credit_lmt, dpp_offset; | |
1232 | ||
1233 | status = ocrdma_check_qp_params(ibpd, dev, attrs); | |
1234 | if (status) | |
1235 | goto gen_err; | |
1236 | ||
1237 | memset(&ureq, 0, sizeof(ureq)); | |
1238 | if (udata) { | |
1239 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) | |
1240 | return ERR_PTR(-EFAULT); | |
1241 | } | |
1242 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | |
1243 | if (!qp) { | |
1244 | status = -ENOMEM; | |
1245 | goto gen_err; | |
1246 | } | |
1247 | qp->dev = dev; | |
1248 | ocrdma_set_qp_init_params(qp, pd, attrs); | |
1249 | if (udata == NULL) |
1250 | qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | | |
1251 | OCRDMA_QP_FAST_REG); | |
1252 | |
1253 | mutex_lock(&dev->dev_lock); | |
1254 | status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq, | |
1255 | ureq.dpp_cq_id, | |
1256 | &dpp_offset, &dpp_credit_lmt); | |
1257 | if (status) | |
1258 | goto mbx_err; | |
1259 | ||
1260 | /* user space QP's wr_id tables are managed in the library */ | |
1261 | if (udata == NULL) { | |
1262 | status = ocrdma_alloc_wr_id_tbl(qp); |
1263 | if (status) | |
1264 | goto map_err; | |
1265 | } | |
1266 | ||
1267 | status = ocrdma_add_qpn_map(dev, qp); | |
1268 | if (status) | |
1269 | goto map_err; | |
1270 | ocrdma_set_qp_db(dev, qp, pd); | |
1271 | if (udata) { | |
1272 | status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset, | |
1273 | dpp_credit_lmt, | |
1274 | (attrs->srq != NULL)); | |
1275 | if (status) | |
1276 | goto cpy_err; | |
1277 | } | |
1278 | ocrdma_store_gsi_qp_cq(dev, attrs); | |
1279 | qp->ibqp.qp_num = qp->id; | |
1280 | mutex_unlock(&dev->dev_lock); |
1281 | return &qp->ibqp; | |
1282 | ||
1283 | cpy_err: | |
1284 | ocrdma_del_qpn_map(dev, qp); | |
1285 | map_err: | |
1286 | ocrdma_mbx_destroy_qp(dev, qp); | |
1287 | mbx_err: | |
1288 | mutex_unlock(&dev->dev_lock); | |
1289 | kfree(qp->wqe_wr_id_tbl); | |
1290 | kfree(qp->rqe_wr_id_tbl); | |
1291 | kfree(qp); | |
1292 | pr_err("%s(%d) error=%d\n", __func__, dev->id, status); | |
1293 | gen_err: |
1294 | return ERR_PTR(status); | |
1295 | } | |
1296 | ||
1297 | int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1298 | int attr_mask) | |
1299 | { | |
1300 | int status = 0; | |
1301 | struct ocrdma_qp *qp; | |
1302 | struct ocrdma_dev *dev; | |
1303 | enum ib_qp_state old_qps; | |
1304 | ||
1305 | qp = get_ocrdma_qp(ibqp); | |
1306 | dev = qp->dev; | |
1307 | if (attr_mask & IB_QP_STATE) | |
1308 | status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); | |
1309 | /* if the new and previous states are the same, hw doesn't need to | |
1310 | * know about it. | |
1311 | */ | |
1312 | if (status < 0) | |
1313 | return status; | |
1314 | status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); | |
1315 | ||
1316 | return status; |
1317 | } | |
1318 | ||
1319 | int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1320 | int attr_mask, struct ib_udata *udata) | |
1321 | { | |
1322 | unsigned long flags; | |
1323 | int status = -EINVAL; | |
1324 | struct ocrdma_qp *qp; | |
1325 | struct ocrdma_dev *dev; | |
1326 | enum ib_qp_state old_qps, new_qps; | |
1327 | ||
1328 | qp = get_ocrdma_qp(ibqp); | |
1329 | dev = qp->dev; | |
1330 | ||
1331 | /* synchronize with multiple contexts trying to change or retrieve the qp state */ | |
1332 | mutex_lock(&dev->dev_lock); | |
1333 | /* synchronize with wqe, rqe posting and cqe processing contexts */ | |
1334 | spin_lock_irqsave(&qp->q_lock, flags); | |
1335 | old_qps = get_ibqp_state(qp->state); | |
1336 | if (attr_mask & IB_QP_STATE) | |
1337 | new_qps = attr->qp_state; | |
1338 | else | |
1339 | new_qps = old_qps; | |
1340 | spin_unlock_irqrestore(&qp->q_lock, flags); | |
1341 | ||
1342 | if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask, | |
1343 | IB_LINK_LAYER_ETHERNET)) { | |
1344 | pr_err("%s(%d) invalid attribute mask=0x%x specified for\n" |
1345 | "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", | |
1346 | __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, | |
1347 | old_qps, new_qps); | |
1348 | goto param_err; |
1349 | } | |
1350 | ||
1351 | status = _ocrdma_modify_qp(ibqp, attr, attr_mask); | |
1352 | if (status > 0) | |
1353 | status = 0; | |
1354 | param_err: | |
1355 | mutex_unlock(&dev->dev_lock); | |
1356 | return status; | |
1357 | } | |
1358 | ||
1359 | static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu) | |
1360 | { | |
1361 | switch (mtu) { | |
1362 | case 256: | |
1363 | return IB_MTU_256; | |
1364 | case 512: | |
1365 | return IB_MTU_512; | |
1366 | case 1024: | |
1367 | return IB_MTU_1024; | |
1368 | case 2048: | |
1369 | return IB_MTU_2048; | |
1370 | case 4096: | |
1371 | return IB_MTU_4096; | |
1372 | default: | |
1373 | return IB_MTU_1024; | |
1374 | } | |
1375 | } | |
1376 | ||
1377 | static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags) | |
1378 | { | |
1379 | int ib_qp_acc_flags = 0; | |
1380 | ||
1381 | if (qp_cap_flags & OCRDMA_QP_INB_WR) | |
1382 | ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; | |
1383 | if (qp_cap_flags & OCRDMA_QP_INB_RD) | |
1384 | ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; | |
1385 | return ib_qp_acc_flags; | |
1386 | } | |
1387 | ||
1388 | int ocrdma_query_qp(struct ib_qp *ibqp, | |
1389 | struct ib_qp_attr *qp_attr, | |
1390 | int attr_mask, struct ib_qp_init_attr *qp_init_attr) | |
1391 | { | |
1392 | int status; | |
1393 | u32 qp_state; | |
1394 | struct ocrdma_qp_params params; | |
1395 | struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); | |
1396 | struct ocrdma_dev *dev = qp->dev; | |
1397 | ||
1398 | memset(¶ms, 0, sizeof(params)); | |
1399 | mutex_lock(&dev->dev_lock); | |
1400 | status = ocrdma_mbx_query_qp(dev, qp, ¶ms); | |
1401 | mutex_unlock(&dev->dev_lock); | |
1402 | if (status) | |
1403 | goto mbx_err; | |
1404 | qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); | |
1405 | qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); | |
1406 | qp_attr->path_mtu = | |
1407 | ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & | |
1408 | OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> | |
1409 | OCRDMA_QP_PARAMS_PATH_MTU_SHIFT; | |
1410 | qp_attr->path_mig_state = IB_MIG_MIGRATED; | |
1411 | qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK; | |
1412 | qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK; | |
1413 | qp_attr->dest_qp_num = | |
1414 | params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK; | |
1415 | ||
1416 | qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags); | |
1417 | qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; | |
1418 | qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; | |
1419 | qp_attr->cap.max_send_sge = qp->sq.max_sges; | |
1420 | qp_attr->cap.max_recv_sge = qp->rq.max_sges; | |
1421 | qp_attr->cap.max_inline_data = qp->max_inline_data; | |
1422 | qp_init_attr->cap = qp_attr->cap; |
1423 | memcpy(&qp_attr->ah_attr.grh.dgid, ¶ms.dgid[0], | |
1424 | sizeof(params.dgid)); | |
1425 | qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl & | |
1426 | OCRDMA_QP_PARAMS_FLOW_LABEL_MASK; | |
1427 | qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; | |
1428 | qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn & | |
1429 | OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> | |
1430 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; | |
1431 | qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & | |
1432 | OCRDMA_QP_PARAMS_TCLASS_MASK) >> | |
1433 | OCRDMA_QP_PARAMS_TCLASS_SHIFT; |
1434 | ||
1435 | qp_attr->ah_attr.ah_flags = IB_AH_GRH; | |
1436 | qp_attr->ah_attr.port_num = 1; | |
1437 | qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl & | |
1438 | OCRDMA_QP_PARAMS_SL_MASK) >> | |
1439 | OCRDMA_QP_PARAMS_SL_SHIFT; | |
1440 | qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn & | |
1441 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >> | |
1442 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; | |
1443 | qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn & | |
1444 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >> | |
1445 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT; | |
1446 | qp_attr->retry_cnt = | |
1447 | (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >> | |
1448 | OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT; | |
1449 | qp_attr->min_rnr_timer = 0; | |
1450 | qp_attr->pkey_index = 0; | |
1451 | qp_attr->port_num = 1; | |
1452 | qp_attr->ah_attr.src_path_bits = 0; | |
1453 | qp_attr->ah_attr.static_rate = 0; | |
1454 | qp_attr->alt_pkey_index = 0; | |
1455 | qp_attr->alt_port_num = 0; | |
1456 | qp_attr->alt_timeout = 0; | |
1457 | memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); | |
1458 | qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> | |
1459 | OCRDMA_QP_PARAMS_STATE_SHIFT; | |
1460 | qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; | |
1461 | qp_attr->max_dest_rd_atomic = | |
1462 | params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; | |
1463 | qp_attr->max_rd_atomic = | |
1464 | params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; | |
1465 | qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & | |
1466 | OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; | |
1467 | mbx_err: | |
1468 | return status; | |
1469 | } | |
1470 | ||
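| /* Toggle the bitmap bit that tracks whether the SRQ RQE slot at 'idx' is in use. */ | |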
1471 | static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) | |
1472 | { | |
1473 | int i = idx / 32; | |
1474 | unsigned int mask = (1 << (idx % 32)); | |
1475 | ||
1476 | if (srq->idx_bit_fields[i] & mask) | |
1477 | srq->idx_bit_fields[i] &= ~mask; | |
1478 | else | |
1479 | srq->idx_bit_fields[i] |= mask; | |
1480 | } | |
1481 | ||
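| /* Free entries remaining in a hardware work-queue ring, derived from the | |
| * head and tail indices modulo the ring size. */ | |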
1482 | static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) | |
1483 | { | |
1484 | return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt; | |
1485 | } |
1486 | ||
1487 | static int is_hw_sq_empty(struct ocrdma_qp *qp) | |
1488 | { | |
1489 | return (qp->sq.tail == qp->sq.head); | |
1490 | } |
1491 | ||
1492 | static int is_hw_rq_empty(struct ocrdma_qp *qp) | |
1493 | { | |
1494 | return (qp->rq.tail == qp->rq.head); | |
1495 | } |
1496 | ||
1497 | static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q) | |
1498 | { | |
1499 | return q->va + (q->head * q->entry_size); | |
1500 | } | |
1501 | ||
1502 | static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q, | |
1503 | u32 idx) | |
1504 | { | |
1505 | return q->va + (idx * q->entry_size); | |
1506 | } | |
1507 | ||
1508 | static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q) | |
1509 | { | |
1510 | q->head = (q->head + 1) & q->max_wqe_idx; | |
1511 | } | |
1512 | ||
1513 | static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q) | |
1514 | { | |
1515 | q->tail = (q->tail + 1) & q->max_wqe_idx; | |
1516 | } | |
1517 | ||
1518 | /* discard the cqe for a given QP */ | |
1519 | static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) | |
1520 | { | |
1521 | unsigned long cq_flags; | |
1522 | unsigned long flags; | |
1523 | int discard_cnt = 0; | |
1524 | u32 cur_getp, stop_getp; | |
1525 | struct ocrdma_cqe *cqe; | |
1526 | u32 qpn = 0, wqe_idx = 0; | |
1527 | |
1528 | spin_lock_irqsave(&cq->cq_lock, cq_flags); | |
1529 | ||
1530 | /* traverse through the CQEs in the hw CQ, | |
1531 | * find the matching CQE for a given qp, | |
1532 | * mark the matching one discarded by clearing qpn. | |
1533 | * ring the doorbell in the poll_cq() as | |
1534 | * we don't complete out of order cqe. | |
1535 | */ | |
1536 | ||
1537 | cur_getp = cq->getp; | |
1538 | /* find up to where we reap the cq. */ | |
1539 | stop_getp = cur_getp; | |
1540 | do { | |
1541 | if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) | |
1542 | break; | |
1543 | ||
1544 | cqe = cq->va + cur_getp; | |
1545 | /* if (a) done reaping whole hw cq, or | |
1546 | * (b) qp_xq becomes empty. | |
1547 | * then exit | |
1548 | */ | |
1549 | qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; | |
1550 | /* if previously discarded cqe found, skip that too. */ | |
1551 | /* check for matching qp */ | |
1552 | if (qpn == 0 || qpn != qp->id) | |
1553 | goto skip_cqe; | |
1554 | ||
f99b1649 | 1555 | if (is_cqe_for_sq(cqe)) { |
fe2caefc | 1556 | ocrdma_hwq_inc_tail(&qp->sq); |
f99b1649 | 1557 | } else { |
fe2caefc | 1558 | if (qp->srq) { |
cf5788ad SX |
1559 | wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> |
1560 | OCRDMA_CQE_BUFTAG_SHIFT) & | |
1561 | qp->srq->rq.max_wqe_idx; | |
1562 | if (wqe_idx < 1) | |
1563 | BUG(); | |
fe2caefc PP |
1564 | spin_lock_irqsave(&qp->srq->q_lock, flags); |
1565 | ocrdma_hwq_inc_tail(&qp->srq->rq); | |
cf5788ad | 1566 | ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); |
fe2caefc PP |
1567 | spin_unlock_irqrestore(&qp->srq->q_lock, flags); |
1568 | ||
f99b1649 | 1569 | } else { |
fe2caefc | 1570 | ocrdma_hwq_inc_tail(&qp->rq); |
f99b1649 | 1571 | } |
fe2caefc | 1572 | } |
cf5788ad SX |
1573 | /* mark cqe discarded so that it is not picked up later |
1574 | * in the poll_cq(). | |
1575 | */ | |
1576 | discard_cnt += 1; | |
1577 | cqe->cmn.qpn = 0; | |
fe2caefc PP |
1578 | skip_cqe: |
1579 | cur_getp = (cur_getp + 1) % cq->max_hw_cqe; | |
1580 | } while (cur_getp != stop_getp); | |
1581 | spin_unlock_irqrestore(&cq->cq_lock, cq_flags); | |
1582 | } | |
1583 | ||
f11220ee | 1584 | void ocrdma_del_flush_qp(struct ocrdma_qp *qp) |
fe2caefc PP |
1585 | { |
1586 | int found = false; | |
1587 | unsigned long flags; | |
1588 | struct ocrdma_dev *dev = qp->dev; | |
1589 | /* sync with any active CQ poll */ | |
1590 | ||
1591 | spin_lock_irqsave(&dev->flush_q_lock, flags); | |
1592 | found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); | |
1593 | if (found) | |
1594 | list_del(&qp->sq_entry); | |
1595 | if (!qp->srq) { | |
1596 | found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); | |
1597 | if (found) | |
1598 | list_del(&qp->rq_entry); | |
1599 | } | |
1600 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); | |
1601 | } | |
1602 | ||
1603 | int ocrdma_destroy_qp(struct ib_qp *ibqp) | |
1604 | { | |
1605 | int status; | |
1606 | struct ocrdma_pd *pd; | |
1607 | struct ocrdma_qp *qp; | |
1608 | struct ocrdma_dev *dev; | |
1609 | struct ib_qp_attr attrs; | |
1610 | int attr_mask = IB_QP_STATE; | |
d19081e0 | 1611 | unsigned long flags; |
fe2caefc PP |
1612 | |
1613 | qp = get_ocrdma_qp(ibqp); | |
1614 | dev = qp->dev; | |
1615 | ||
1616 | attrs.qp_state = IB_QPS_ERR; | |
1617 | pd = qp->pd; | |
1618 | ||
1619 | /* change the QP state to ERROR */ | |
1620 | _ocrdma_modify_qp(ibqp, &attrs, attr_mask); | |
1621 | ||
1622 | /* ensure that CQEs for a newly created QP (whose id may be the | |
1623 | * same as that of the QP just being destroyed) don't get | |
1624 | * discarded until the old CQEs are discarded. | |
1625 | */ | |
1626 | mutex_lock(&dev->dev_lock); | |
1627 | status = ocrdma_mbx_destroy_qp(dev, qp); | |
1628 | ||
1629 | /* | |
1630 | * acquire CQ lock while destroy is in progress, in order to | |
1631 | * protect against processing in-flight CQEs for this QP. | |
1632 | */ | |
d19081e0 | 1633 | spin_lock_irqsave(&qp->sq_cq->cq_lock, flags); |
fe2caefc | 1634 | if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) |
d19081e0 | 1635 | spin_lock(&qp->rq_cq->cq_lock); |
fe2caefc PP |
1636 | |
1637 | ocrdma_del_qpn_map(dev, qp); | |
1638 | ||
1639 | if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) | |
d19081e0 DC |
1640 | spin_unlock(&qp->rq_cq->cq_lock); |
1641 | spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags); | |
fe2caefc PP |
1642 | |
1643 | if (!pd->uctx) { | |
1644 | ocrdma_discard_cqes(qp, qp->sq_cq); | |
1645 | ocrdma_discard_cqes(qp, qp->rq_cq); | |
1646 | } | |
1647 | mutex_unlock(&dev->dev_lock); | |
1648 | ||
1649 | if (pd->uctx) { | |
43a6b402 NG |
1650 | ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, |
1651 | PAGE_ALIGN(qp->sq.len)); | |
fe2caefc | 1652 | if (!qp->srq) |
43a6b402 NG |
1653 | ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, |
1654 | PAGE_ALIGN(qp->rq.len)); | |
fe2caefc PP |
1655 | } |
1656 | ||
1657 | ocrdma_del_flush_qp(qp); | |
1658 | ||
fe2caefc PP |
1659 | kfree(qp->wqe_wr_id_tbl); |
1660 | kfree(qp->rqe_wr_id_tbl); | |
1661 | kfree(qp); | |
1662 | return status; | |
1663 | } | |
1664 | ||
1afc0454 NG |
1665 | static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, |
1666 | struct ib_udata *udata) | |
fe2caefc PP |
1667 | { |
1668 | int status; | |
1669 | struct ocrdma_create_srq_uresp uresp; | |
1670 | ||
63ea3749 | 1671 | memset(&uresp, 0, sizeof(uresp)); |
fe2caefc PP |
1672 | uresp.rq_dbid = srq->rq.dbid; |
1673 | uresp.num_rq_pages = 1; | |
1674 | uresp.rq_page_addr[0] = srq->rq.pa; | |
1675 | uresp.rq_page_size = srq->rq.len; | |
1afc0454 NG |
1676 | uresp.db_page_addr = dev->nic_info.unmapped_db + |
1677 | (srq->pd->id * dev->nic_info.db_page_size); | |
1678 | uresp.db_page_size = dev->nic_info.db_page_size; | |
fe2caefc | 1679 | uresp.num_rqe_allocated = srq->rq.max_cnt; |
21c3391a | 1680 | if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { |
f11220ee | 1681 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; |
fe2caefc PP |
1682 | uresp.db_shift = 24; |
1683 | } else { | |
1684 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; | |
1685 | uresp.db_shift = 16; | |
1686 | } | |
1687 | ||
1688 | status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | |
1689 | if (status) | |
1690 | return status; | |
1691 | status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0], | |
1692 | uresp.rq_page_size); | |
1693 | if (status) | |
1694 | return status; | |
1695 | return status; | |
1696 | } | |
1697 | ||
1698 | struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, | |
1699 | struct ib_srq_init_attr *init_attr, | |
1700 | struct ib_udata *udata) | |
1701 | { | |
1702 | int status = -ENOMEM; | |
1703 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | |
f99b1649 | 1704 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); |
fe2caefc PP |
1705 | struct ocrdma_srq *srq; |
1706 | ||
1707 | if (init_attr->attr.max_sge > dev->attr.max_recv_sge) | |
1708 | return ERR_PTR(-EINVAL); | |
1709 | if (init_attr->attr.max_wr > dev->attr.max_rqe) | |
1710 | return ERR_PTR(-EINVAL); | |
1711 | ||
1712 | srq = kzalloc(sizeof(*srq), GFP_KERNEL); | |
1713 | if (!srq) | |
1714 | return ERR_PTR(status); | |
1715 | ||
1716 | spin_lock_init(&srq->q_lock); | |
fe2caefc PP |
1717 | srq->pd = pd; |
1718 | srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); | |
1afc0454 | 1719 | status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd); |
fe2caefc PP |
1720 | if (status) |
1721 | goto err; | |
1722 | ||
1723 | if (udata == NULL) { | |
1724 | srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, | |
1725 | GFP_KERNEL); | |
1726 | if (srq->rqe_wr_id_tbl == NULL) | |
1727 | goto arm_err; | |
1728 | ||
1729 | srq->bit_fields_len = (srq->rq.max_cnt / 32) + | |
1730 | (srq->rq.max_cnt % 32 ? 1 : 0); | |
1731 | srq->idx_bit_fields = | |
1732 | kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL); | |
1733 | if (srq->idx_bit_fields == NULL) | |
1734 | goto arm_err; | |
1735 | memset(srq->idx_bit_fields, 0xff, | |
1736 | srq->bit_fields_len * sizeof(u32)); | |
1737 | } | |
1738 | ||
1739 | if (init_attr->attr.srq_limit) { | |
1740 | status = ocrdma_mbx_modify_srq(srq, &init_attr->attr); | |
1741 | if (status) | |
1742 | goto arm_err; | |
1743 | } | |
1744 | ||
fe2caefc | 1745 | if (udata) { |
1afc0454 | 1746 | status = ocrdma_copy_srq_uresp(dev, srq, udata); |
fe2caefc PP |
1747 | if (status) |
1748 | goto arm_err; | |
1749 | } | |
1750 | ||
fe2caefc PP |
1751 | return &srq->ibsrq; |
1752 | ||
1753 | arm_err: | |
1754 | ocrdma_mbx_destroy_srq(dev, srq); | |
1755 | err: | |
1756 | kfree(srq->rqe_wr_id_tbl); | |
1757 | kfree(srq->idx_bit_fields); | |
1758 | kfree(srq); | |
1759 | return ERR_PTR(status); | |
1760 | } | |
1761 | ||
1762 | int ocrdma_modify_srq(struct ib_srq *ibsrq, | |
1763 | struct ib_srq_attr *srq_attr, | |
1764 | enum ib_srq_attr_mask srq_attr_mask, | |
1765 | struct ib_udata *udata) | |
1766 | { | |
1767 | int status = 0; | |
1768 | struct ocrdma_srq *srq; | |
fe2caefc PP |
1769 | |
1770 | srq = get_ocrdma_srq(ibsrq); | |
fe2caefc PP |
1771 | if (srq_attr_mask & IB_SRQ_MAX_WR) |
1772 | status = -EINVAL; | |
1773 | else | |
1774 | status = ocrdma_mbx_modify_srq(srq, srq_attr); | |
1775 | return status; | |
1776 | } | |
1777 | ||
1778 | int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | |
1779 | { | |
1780 | int status; | |
1781 | struct ocrdma_srq *srq; | |
fe2caefc PP |
1782 | |
1783 | srq = get_ocrdma_srq(ibsrq); | |
fe2caefc PP |
1784 | status = ocrdma_mbx_query_srq(srq, srq_attr); |
1785 | return status; | |
1786 | } | |
1787 | ||
1788 | int ocrdma_destroy_srq(struct ib_srq *ibsrq) | |
1789 | { | |
1790 | int status; | |
1791 | struct ocrdma_srq *srq; | |
1afc0454 | 1792 | struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device); |
fe2caefc PP |
1793 | |
1794 | srq = get_ocrdma_srq(ibsrq); | |
fe2caefc PP |
1795 | |
1796 | status = ocrdma_mbx_destroy_srq(dev, srq); | |
1797 | ||
1798 | if (srq->pd->uctx) | |
43a6b402 NG |
1799 | ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, |
1800 | PAGE_ALIGN(srq->rq.len)); | |
fe2caefc | 1801 | |
fe2caefc PP |
1802 | kfree(srq->idx_bit_fields); |
1803 | kfree(srq->rqe_wr_id_tbl); | |
1804 | kfree(srq); | |
1805 | return status; | |
1806 | } | |
1807 | ||
1808 | /* unprivileged verbs and their support functions. */ | |
1809 | static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, | |
1810 | struct ocrdma_hdr_wqe *hdr, | |
1811 | struct ib_send_wr *wr) | |
1812 | { | |
1813 | struct ocrdma_ewqe_ud_hdr *ud_hdr = | |
1814 | (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); | |
1815 | struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); | |
1816 | ||
1817 | ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; | |
1818 | if (qp->qp_type == IB_QPT_GSI) | |
1819 | ud_hdr->qkey = qp->qkey; | |
1820 | else | |
1821 | ud_hdr->qkey = wr->wr.ud.remote_qkey; | |
1822 | ud_hdr->rsvd_ahid = ah->id; | |
1823 | } | |
1824 | ||
1825 | static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, | |
1826 | struct ocrdma_sge *sge, int num_sge, | |
1827 | struct ib_sge *sg_list) | |
1828 | { | |
1829 | int i; | |
1830 | ||
1831 | for (i = 0; i < num_sge; i++) { | |
1832 | sge[i].lrkey = sg_list[i].lkey; | |
1833 | sge[i].addr_lo = sg_list[i].addr; | |
1834 | sge[i].addr_hi = upper_32_bits(sg_list[i].addr); | |
1835 | sge[i].len = sg_list[i].length; | |
1836 | hdr->total_len += sg_list[i].length; | |
1837 | } | |
1838 | if (num_sge == 0) | |
1839 | memset(sge, 0, sizeof(*sge)); | |
1840 | } | |
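/* When num_sge is 0 a single zeroed SGE is still written, and the callers
 * account for one struct ocrdma_sge in the wqe size, presumably so that
 * the hardware always sees at least one (zero-length) SGE entry.
 */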
1841 | ||
117e6dd1 NG |
1842 | static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge) |
1843 | { | |
1844 | uint32_t total_len = 0, i; | |
1845 | ||
1846 | for (i = 0; i < num_sge; i++) | |
1847 | total_len += sg_list[i].length; | |
1848 | return total_len; | |
1849 | } | |
1850 | ||
1851 | ||
fe2caefc PP |
1852 | static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, |
1853 | struct ocrdma_hdr_wqe *hdr, | |
1854 | struct ocrdma_sge *sge, | |
1855 | struct ib_send_wr *wr, u32 wqe_size) | |
1856 | { | |
117e6dd1 NG |
1857 | int i; |
1858 | char *dpp_addr; | |
1859 | ||
43a6b402 | 1860 | if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { |
117e6dd1 NG |
1861 | hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); |
1862 | if (unlikely(hdr->total_len > qp->max_inline_data)) { | |
ef99c4c2 NG |
1863 | pr_err("%s() supported_len=0x%x,\n" |
1864 | " unspported len req=0x%x\n", __func__, | |
117e6dd1 | 1865 | qp->max_inline_data, hdr->total_len); |
fe2caefc PP |
1866 | return -EINVAL; |
1867 | } | |
117e6dd1 NG |
1868 | dpp_addr = (char *)sge; |
1869 | for (i = 0; i < wr->num_sge; i++) { | |
1870 | memcpy(dpp_addr, | |
1871 | (void *)(unsigned long)wr->sg_list[i].addr, | |
1872 | wr->sg_list[i].length); | |
1873 | dpp_addr += wr->sg_list[i].length; | |
1874 | } | |
1875 | ||
fe2caefc | 1876 | wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES); |
117e6dd1 | 1877 | if (0 == hdr->total_len) |
43a6b402 | 1878 | wqe_size += sizeof(struct ocrdma_sge); |
fe2caefc PP |
1879 | hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT); |
1880 | } else { | |
1881 | ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); | |
1882 | if (wr->num_sge) | |
1883 | wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge)); | |
1884 | else | |
1885 | wqe_size += sizeof(struct ocrdma_sge); | |
1886 | hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | |
1887 | } | |
1888 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); | |
1889 | return 0; | |
1890 | } | |
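/* For inline sends the payload is copied directly into the SGE area of
 * the WQE (the dpp_addr pointer above), so no lkey/address is handed to
 * the adapter. In both branches the WQE size placed in hdr->cw is encoded
 * in units of OCRDMA_WQE_STRIDE, which is why wqe_size is divided by it.
 */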
1891 | ||
1892 | static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |
1893 | struct ib_send_wr *wr) | |
1894 | { | |
1895 | int status; | |
1896 | struct ocrdma_sge *sge; | |
1897 | u32 wqe_size = sizeof(*hdr); | |
1898 | ||
1899 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { | |
1900 | ocrdma_build_ud_hdr(qp, hdr, wr); | |
1901 | sge = (struct ocrdma_sge *)(hdr + 2); | |
1902 | wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr); | |
f99b1649 | 1903 | } else { |
fe2caefc | 1904 | sge = (struct ocrdma_sge *)(hdr + 1); |
f99b1649 | 1905 | } |
fe2caefc PP |
1906 | |
1907 | status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); | |
1908 | return status; | |
1909 | } | |
1910 | ||
1911 | static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |
1912 | struct ib_send_wr *wr) | |
1913 | { | |
1914 | int status; | |
1915 | struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); | |
1916 | struct ocrdma_sge *sge = ext_rw + 1; | |
1917 | u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw); | |
1918 | ||
1919 | status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); | |
1920 | if (status) | |
1921 | return status; | |
1922 | ext_rw->addr_lo = wr->wr.rdma.remote_addr; | |
1923 | ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); | |
1924 | ext_rw->lrkey = wr->wr.rdma.rkey; | |
1925 | ext_rw->len = hdr->total_len; | |
1926 | return 0; | |
1927 | } | |
1928 | ||
1929 | static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |
1930 | struct ib_send_wr *wr) | |
1931 | { | |
1932 | struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); | |
1933 | struct ocrdma_sge *sge = ext_rw + 1; | |
1934 | u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) + | |
1935 | sizeof(struct ocrdma_hdr_wqe); | |
1936 | ||
1937 | ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); | |
1938 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); | |
1939 | hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); | |
1940 | hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | |
1941 | ||
1942 | ext_rw->addr_lo = wr->wr.rdma.remote_addr; | |
1943 | ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); | |
1944 | ext_rw->lrkey = wr->wr.rdma.rkey; | |
1945 | ext_rw->len = hdr->total_len; | |
1946 | } | |
1947 | ||
7c33880c NG |
1948 | static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, |
1949 | struct ocrdma_hw_mr *hwmr) | |
1950 | { | |
1951 | int i; | |
1952 | u64 buf_addr = 0; | |
1953 | int num_pbes; | |
1954 | struct ocrdma_pbe *pbe; | |
1955 | ||
1956 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | |
1957 | num_pbes = 0; | |
1958 | ||
1959 | /* go through the OS phy regions & fill hw pbe entries into pbls. */ | |
1960 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | |
1961 | /* one OS buf can need more than one pbe when | |
1962 | * buffers are of different sizes; | |
1963 | * split the ib_buf into one or more pbes. | |
1964 | */ | |
1965 | buf_addr = wr->wr.fast_reg.page_list->page_list[i]; | |
1966 | pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK)); | |
1967 | pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr)); | |
1968 | num_pbes += 1; | |
1969 | pbe++; | |
1970 | ||
1971 | /* if the pbl is full storing the pbes, | |
1972 | * move to next pbl. | |
1973 | */ | |
1974 | if (num_pbes == (hwmr->pbl_size/sizeof(u64))) { | |
1975 | pbl_tbl++; | |
1976 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | |
1977 | } | |
1978 | } | |
1979 | return; | |
1980 | } | |
1981 | ||
1982 | static int get_encoded_page_size(int pg_sz) | |
1983 | { | |
1984 | /* Max size is 256M (4096 << 16) */ | |
1985 | int i = 0; | |
1986 | for (; i < 17; i++) | |
1987 | if (pg_sz == (4096 << i)) | |
1988 | break; | |
1989 | return i; | |
1990 | } | |
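/* The encoding is simply the power-of-two exponent above 4K:
 * 4096 -> 0, 8192 -> 1, 65536 -> 4, ..., 256M (4096 << 16) -> 16.
 * A pg_sz that is not an exact (4096 << i) value falls out of the loop
 * and yields 17; the caller appears to rely on pg_sz always being valid.
 */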
1991 | ||
1992 | ||
1993 | static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |
1994 | struct ib_send_wr *wr) | |
1995 | { | |
1996 | u64 fbo; | |
1997 | struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); | |
1998 | struct ocrdma_mr *mr; | |
1999 | u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); | |
2000 | ||
2001 | wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); | |
2002 | ||
d5e3f378 | 2003 | if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) |
7c33880c NG |
2004 | return -EINVAL; |
2005 | ||
2006 | hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); | |
2007 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); | |
2008 | ||
2009 | if (wr->wr.fast_reg.page_list_len == 0) | |
2010 | BUG(); | |
2011 | if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE) | |
2012 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR; | |
2013 | if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE) | |
2014 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR; | |
2015 | if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ) | |
2016 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD; | |
2017 | hdr->lkey = wr->wr.fast_reg.rkey; | |
2018 | hdr->total_len = wr->wr.fast_reg.length; | |
2019 | ||
2020 | fbo = wr->wr.fast_reg.iova_start - | |
2021 | (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK); | |
2022 | ||
2023 | fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start); | |
2024 | fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff); | |
2025 | fast_reg->fbo_hi = upper_32_bits(fbo); | |
2026 | fast_reg->fbo_lo = (u32) fbo & 0xffffffff; | |
2027 | fast_reg->num_sges = wr->wr.fast_reg.page_list_len; | |
2028 | fast_reg->size_sge = | |
2029 | get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); | |
fad51b7d | 2030 | mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) & |
7c33880c NG |
2031 | (OCRDMA_MAX_STAG - 1)]; |
2032 | build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); | |
2033 | return 0; | |
2034 | } | |
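/* fbo ("first byte offset") is the offset of iova_start within the first
 * registered page: e.g. with page_list[0] = 0x10002000 and
 * iova_start = 0x10002345, fbo = 0x345. The stag_arr lookup uses
 * (lkey >> 8) as the MR index, matching how ocrdma_alloc_frmr() stores
 * the MR; the low 8 bits are presumably the key/counter part of the stag.
 */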
2035 | ||
fe2caefc PP |
2036 | static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) |
2037 | { | |
2df84fa8 | 2038 | u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); |
fe2caefc PP |
2039 | |
2040 | iowrite32(val, qp->sq_db); | |
2041 | } | |
2042 | ||
2043 | int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
2044 | struct ib_send_wr **bad_wr) | |
2045 | { | |
2046 | int status = 0; | |
2047 | struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); | |
2048 | struct ocrdma_hdr_wqe *hdr; | |
2049 | unsigned long flags; | |
2050 | ||
2051 | spin_lock_irqsave(&qp->q_lock, flags); | |
2052 | if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { | |
2053 | spin_unlock_irqrestore(&qp->q_lock, flags); | |
f6ddcf71 | 2054 | *bad_wr = wr; |
fe2caefc PP |
2055 | return -EINVAL; |
2056 | } | |
2057 | ||
2058 | while (wr) { | |
2059 | if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || | |
2060 | wr->num_sge > qp->sq.max_sges) { | |
f6ddcf71 | 2061 | *bad_wr = wr; |
fe2caefc PP |
2062 | status = -ENOMEM; |
2063 | break; | |
2064 | } | |
2065 | hdr = ocrdma_hwq_head(&qp->sq); | |
2066 | hdr->cw = 0; | |
2b51a9b9 | 2067 | if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) |
fe2caefc PP |
2068 | hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); |
2069 | if (wr->send_flags & IB_SEND_FENCE) | |
2070 | hdr->cw |= | |
2071 | (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT); | |
2072 | if (wr->send_flags & IB_SEND_SOLICITED) | |
2073 | hdr->cw |= | |
2074 | (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT); | |
2075 | hdr->total_len = 0; | |
2076 | switch (wr->opcode) { | |
2077 | case IB_WR_SEND_WITH_IMM: | |
2078 | hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); | |
2079 | hdr->immdt = ntohl(wr->ex.imm_data); | |
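/* no break: SEND_WITH_IMM falls through to the common SEND handling */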
2080 | case IB_WR_SEND: | |
2081 | hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); | |
2082 | status = ocrdma_build_send(qp, hdr, wr); | |
2083 | break; | |
2084 | case IB_WR_SEND_WITH_INV: | |
2085 | hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); | |
2086 | hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); | |
2087 | hdr->lkey = wr->ex.invalidate_rkey; | |
2088 | status = ocrdma_build_send(qp, hdr, wr); | |
2089 | break; | |
2090 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
2091 | hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); | |
2092 | hdr->immdt = ntohl(wr->ex.imm_data); | |
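/* no break: WRITE_WITH_IMM falls through to the common WRITE handling */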
2093 | case IB_WR_RDMA_WRITE: | |
2094 | hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); | |
2095 | status = ocrdma_build_write(qp, hdr, wr); | |
2096 | break; | |
2097 | case IB_WR_RDMA_READ_WITH_INV: | |
2098 | hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); | |
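/* no break: READ_WITH_INV falls through to the common READ handling */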
2099 | case IB_WR_RDMA_READ: | |
2100 | ocrdma_build_read(qp, hdr, wr); | |
2101 | break; | |
2102 | case IB_WR_LOCAL_INV: | |
2103 | hdr->cw |= | |
2104 | (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT); | |
7c33880c NG |
2105 | hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) + |
2106 | sizeof(struct ocrdma_sge)) / | |
fe2caefc PP |
2107 | OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT; |
2108 | hdr->lkey = wr->ex.invalidate_rkey; | |
2109 | break; | |
7c33880c NG |
2110 | case IB_WR_FAST_REG_MR: |
2111 | status = ocrdma_build_fr(qp, hdr, wr); | |
2112 | break; | |
fe2caefc PP |
2113 | default: |
2114 | status = -EINVAL; | |
2115 | break; | |
2116 | } | |
2117 | if (status) { | |
2118 | *bad_wr = wr; | |
2119 | break; | |
2120 | } | |
2b51a9b9 | 2121 | if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) |
fe2caefc PP |
2122 | qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; |
2123 | else | |
2124 | qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; | |
2125 | qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; | |
2126 | ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) & | |
2127 | OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE); | |
2128 | /* make sure wqe is written before adapter can access it */ | |
2129 | wmb(); | |
2130 | /* inform hw to start processing it */ | |
2131 | ocrdma_ring_sq_db(qp); | |
2132 | ||
2133 | /* update pointer, counter for next wr */ | |
2134 | ocrdma_hwq_inc_head(&qp->sq); | |
2135 | wr = wr->next; | |
2136 | } | |
2137 | spin_unlock_irqrestore(&qp->q_lock, flags); | |
2138 | return status; | |
2139 | } | |
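/* Note that the SQ doorbell is rung once per WQE inside the loop above
 * (after the wmb()), rather than once per ocrdma_post_send() call,
 * presumably so the adapter can start on earlier WQEs while later ones
 * in the same list are still being built.
 */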
2140 | ||
2141 | static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) | |
2142 | { | |
2df84fa8 | 2143 | u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT); |
fe2caefc | 2144 | |
2df84fa8 | 2145 | iowrite32(val, qp->rq_db); |
fe2caefc PP |
2146 | } |
2147 | ||
2148 | static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, | |
2149 | u16 tag) | |
2150 | { | |
2151 | u32 wqe_size = 0; | |
2152 | struct ocrdma_sge *sge; | |
2153 | if (wr->num_sge) | |
2154 | wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe); | |
2155 | else | |
2156 | wqe_size = sizeof(*sge) + sizeof(*rqe); | |
2157 | ||
2158 | rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << | |
2159 | OCRDMA_WQE_SIZE_SHIFT); | |
2160 | rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); | |
2161 | rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | |
2162 | rqe->total_len = 0; | |
2163 | rqe->rsvd_tag = tag; | |
2164 | sge = (struct ocrdma_sge *)(rqe + 1); | |
2165 | ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list); | |
2166 | ocrdma_cpu_to_le32(rqe, wqe_size); | |
2167 | } | |
2168 | ||
2169 | int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
2170 | struct ib_recv_wr **bad_wr) | |
2171 | { | |
2172 | int status = 0; | |
2173 | unsigned long flags; | |
2174 | struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); | |
2175 | struct ocrdma_hdr_wqe *rqe; | |
2176 | ||
2177 | spin_lock_irqsave(&qp->q_lock, flags); | |
2178 | if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { | |
2179 | spin_unlock_irqrestore(&qp->q_lock, flags); | |
2180 | *bad_wr = wr; | |
2181 | return -EINVAL; | |
2182 | } | |
2183 | while (wr) { | |
2184 | if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || | |
2185 | wr->num_sge > qp->rq.max_sges) { | |
2186 | *bad_wr = wr; | |
2187 | status = -ENOMEM; | |
2188 | break; | |
2189 | } | |
2190 | rqe = ocrdma_hwq_head(&qp->rq); | |
2191 | ocrdma_build_rqe(rqe, wr, 0); | |
2192 | ||
2193 | qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; | |
2194 | /* make sure rqe is written before adapter can access it */ | |
2195 | wmb(); | |
2196 | ||
2197 | /* inform hw to start processing it */ | |
2198 | ocrdma_ring_rq_db(qp); | |
2199 | ||
2200 | /* update pointer, counter for next wr */ | |
2201 | ocrdma_hwq_inc_head(&qp->rq); | |
2202 | wr = wr->next; | |
2203 | } | |
2204 | spin_unlock_irqrestore(&qp->q_lock, flags); | |
2205 | return status; | |
2206 | } | |
2207 | ||
2208 | /* cqes for an srq's rqes can potentially arrive out of order. | |
2209 | * the index gives the entry in the shadow table in which to store | |
2210 | * the wr_id; the tag/index is returned in the cqe to reference | |
2211 | * back to a given rqe. | |
2212 | */ | |
2213 | static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) | |
2214 | { | |
2215 | int row = 0; | |
2216 | int indx = 0; | |
2217 | ||
2218 | for (row = 0; row < srq->bit_fields_len; row++) { | |
2219 | if (srq->idx_bit_fields[row]) { | |
2220 | indx = ffs(srq->idx_bit_fields[row]); | |
2221 | indx = (row * 32) + (indx - 1); | |
2222 | if (indx >= srq->rq.max_cnt) | |
2223 | BUG(); | |
2224 | ocrdma_srq_toggle_bit(srq, indx); | |
2225 | break; | |
2226 | } | |
2227 | } | |
2228 | ||
2229 | if (row == srq->bit_fields_len) | |
2230 | BUG(); | |
cf5788ad | 2231 | return indx + 1; /* Use from index 1 */ |
fe2caefc PP |
2232 | } |
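/* Indices handed to the hardware as rqe tags are 1-based ("Use from
 * index 1" above): a buftag of 0 in a cqe can then be treated as invalid,
 * which the wqe_idx < 1 BUG() checks in the completion paths appear to
 * rely on. The corresponding bit is toggled back (freed) when the
 * completion for that tag arrives.
 */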
2233 | ||
2234 | static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) | |
2235 | { | |
2236 | u32 val = srq->rq.dbid | (1 << 16); | |
2237 | ||
2238 | iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET); | |
2239 | } | |
2240 | ||
2241 | int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |
2242 | struct ib_recv_wr **bad_wr) | |
2243 | { | |
2244 | int status = 0; | |
2245 | unsigned long flags; | |
2246 | struct ocrdma_srq *srq; | |
2247 | struct ocrdma_hdr_wqe *rqe; | |
2248 | u16 tag; | |
2249 | ||
2250 | srq = get_ocrdma_srq(ibsrq); | |
2251 | ||
2252 | spin_lock_irqsave(&srq->q_lock, flags); | |
2253 | while (wr) { | |
2254 | if (ocrdma_hwq_free_cnt(&srq->rq) == 0 || | |
2255 | wr->num_sge > srq->rq.max_sges) { | |
2256 | status = -ENOMEM; | |
2257 | *bad_wr = wr; | |
2258 | break; | |
2259 | } | |
2260 | tag = ocrdma_srq_get_idx(srq); | |
2261 | rqe = ocrdma_hwq_head(&srq->rq); | |
2262 | ocrdma_build_rqe(rqe, wr, tag); | |
2263 | ||
2264 | srq->rqe_wr_id_tbl[tag] = wr->wr_id; | |
2265 | /* make sure rqe is written before adapter can perform DMA */ | |
2266 | wmb(); | |
2267 | /* inform hw to start processing it */ | |
2268 | ocrdma_ring_srq_db(srq); | |
2269 | /* update pointer, counter for next wr */ | |
2270 | ocrdma_hwq_inc_head(&srq->rq); | |
2271 | wr = wr->next; | |
2272 | } | |
2273 | spin_unlock_irqrestore(&srq->q_lock, flags); | |
2274 | return status; | |
2275 | } | |
2276 | ||
2277 | static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) | |
2278 | { | |
f99b1649 | 2279 | enum ib_wc_status ibwc_status; |
fe2caefc PP |
2280 | |
2281 | switch (status) { | |
2282 | case OCRDMA_CQE_GENERAL_ERR: | |
2283 | ibwc_status = IB_WC_GENERAL_ERR; | |
2284 | break; | |
2285 | case OCRDMA_CQE_LOC_LEN_ERR: | |
2286 | ibwc_status = IB_WC_LOC_LEN_ERR; | |
2287 | break; | |
2288 | case OCRDMA_CQE_LOC_QP_OP_ERR: | |
2289 | ibwc_status = IB_WC_LOC_QP_OP_ERR; | |
2290 | break; | |
2291 | case OCRDMA_CQE_LOC_EEC_OP_ERR: | |
2292 | ibwc_status = IB_WC_LOC_EEC_OP_ERR; | |
2293 | break; | |
2294 | case OCRDMA_CQE_LOC_PROT_ERR: | |
2295 | ibwc_status = IB_WC_LOC_PROT_ERR; | |
2296 | break; | |
2297 | case OCRDMA_CQE_WR_FLUSH_ERR: | |
2298 | ibwc_status = IB_WC_WR_FLUSH_ERR; | |
2299 | break; | |
2300 | case OCRDMA_CQE_MW_BIND_ERR: | |
2301 | ibwc_status = IB_WC_MW_BIND_ERR; | |
2302 | break; | |
2303 | case OCRDMA_CQE_BAD_RESP_ERR: | |
2304 | ibwc_status = IB_WC_BAD_RESP_ERR; | |
2305 | break; | |
2306 | case OCRDMA_CQE_LOC_ACCESS_ERR: | |
2307 | ibwc_status = IB_WC_LOC_ACCESS_ERR; | |
2308 | break; | |
2309 | case OCRDMA_CQE_REM_INV_REQ_ERR: | |
2310 | ibwc_status = IB_WC_REM_INV_REQ_ERR; | |
2311 | break; | |
2312 | case OCRDMA_CQE_REM_ACCESS_ERR: | |
2313 | ibwc_status = IB_WC_REM_ACCESS_ERR; | |
2314 | break; | |
2315 | case OCRDMA_CQE_REM_OP_ERR: | |
2316 | ibwc_status = IB_WC_REM_OP_ERR; | |
2317 | break; | |
2318 | case OCRDMA_CQE_RETRY_EXC_ERR: | |
2319 | ibwc_status = IB_WC_RETRY_EXC_ERR; | |
2320 | break; | |
2321 | case OCRDMA_CQE_RNR_RETRY_EXC_ERR: | |
2322 | ibwc_status = IB_WC_RNR_RETRY_EXC_ERR; | |
2323 | break; | |
2324 | case OCRDMA_CQE_LOC_RDD_VIOL_ERR: | |
2325 | ibwc_status = IB_WC_LOC_RDD_VIOL_ERR; | |
2326 | break; | |
2327 | case OCRDMA_CQE_REM_INV_RD_REQ_ERR: | |
2328 | ibwc_status = IB_WC_REM_INV_RD_REQ_ERR; | |
2329 | break; | |
2330 | case OCRDMA_CQE_REM_ABORT_ERR: | |
2331 | ibwc_status = IB_WC_REM_ABORT_ERR; | |
2332 | break; | |
2333 | case OCRDMA_CQE_INV_EECN_ERR: | |
2334 | ibwc_status = IB_WC_INV_EECN_ERR; | |
2335 | break; | |
2336 | case OCRDMA_CQE_INV_EEC_STATE_ERR: | |
2337 | ibwc_status = IB_WC_INV_EEC_STATE_ERR; | |
2338 | break; | |
2339 | case OCRDMA_CQE_FATAL_ERR: | |
2340 | ibwc_status = IB_WC_FATAL_ERR; | |
2341 | break; | |
2342 | case OCRDMA_CQE_RESP_TIMEOUT_ERR: | |
2343 | ibwc_status = IB_WC_RESP_TIMEOUT_ERR; | |
2344 | break; | |
2345 | default: | |
2346 | ibwc_status = IB_WC_GENERAL_ERR; | |
2347 | break; | |
2b50176d | 2348 | } |
fe2caefc PP |
2349 | return ibwc_status; |
2350 | } | |
2351 | ||
2352 | static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, | |
2353 | u32 wqe_idx) | |
2354 | { | |
2355 | struct ocrdma_hdr_wqe *hdr; | |
2356 | struct ocrdma_sge *rw; | |
2357 | int opcode; | |
2358 | ||
2359 | hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); | |
2360 | ||
2361 | ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; | |
2362 | /* Undo the hdr->cw swap */ | |
2363 | opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK; | |
2364 | switch (opcode) { | |
2365 | case OCRDMA_WRITE: | |
2366 | ibwc->opcode = IB_WC_RDMA_WRITE; | |
2367 | break; | |
2368 | case OCRDMA_READ: | |
2369 | rw = (struct ocrdma_sge *)(hdr + 1); | |
2370 | ibwc->opcode = IB_WC_RDMA_READ; | |
2371 | ibwc->byte_len = rw->len; | |
2372 | break; | |
2373 | case OCRDMA_SEND: | |
2374 | ibwc->opcode = IB_WC_SEND; | |
2375 | break; | |
7c33880c NG |
2376 | case OCRDMA_FR_MR: |
2377 | ibwc->opcode = IB_WC_FAST_REG_MR; | |
2378 | break; | |
fe2caefc PP |
2379 | case OCRDMA_LKEY_INV: |
2380 | ibwc->opcode = IB_WC_LOCAL_INV; | |
2381 | break; | |
2382 | default: | |
2383 | ibwc->status = IB_WC_GENERAL_ERR; | |
ef99c4c2 NG |
2384 | pr_err("%s() invalid opcode received = 0x%x\n", |
2385 | __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); | |
fe2caefc | 2386 | break; |
2b50176d | 2387 | } |
fe2caefc PP |
2388 | } |
2389 | ||
2390 | static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, | |
2391 | struct ocrdma_cqe *cqe) | |
2392 | { | |
2393 | if (is_cqe_for_sq(cqe)) { | |
2394 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | |
2395 | cqe->flags_status_srcqpn) & | |
2396 | ~OCRDMA_CQE_STATUS_MASK); | |
2397 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | |
2398 | cqe->flags_status_srcqpn) | | |
2399 | (OCRDMA_CQE_WR_FLUSH_ERR << | |
2400 | OCRDMA_CQE_STATUS_SHIFT)); | |
2401 | } else { | |
2402 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { | |
2403 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | |
2404 | cqe->flags_status_srcqpn) & | |
2405 | ~OCRDMA_CQE_UD_STATUS_MASK); | |
2406 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | |
2407 | cqe->flags_status_srcqpn) | | |
2408 | (OCRDMA_CQE_WR_FLUSH_ERR << | |
2409 | OCRDMA_CQE_UD_STATUS_SHIFT)); | |
2410 | } else { | |
2411 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | |
2412 | cqe->flags_status_srcqpn) & | |
2413 | ~OCRDMA_CQE_STATUS_MASK); | |
2414 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | |
2415 | cqe->flags_status_srcqpn) | | |
2416 | (OCRDMA_CQE_WR_FLUSH_ERR << | |
2417 | OCRDMA_CQE_STATUS_SHIFT)); | |
2418 | } | |
2419 | } | |
2420 | } | |
2421 | ||
2422 | static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, | |
2423 | struct ocrdma_qp *qp, int status) | |
2424 | { | |
2425 | bool expand = false; | |
2426 | ||
2427 | ibwc->byte_len = 0; | |
2428 | ibwc->qp = &qp->ibqp; | |
2429 | ibwc->status = ocrdma_to_ibwc_err(status); | |
2430 | ||
2431 | ocrdma_flush_qp(qp); | |
057729cb | 2432 | ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL); |
fe2caefc PP |
2433 | |
2434 | /* if a wqe/rqe is still pending for which a cqe must be | |
2435 | * returned, trigger expanding it into a flush cqe. | |
2436 | */ | |
2437 | if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { | |
2438 | expand = true; | |
2439 | ocrdma_set_cqe_status_flushed(qp, cqe); | |
2440 | } | |
2441 | return expand; | |
2442 | } | |
2443 | ||
2444 | static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, | |
2445 | struct ocrdma_qp *qp, int status) | |
2446 | { | |
2447 | ibwc->opcode = IB_WC_RECV; | |
2448 | ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; | |
2449 | ocrdma_hwq_inc_tail(&qp->rq); | |
2450 | ||
2451 | return ocrdma_update_err_cqe(ibwc, cqe, qp, status); | |
2452 | } | |
2453 | ||
2454 | static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, | |
2455 | struct ocrdma_qp *qp, int status) | |
2456 | { | |
2457 | ocrdma_update_wc(qp, ibwc, qp->sq.tail); | |
2458 | ocrdma_hwq_inc_tail(&qp->sq); | |
2459 | ||
2460 | return ocrdma_update_err_cqe(ibwc, cqe, qp, status); | |
2461 | } | |
2462 | ||
2463 | ||
2464 | static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, | |
2465 | struct ocrdma_cqe *cqe, struct ib_wc *ibwc, | |
2466 | bool *polled, bool *stop) | |
2467 | { | |
2468 | bool expand; | |
2469 | int status = (le32_to_cpu(cqe->flags_status_srcqpn) & | |
2470 | OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; | |
2471 | ||
2472 | /* when the hw sq is empty but the rq is not, keep the | |
2473 | * cqe so that the cq event is raised again. | |
2474 | */ | |
2475 | if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { | |
2476 | /* when the cq for the rq and sq is the same, it is safe to | |
2477 | * return a flush cqe for the RQEs. | |
2478 | */ | |
2479 | if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { | |
2480 | *polled = true; | |
2481 | status = OCRDMA_CQE_WR_FLUSH_ERR; | |
2482 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); | |
2483 | } else { | |
2484 | /* stop processing further cqes, as this cqe is used for | |
2485 | * triggering the cq event on the buddy cq of the RQ. | |
2486 | * When the QP is destroyed, this cqe will be removed | |
2487 | * from the cq's hardware queue. | |
2488 | */ | |
2489 | *polled = false; | |
2490 | *stop = true; | |
2491 | expand = false; | |
2492 | } | |
2493 | } else { | |
2494 | *polled = true; | |
2495 | expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); | |
2496 | } | |
2497 | return expand; | |
2498 | } | |
2499 | ||
2500 | static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, | |
2501 | struct ocrdma_cqe *cqe, | |
2502 | struct ib_wc *ibwc, bool *polled) | |
2503 | { | |
2504 | bool expand = false; | |
2505 | int tail = qp->sq.tail; | |
2506 | u32 wqe_idx; | |
2507 | ||
2508 | if (!qp->wqe_wr_id_tbl[tail].signaled) { | |
fe2caefc PP |
2509 | *polled = false; /* WC cannot be consumed yet */ |
2510 | } else { | |
2511 | ibwc->status = IB_WC_SUCCESS; | |
2512 | ibwc->wc_flags = 0; | |
2513 | ibwc->qp = &qp->ibqp; | |
2514 | ocrdma_update_wc(qp, ibwc, tail); | |
2515 | *polled = true; | |
fe2caefc | 2516 | } |
43a6b402 NG |
2517 | wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) & |
2518 | OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; | |
ae3bca90 PP |
2519 | if (tail != wqe_idx) |
2520 | expand = true; /* Coalesced CQE can't be consumed yet */ | |
2521 | ||
fe2caefc PP |
2522 | ocrdma_hwq_inc_tail(&qp->sq); |
2523 | return expand; | |
2524 | } | |
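/* A send cqe whose wqeidx is ahead of sq.tail indicates a coalesced
 * completion: the hardware reported several (unsignaled plus one signaled)
 * WQEs with a single cqe. Returning expand = true keeps the cqe so that
 * the remaining WQEs up to wqeidx are retired on later poll iterations.
 */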
2525 | ||
2526 | static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | |
2527 | struct ib_wc *ibwc, bool *polled, bool *stop) | |
2528 | { | |
2529 | int status; | |
2530 | bool expand; | |
2531 | ||
2532 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & | |
2533 | OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; | |
2534 | ||
2535 | if (status == OCRDMA_CQE_SUCCESS) | |
2536 | expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); | |
2537 | else | |
2538 | expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); | |
2539 | return expand; | |
2540 | } | |
2541 | ||
2542 | static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) | |
2543 | { | |
2544 | int status; | |
2545 | ||
2546 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & | |
2547 | OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; | |
2548 | ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & | |
2549 | OCRDMA_CQE_SRCQP_MASK; | |
2550 | ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & | |
2551 | OCRDMA_CQE_PKEY_MASK; | |
2552 | ibwc->wc_flags = IB_WC_GRH; | |
2553 | ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> | |
2554 | OCRDMA_CQE_UD_XFER_LEN_SHIFT); | |
2555 | return status; | |
2556 | } | |
2557 | ||
2558 | static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, | |
2559 | struct ocrdma_cqe *cqe, | |
2560 | struct ocrdma_qp *qp) | |
2561 | { | |
2562 | unsigned long flags; | |
2563 | struct ocrdma_srq *srq; | |
2564 | u32 wqe_idx; | |
2565 | ||
2566 | srq = get_ocrdma_srq(qp->ibqp.srq); | |
43a6b402 | 2567 | wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> |
cf5788ad SX |
2568 | OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; |
2569 | if (wqe_idx < 1) | |
2570 | BUG(); | |
2571 | ||
fe2caefc PP |
2572 | ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; |
2573 | spin_lock_irqsave(&srq->q_lock, flags); | |
cf5788ad | 2574 | ocrdma_srq_toggle_bit(srq, wqe_idx - 1); |
fe2caefc PP |
2575 | spin_unlock_irqrestore(&srq->q_lock, flags); |
2576 | ocrdma_hwq_inc_tail(&srq->rq); | |
2577 | } | |
2578 | ||
2579 | static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | |
2580 | struct ib_wc *ibwc, bool *polled, bool *stop, | |
2581 | int status) | |
2582 | { | |
2583 | bool expand; | |
2584 | ||
2585 | /* when the hw rq is empty but the sq is not, keep the | |
2586 | * cqe so that the cq event is raised again. | |
2587 | */ | |
2588 | if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { | |
2589 | if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { | |
2590 | *polled = true; | |
2591 | status = OCRDMA_CQE_WR_FLUSH_ERR; | |
2592 | expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); | |
2593 | } else { | |
2594 | *polled = false; | |
2595 | *stop = true; | |
2596 | expand = false; | |
2597 | } | |
a3698a9b PP |
2598 | } else { |
2599 | *polled = true; | |
fe2caefc | 2600 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); |
a3698a9b | 2601 | } |
fe2caefc PP |
2602 | return expand; |
2603 | } | |
2604 | ||
2605 | static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, | |
2606 | struct ocrdma_cqe *cqe, struct ib_wc *ibwc) | |
2607 | { | |
2608 | ibwc->opcode = IB_WC_RECV; | |
2609 | ibwc->qp = &qp->ibqp; | |
2610 | ibwc->status = IB_WC_SUCCESS; | |
2611 | ||
2612 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) | |
2613 | ocrdma_update_ud_rcqe(ibwc, cqe); | |
2614 | else | |
2615 | ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen); | |
2616 | ||
2617 | if (is_cqe_imm(cqe)) { | |
2618 | ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); | |
2619 | ibwc->wc_flags |= IB_WC_WITH_IMM; | |
2620 | } else if (is_cqe_wr_imm(cqe)) { | |
2621 | ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM; | |
2622 | ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); | |
2623 | ibwc->wc_flags |= IB_WC_WITH_IMM; | |
2624 | } else if (is_cqe_invalidated(cqe)) { | |
2625 | ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt); | |
2626 | ibwc->wc_flags |= IB_WC_WITH_INVALIDATE; | |
2627 | } | |
f99b1649 | 2628 | if (qp->ibqp.srq) { |
fe2caefc | 2629 | ocrdma_update_free_srq_cqe(ibwc, cqe, qp); |
f99b1649 | 2630 | } else { |
fe2caefc PP |
2631 | ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; |
2632 | ocrdma_hwq_inc_tail(&qp->rq); | |
2633 | } | |
2634 | } | |
2635 | ||
2636 | static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | |
2637 | struct ib_wc *ibwc, bool *polled, bool *stop) | |
2638 | { | |
2639 | int status; | |
2640 | bool expand = false; | |
2641 | ||
2642 | ibwc->wc_flags = 0; | |
f99b1649 | 2643 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { |
fe2caefc PP |
2644 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & |
2645 | OCRDMA_CQE_UD_STATUS_MASK) >> | |
2646 | OCRDMA_CQE_UD_STATUS_SHIFT; | |
f99b1649 | 2647 | } else { |
fe2caefc PP |
2648 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & |
2649 | OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; | |
f99b1649 | 2650 | } |
fe2caefc PP |
2651 | |
2652 | if (status == OCRDMA_CQE_SUCCESS) { | |
2653 | *polled = true; | |
2654 | ocrdma_poll_success_rcqe(qp, cqe, ibwc); | |
2655 | } else { | |
2656 | expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, | |
2657 | status); | |
2658 | } | |
2659 | return expand; | |
2660 | } | |
2661 | ||
2662 | static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, | |
2663 | u16 cur_getp) | |
2664 | { | |
2665 | if (cq->phase_change) { | |
2666 | if (cur_getp == 0) | |
2667 | cq->phase = (~cq->phase & OCRDMA_CQE_VALID); | |
f99b1649 | 2668 | } else { |
fe2caefc PP |
2669 | /* clear valid bit */ |
2670 | cqe->flags_status_srcqpn = 0; | |
f99b1649 | 2671 | } |
fe2caefc PP |
2672 | } |
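/* Two CQE-ownership schemes are handled here: with phase_change set, the
 * expected valid-bit sense flips each time the CQ wraps (cur_getp == 0);
 * otherwise each consumed cqe simply has its valid bit cleared in place.
 */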
2673 | ||
2674 | static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, | |
2675 | struct ib_wc *ibwc) | |
2676 | { | |
2677 | u16 qpn = 0; | |
2678 | int i = 0; | |
2679 | bool expand = false; | |
2680 | int polled_hw_cqes = 0; | |
2681 | struct ocrdma_qp *qp = NULL; | |
1afc0454 | 2682 | struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); |
fe2caefc PP |
2683 | struct ocrdma_cqe *cqe; |
2684 | u16 cur_getp; bool polled = false; bool stop = false; | |
2685 | ||
2686 | cur_getp = cq->getp; | |
2687 | while (num_entries) { | |
2688 | cqe = cq->va + cur_getp; | |
2689 | /* check whether valid cqe or not */ | |
2690 | if (!is_cqe_valid(cq, cqe)) | |
2691 | break; | |
2692 | qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); | |
2693 | /* ignore discarded cqe */ | |
2694 | if (qpn == 0) | |
2695 | goto skip_cqe; | |
2696 | qp = dev->qp_tbl[qpn]; | |
2697 | BUG_ON(qp == NULL); | |
2698 | ||
2699 | if (is_cqe_for_sq(cqe)) { | |
2700 | expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled, | |
2701 | &stop); | |
2702 | } else { | |
2703 | expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled, | |
2704 | &stop); | |
2705 | } | |
2706 | if (expand) | |
2707 | goto expand_cqe; | |
2708 | if (stop) | |
2709 | goto stop_cqe; | |
2710 | /* clear qpn to avoid duplicate processing by discard_cqe() */ | |
2711 | cqe->cmn.qpn = 0; | |
2712 | skip_cqe: | |
2713 | polled_hw_cqes += 1; | |
2714 | cur_getp = (cur_getp + 1) % cq->max_hw_cqe; | |
2715 | ocrdma_change_cq_phase(cq, cqe, cur_getp); | |
2716 | expand_cqe: | |
2717 | if (polled) { | |
2718 | num_entries -= 1; | |
2719 | i += 1; | |
2720 | ibwc = ibwc + 1; | |
2721 | polled = false; | |
2722 | } | |
2723 | } | |
2724 | stop_cqe: | |
2725 | cq->getp = cur_getp; | |
ea617626 DS |
2726 | if (cq->deferred_arm) { |
2727 | ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol, | |
2728 | polled_hw_cqes); | |
2729 | cq->deferred_arm = false; | |
2730 | cq->deferred_sol = false; | |
2731 | } else { | |
2732 | /* We need to pop the CQE. No need to arm */ | |
2733 | ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol, | |
fe2caefc | 2734 | polled_hw_cqes); |
ea617626 | 2735 | cq->deferred_sol = false; |
fe2caefc | 2736 | } |
ea617626 | 2737 | |
fe2caefc PP |
2738 | return i; |
2739 | } | |
2740 | ||
2741 | /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */ | |
2742 | static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, | |
2743 | struct ocrdma_qp *qp, struct ib_wc *ibwc) | |
2744 | { | |
2745 | int err_cqes = 0; | |
2746 | ||
2747 | while (num_entries) { | |
2748 | if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp)) | |
2749 | break; | |
2750 | if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { | |
2751 | ocrdma_update_wc(qp, ibwc, qp->sq.tail); | |
2752 | ocrdma_hwq_inc_tail(&qp->sq); | |
2753 | } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { | |
2754 | ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; | |
2755 | ocrdma_hwq_inc_tail(&qp->rq); | |
f99b1649 | 2756 | } else { |
fe2caefc | 2757 | return err_cqes; |
f99b1649 | 2758 | } |
fe2caefc PP |
2759 | ibwc->byte_len = 0; |
2760 | ibwc->status = IB_WC_WR_FLUSH_ERR; | |
2761 | ibwc = ibwc + 1; | |
2762 | err_cqes += 1; | |
2763 | num_entries -= 1; | |
2764 | } | |
2765 | return err_cqes; | |
2766 | } | |
2767 | ||
2768 | int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |
2769 | { | |
2770 | int cqes_to_poll = num_entries; | |
1afc0454 NG |
2771 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); |
2772 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); | |
fe2caefc PP |
2773 | int num_os_cqe = 0, err_cqes = 0; |
2774 | struct ocrdma_qp *qp; | |
1afc0454 | 2775 | unsigned long flags; |
fe2caefc PP |
2776 | |
2777 | /* poll cqes from adapter CQ */ | |
2778 | spin_lock_irqsave(&cq->cq_lock, flags); | |
2779 | num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc); | |
2780 | spin_unlock_irqrestore(&cq->cq_lock, flags); | |
2781 | cqes_to_poll -= num_os_cqe; | |
2782 | ||
2783 | if (cqes_to_poll) { | |
2784 | wc = wc + num_os_cqe; | |
2785 | /* the adapter returns a single error cqe when a qp moves to | |
2786 | * the error state. So insert error cqes with wc_status | |
2787 | * FLUSHED for the pending WQEs and RQEs of the QP's SQ and | |
2788 | * RQ, respectively, that use this CQ. | |
2789 | */ | |
2790 | spin_lock_irqsave(&dev->flush_q_lock, flags); | |
2791 | list_for_each_entry(qp, &cq->sq_head, sq_entry) { | |
2792 | if (cqes_to_poll == 0) | |
2793 | break; | |
2794 | err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); | |
2795 | cqes_to_poll -= err_cqes; | |
2796 | num_os_cqe += err_cqes; | |
2797 | wc = wc + err_cqes; | |
2798 | } | |
2799 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); | |
2800 | } | |
2801 | return num_os_cqe; | |
2802 | } | |
2803 | ||
2804 | int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) | |
2805 | { | |
1afc0454 NG |
2806 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); |
2807 | struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); | |
fe2caefc | 2808 | u16 cq_id; |
1afc0454 | 2809 | unsigned long flags; |
ea617626 | 2810 | bool arm_needed = false, sol_needed = false; |
fe2caefc | 2811 | |
fe2caefc | 2812 | cq_id = cq->id; |
fe2caefc PP |
2813 | |
2814 | spin_lock_irqsave(&cq->cq_lock, flags); | |
2815 | if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) | |
ea617626 | 2816 | arm_needed = true; |
fe2caefc | 2817 | if (cq_flags & IB_CQ_SOLICITED) |
ea617626 | 2818 | sol_needed = true; |
fe2caefc | 2819 | |
ea617626 DS |
2820 | if (cq->first_arm) { |
2821 | ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); | |
2822 | cq->first_arm = false; | |
2823 | goto skip_defer; | |
fe2caefc | 2824 | } |
ea617626 DS |
2825 | cq->deferred_arm = true; |
2826 | ||
2827 | skip_defer: | |
2828 | cq->deferred_sol = sol_needed; | |
fe2caefc | 2829 | spin_unlock_irqrestore(&cq->cq_lock, flags); |
ea617626 | 2830 | |
fe2caefc PP |
2831 | return 0; |
2832 | } | |
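/* Only the very first arm request rings the CQ doorbell immediately; later
 * requests just set deferred_arm, and ocrdma_poll_hwcq() re-arms the CQ
 * while acknowledging the polled cqes, apparently to fold the arm into the
 * same doorbell write.
 */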
7c33880c NG |
2833 | |
2834 | struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len) | |
2835 | { | |
2836 | int status; | |
2837 | struct ocrdma_mr *mr; | |
2838 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | |
2839 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | |
2840 | ||
2841 | if (max_page_list_len > dev->attr.max_pages_per_frmr) | |
2842 | return ERR_PTR(-EINVAL); | |
2843 | ||
2844 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | |
2845 | if (!mr) | |
2846 | return ERR_PTR(-ENOMEM); | |
2847 | ||
2848 | status = ocrdma_get_pbl_info(dev, mr, max_page_list_len); | |
2849 | if (status) | |
2850 | goto pbl_err; | |
2851 | mr->hwmr.fr_mr = 1; | |
2852 | mr->hwmr.remote_rd = 0; | |
2853 | mr->hwmr.remote_wr = 0; | |
2854 | mr->hwmr.local_rd = 0; | |
2855 | mr->hwmr.local_wr = 0; | |
2856 | mr->hwmr.mw_bind = 0; | |
2857 | status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); | |
2858 | if (status) | |
2859 | goto pbl_err; | |
2860 | status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0); | |
2861 | if (status) | |
2862 | goto mbx_err; | |
2863 | mr->ibmr.rkey = mr->hwmr.lkey; | |
2864 | mr->ibmr.lkey = mr->hwmr.lkey; | |
fad51b7d | 2865 | dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64)mr; |
7c33880c NG |
2866 | return &mr->ibmr; |
2867 | mbx_err: | |
2868 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | |
2869 | pbl_err: | |
2870 | kfree(mr); | |
2871 | return ERR_PTR(-ENOMEM); | |
2872 | } | |
2873 | ||
2874 | struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device | |
2875 | *ibdev, | |
2876 | int page_list_len) | |
2877 | { | |
2878 | struct ib_fast_reg_page_list *frmr_list; | |
2879 | int size; | |
2880 | ||
2881 | size = sizeof(*frmr_list) + (page_list_len * sizeof(u64)); | |
2882 | frmr_list = kzalloc(size, GFP_KERNEL); | |
2883 | if (!frmr_list) | |
2884 | return ERR_PTR(-ENOMEM); | |
2885 | frmr_list->page_list = (u64 *)(frmr_list + 1); | |
2886 | return frmr_list; | |
2887 | } | |
2888 | ||
2889 | void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list) | |
2890 | { | |
2891 | kfree(page_list); | |
2892 | } | |
cffce990 NG |
2893 | |
2894 | #define MAX_KERNEL_PBE_SIZE 65536 | |
2895 | static inline int count_kernel_pbes(struct ib_phys_buf *buf_list, | |
2896 | int buf_cnt, u32 *pbe_size) | |
2897 | { | |
2898 | u64 total_size = 0; | |
2899 | u64 buf_size = 0; | |
2900 | int i; | |
2901 | *pbe_size = roundup(buf_list[0].size, PAGE_SIZE); | |
2902 | *pbe_size = roundup_pow_of_two(*pbe_size); | |
2903 | ||
2904 | /* find the smallest PBE size that we can have */ | |
2905 | for (i = 0; i < buf_cnt; i++) { | |
2906 | /* the first addr may not be page aligned, so skip the check for it */
2907 | if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) || | |
2908 | (buf_list[i].size & ~PAGE_MASK))) { | |
2909 | return 0; | |
2910 | } | |
2911 | ||
2912 | /* if the configured PBE size is greater than the chosen one, | |
2913 | * reduce the PBE size. | |
2914 | */ | |
2915 | buf_size = roundup(buf_list[i].size, PAGE_SIZE); | |
2916 | /* pbe_size has to be a power-of-two multiple of 4K: 1, 2, 4, 8, ... */ | |
2917 | buf_size = roundup_pow_of_two(buf_size); | |
2918 | if (*pbe_size > buf_size) | |
2919 | *pbe_size = buf_size; | |
2920 | ||
2921 | total_size += buf_size; | |
2922 | } | |
2923 | *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ? | |
2924 | (MAX_KERNEL_PBE_SIZE) : (*pbe_size); | |
2925 | ||
2926 | /* num_pbes = total_size / (*pbe_size); this is implemented below. */ | |
2927 | ||
2928 | return total_size >> ilog2(*pbe_size); | |
2929 | } | |
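/* Example of the PBE size selection (a sketch, assuming PAGE_SIZE = 4K):
 * for buf_list sizes {16K, 4K, 8K} the candidate starts at 16K, is reduced
 * to 4K by the 4K buffer, and the rounded total of 28K is reported as
 * 28K >> ilog2(4K) = 7 pbes. Any buffer other than the first whose address
 * or size is not page aligned makes this return 0, failing the registration.
 */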
2930 | ||
2931 | static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt, | |
2932 | u32 pbe_size, struct ocrdma_pbl *pbl_tbl, | |
2933 | struct ocrdma_hw_mr *hwmr) | |
2934 | { | |
2935 | int i; | |
2936 | int idx; | |
2937 | int pbes_per_buf = 0; | |
2938 | u64 buf_addr = 0; | |
2939 | int num_pbes; | |
2940 | struct ocrdma_pbe *pbe; | |
2941 | int total_num_pbes = 0; | |
2942 | ||
2943 | if (!hwmr->num_pbes) | |
2944 | return; | |
2945 | ||
2946 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | |
2947 | num_pbes = 0; | |
2948 | ||
2949 | /* go through the OS phy regions & fill hw pbe entries into pbls. */ | |
2950 | for (i = 0; i < ib_buf_cnt; i++) { | |
2951 | buf_addr = buf_list[i].addr; | |
2952 | pbes_per_buf = | |
2953 | roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) / | |
2954 | pbe_size; | |
2955 | hwmr->len += buf_list[i].size; | |
2956 | /* one OS buf can need more than one pbe when | |
2957 | * buffers are of different sizes; | |
2958 | * split the ib_buf into one or more pbes. | |
2959 | */ | |
2960 | for (idx = 0; idx < pbes_per_buf; idx++) { | |
2961 | /* we always program page-aligned addresses; the offset of the | |
2962 | * first, possibly unaligned, address is taken care of by the fbo. | |
2963 | */ | |
2964 | if (i == 0) { | |
2965 | /* for a non-zero fbo, program the | |
2966 | * start of the page. | |
2967 | */ | |
2968 | pbe->pa_lo = | |
2969 | cpu_to_le32((u32) (buf_addr & PAGE_MASK)); | |
2970 | pbe->pa_hi = | |
2971 | cpu_to_le32((u32) upper_32_bits(buf_addr)); | |
2972 | } else { | |
2973 | pbe->pa_lo = | |
2974 | cpu_to_le32((u32) (buf_addr & 0xffffffff)); | |
2975 | pbe->pa_hi = | |
2976 | cpu_to_le32((u32) upper_32_bits(buf_addr)); | |
2977 | } | |
2978 | buf_addr += pbe_size; | |
2979 | num_pbes += 1; | |
2980 | total_num_pbes += 1; | |
2981 | pbe++; | |
2982 | ||
2983 | if (total_num_pbes == hwmr->num_pbes) | |
2984 | goto mr_tbl_done; | |
2985 | /* if the pbl is full storing the pbes, | |
2986 | * move to next pbl. | |
2987 | */ | |
2988 | if (num_pbes == (hwmr->pbl_size/sizeof(u64))) { | |
2989 | pbl_tbl++; | |
2990 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | |
2991 | num_pbes = 0; | |
2992 | } | |
2993 | } | |
2994 | } | |
2995 | mr_tbl_done: | |
2996 | return; | |
2997 | } | |
2998 | ||
2999 | struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd, | |
3000 | struct ib_phys_buf *buf_list, | |
3001 | int buf_cnt, int acc, u64 *iova_start) | |
3002 | { | |
3003 | int status = -ENOMEM; | |
3004 | struct ocrdma_mr *mr; | |
3005 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | |
3006 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); | |
3007 | u32 num_pbes; | |
3008 | u32 pbe_size = 0; | |
3009 | ||
3010 | if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE)) | |
3011 | return ERR_PTR(-EINVAL); | |
3012 | ||
3013 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | |
3014 | if (!mr) | |
3015 | return ERR_PTR(status); | |
3016 | ||
3017 | num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size); | |
3018 | if (num_pbes == 0) { | |
3019 | status = -EINVAL; | |
3020 | goto pbl_err; | |
3021 | } | |
3022 | status = ocrdma_get_pbl_info(dev, mr, num_pbes); | |
3023 | if (status) | |
3024 | goto pbl_err; | |
3025 | ||
3026 | mr->hwmr.pbe_size = pbe_size; | |
3027 | mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK); | |
3028 | mr->hwmr.va = *iova_start; | |
3029 | mr->hwmr.local_rd = 1; | |
3030 | mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | |
3031 | mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; | |
3032 | mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; | |
3033 | mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; | |
3034 | mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; | |
3035 | ||
3036 | status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); | |
3037 | if (status) | |
3038 | goto pbl_err; | |
3039 | build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table, | |
3040 | &mr->hwmr); | |
3041 | status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); | |
3042 | if (status) | |
3043 | goto mbx_err; | |
3044 | ||
3045 | mr->ibmr.lkey = mr->hwmr.lkey; | |
3046 | if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | |
3047 | mr->ibmr.rkey = mr->hwmr.lkey; | |
3048 | return &mr->ibmr; | |
3049 | ||
3050 | mbx_err: | |
3051 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | |
3052 | pbl_err: | |
3053 | kfree(mr); | |
3054 | return ERR_PTR(status); | |
3055 | } |