/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

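/*
 * write_adapter_mem() copies @len bytes from @data into adapter memory at
 * 32-byte-unit offset @addr by posting FW_ULPTX_WR work requests that carry
 * the payload inline (ULP_TX_SC_IMM).  Each work request moves at most
 * C4IW_MAX_INLINE_SIZE (96) bytes, padded up to a multiple of
 * T4_ULPTX_MIN_IO (32) bytes, and advances the destination by three address
 * units.  A NULL @data zeroes the region instead.  Only the final work
 * request asks for a completion, which the caller waits for via
 * c4iw_wait_for_reply().
 */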
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                             void *data)
{
        struct sk_buff *skb;
        struct ulp_mem_io *req;
        struct ulptx_idata *sc;
        u8 wr_len, *to_dp, *from_dp;
        int copy_len, num_wqe, i, ret = 0;
        struct c4iw_wr_wait wr_wait;

        addr &= 0x7FFFFFF;
        PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
        num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
        c4iw_init_wr_wait(&wr_wait);
        for (i = 0; i < num_wqe; i++) {

                copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
                           len;
                wr_len = roundup(sizeof *req + sizeof *sc +
                                 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

                skb = alloc_skb(wr_len, GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;
                set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

                req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
                memset(req, 0, wr_len);
                INIT_ULPTX_WR(req, wr_len, 0, 0);

                if (i == (num_wqe-1)) {
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
                                                    FW_WR_COMPL(1));
                        req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
                } else
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
                req->wr.wr_mid = cpu_to_be32(
                                 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

                req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
                req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
                                DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
                req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
                                                      16));
                req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

                sc = (struct ulptx_idata *)(req + 1);
                sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
                sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

                to_dp = (u8 *)(sc + 1);
                from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
                if (data)
                        memcpy(to_dp, from_dp, copy_len);
                else
                        memset(to_dp, 0, copy_len);
                if (copy_len % T4_ULPTX_MIN_IO)
                        memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
                               (copy_len % T4_ULPTX_MIN_IO));
                ret = c4iw_ofld_send(rdev, skb);
                if (ret)
                        return ret;
                len -= C4IW_MAX_INLINE_SIZE;
        }

        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 * pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                           u32 *stag, u8 stag_state, u32 pdid,
                           enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
                           int bind_enabled, u32 zbva, u64 to,
                           u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
        int err;
        struct fw_ri_tpte tpt;
        u32 stag_idx;
        static atomic_t key;

        if (c4iw_fatal_error(rdev))
                return -EIO;

        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

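        /*
         * A new stag is built from a TPT index pulled off the tpt_fifo plus
         * a rolling 8-bit key: stag = (stag_idx << 8) | key.  The key changes
         * on every allocation, so a recycled TPT index hands out a different
         * stag value each time.
         */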
        if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
                stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
                                             &rdev->resource.tpt_fifo_lock);
                if (!stag_idx)
                        return -ENOMEM;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur += 32;
                if (rdev->stats.stag.cur > rdev->stats.stag.max)
                        rdev->stats.stag.max = rdev->stats.stag.cur;
                mutex_unlock(&rdev->stats.lock);
                *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
        }
        PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
             __func__, stag_state, type, pdid, stag_idx);

        /* write TPT entry */
        if (reset_tpt_entry)
                memset(&tpt, 0, sizeof(tpt));
        else {
                tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
                        V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
                        V_FW_RI_TPTE_STAGSTATE(stag_state) |
                        V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
                tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
                        (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
                        V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO))|
                        V_FW_RI_TPTE_PS(page_size));
                tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                        V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
                tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
                tpt.va_hi = cpu_to_be32((u32)(to >> 32));
                tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
                tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
                tpt.len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
                                sizeof(tpt), &tpt);

        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
                                  &rdev->resource.tpt_fifo_lock);
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
        return err;
}

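/*
 * write_pbl() copies a page list of @pbl_size 64-bit entries (pbl_size << 3
 * bytes) into the adapter's PBL region; @pbl_addr is converted to 32-byte
 * units with the >> 5 before being handed to write_adapter_mem().
 */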
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
                     u32 pbl_addr, u32 pbl_size)
{
        int err;

        PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
             __func__, pbl_addr, rdev->lldi.vr->pbl.start,
             pbl_size);

        err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
        return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
                     u32 pbl_addr)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
                               pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
                               0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
                               0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
                         u32 pbl_size, u32 pbl_addr)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
                               0UL, 0, 0, pbl_size, pbl_addr);
}

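/*
 * finish_mem_reg() marks the MR valid, records its stag, mirrors the stag
 * into both lkey and rkey, and inserts the MR into the device's mmid idr
 * keyed by the upper 24 bits of the stag (stag >> 8).
 */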
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
        u32 mmid;

        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
                        struct c4iw_mr *mhp, int shift)
{
        u32 stag = T4_STAG_UNSET;
        int ret;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr);
        return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
                          struct c4iw_mr *mhp, int shift, int npages)
{
        u32 stag;
        int ret;

        if (npages > mhp->attr.pbl_size)
                return -ENOMEM;

        stag = mhp->attr.stag;
        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr);

        return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
        mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
                                                npages << 3);

        if (!mhp->attr.pbl_addr)
                return -ENOMEM;

        mhp->attr.pbl_size = npages;

        return 0;
}

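/*
 * build_phys_page_list() validates a set of physical buffers, picks the
 * largest page shift (capped at 27) that keeps every buffer boundary
 * aligned, and flattens the buffers into a kmalloc'd array of big-endian
 * page addresses.  The caller owns, and must kfree, *page_list.
 */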
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
                                int num_phys_buf, u64 *iova_start,
                                u64 *total_size, int *npages,
                                int *shift, __be64 **page_list)
{
        u64 mask;
        int i, j, n;

        mask = 0;
        *total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return -EINVAL;
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return -EINVAL;
                *total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
                else
                        mask |= buffer_list[i].addr & PAGE_MASK;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;
                else
                        mask |= (buffer_list[i].addr + buffer_list[i].size +
                                PAGE_SIZE - 1) & PAGE_MASK;
        }

        if (*total_size > 0xFFFFFFFFULL)
                return -ENOMEM;

        /* Find largest page shift we can use to cover buffers */
        for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
                if ((1ULL << *shift) & mask)
                        break;

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
        buffer_list[0].addr &= ~0ull << *shift;

        *npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                *npages += (buffer_list[i].size +
                        (1ULL << *shift) - 1) >> *shift;

        if (!*npages)
                return -EINVAL;

        *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
        if (!*page_list)
                return -ENOMEM;

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
                     ++j)
                        (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
                                ((u64) j << *shift));

        PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
             __func__, (unsigned long long)*iova_start,
             (unsigned long long)mask, *shift, (unsigned long long)*total_size,
             *npages);

        return 0;

}

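/*
 * c4iw_reregister_phys_mem() updates an existing MR.  It works on a stack
 * copy (mh) of the MR state, rewrites the TPT entry through
 * reregister_mem(), and only copies the new PD, access flags and
 * translation back into the live MR once the hardware update has succeeded.
 */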
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
                             struct ib_pd *pd, struct ib_phys_buf *buffer_list,
                             int num_phys_buf, int acc, u64 *iova_start)
{

        struct c4iw_mr mh, *mhp;
        struct c4iw_pd *php;
        struct c4iw_dev *rhp;
        __be64 *page_list = NULL;
        int shift = 0;
        u64 total_size;
        int npages;
        int ret;

        PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

        /* There can be no memory windows */
        if (atomic_read(&mr->usecnt))
                return -EINVAL;

        mhp = to_c4iw_mr(mr);
        rhp = mhp->rhp;
        php = to_c4iw_pd(mr->pd);

        /* make sure we are on the same adapter */
        if (rhp != php->rhp)
                return -EINVAL;

        memcpy(&mh, mhp, sizeof *mhp);

        if (mr_rereg_mask & IB_MR_REREG_PD)
                php = to_c4iw_pd(pd);
        if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
                mh.attr.perms = c4iw_ib_to_tpt_access(acc);
                mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
                                         IB_ACCESS_MW_BIND;
        }
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                ret = build_phys_page_list(buffer_list, num_phys_buf,
                                           iova_start,
                                           &total_size, &npages,
                                           &shift, &page_list);
                if (ret)
                        return ret;
        }

        ret = reregister_mem(rhp, php, &mh, shift, npages);
        kfree(page_list);
        if (ret)
                return ret;
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mhp->attr.pdid = php->pdid;
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                mhp->attr.zbva = 0;
                mhp->attr.va_fbo = *iova_start;
                mhp->attr.page_size = shift - 12;
                mhp->attr.len = (u32) total_size;
                mhp->attr.pbl_size = npages;
        }

        return 0;
}

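/*
 * c4iw_register_phys_mem() registers a caller-supplied list of physical
 * buffers: it checks the iova/buffer alignment, builds the flattened page
 * list, allocates space for the PBL in adapter memory, writes the PBL, and
 * finally writes the TPT entry via register_mem().
 */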
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
                                     struct ib_phys_buf *buffer_list,
                                     int num_phys_buf, int acc, u64 *iova_start)
{
        __be64 *page_list;
        int shift;
        u64 total_size;
        int npages;
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        int ret;

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
                                   &total_size, &npages, &shift,
                                   &page_list);
        if (ret)
                goto err;

        ret = alloc_pbl(mhp, npages);
        if (ret) {
                kfree(page_list);
                goto err;
        }

        ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
                        npages);
        kfree(page_list);
        if (ret)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;

        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = *iova_start;
        mhp->attr.page_size = shift - 12;

        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
        ret = register_mem(rhp, php, mhp, shift);
        if (ret)
                goto err_pbl;

        return &mhp->ibmr;

err_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);

err:
        kfree(mhp);
        return ERR_PTR(ret);

}

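/*
 * c4iw_get_dma_mr() registers a DMA MR that covers all of memory: no page
 * list is used (pbl_size == 0) and the TPT entry is written with a length
 * of ~0UL and a base of 0.
 */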
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        int ret;
        u32 stag = T4_STAG_UNSET;

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
        mhp->attr.zbva = 0;
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
        mhp->attr.len = ~0UL;
        mhp->attr.pbl_size = 0;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
        if (ret)
                goto err1;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                goto err2;
        return &mhp->ibmr;
err2:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
err1:
        kfree(mhp);
        return ERR_PTR(ret);
}

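/*
 * c4iw_reg_user_mr() registers user memory: the pages are pinned with
 * ib_umem_get(), a PBL sized to the total page count is allocated in
 * adapter memory, and the big-endian DMA addresses are written to it in
 * batches of one kernel page (PAGE_SIZE / 8 entries) at a time.
 */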
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int acc, struct ib_udata *udata)
{
        __be64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        struct ib_umem_chunk *chunk;
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;

        PDBG("%s ib_pd %p\n", __func__, pd);

        if (length == ~0ULL)
                return ERR_PTR(-EINVAL);

        if ((length + start) < start)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;

        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                kfree(mhp);
                return ERR_PTR(err);
        }

        shift = ffs(mhp->umem->page_size) - 1;

        n = 0;
        list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                n += chunk->nents;

        err = alloc_pbl(mhp, n);
        if (err)
                goto err;

        pages = (__be64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_pbl;
        }

        i = n = 0;

        list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = cpu_to_be64(sg_dma_address(
                                        &chunk->page_list[j]) +
                                        mhp->umem->page_size * k);
                                if (i == PAGE_SIZE / sizeof *pages) {
                                        err = write_pbl(&mhp->rhp->rdev,
                                              pages,
                                              mhp->attr.pbl_addr + (n << 3), i);
                                        if (err)
                                                goto pbl_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = write_pbl(&mhp->rhp->rdev, pages,
                                mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = length;

        err = register_mem(rhp, php, mhp, shift);
        if (err)
                goto err_pbl;

        return &mhp->ibmr;

err_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);

err:
        ib_umem_release(mhp->umem);
        kfree(mhp);
        return ERR_PTR(err);
}

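/*
 * c4iw_alloc_mw() allocates a memory window: a TPT entry of type
 * FW_RI_STAG_MW is written, the resulting stag becomes the window's rkey,
 * and the window is tracked in the mmid idr like any other stag.
 */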
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mw *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        ret = allocate_window(&rhp->rdev, &stag, php->pdid);
        if (ret) {
                kfree(mhp);
                return ERR_PTR(ret);
        }
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_MW;
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                deallocate_window(&rhp->rdev, mhp->attr.stag);
                kfree(mhp);
                return ERR_PTR(-ENOMEM);
        }
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
        struct c4iw_dev *rhp;
        struct c4iw_mw *mhp;
        u32 mmid;

        mhp = to_c4iw_mw(mw);
        rhp = mhp->rhp;
        mmid = (mw->rkey) >> 8;
        deallocate_window(&rhp->rdev, mhp->attr.stag);
        remove_handle(rhp, &rhp->mmidr, mmid);
        kfree(mhp);
        PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
        return 0;
}

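/*
 * c4iw_alloc_fast_reg_mr() sets up an MR for fast-register work requests:
 * it reserves a PBL of pbl_depth entries in adapter memory and writes an
 * NSMR TPT entry that points at it; the page list itself is filled in
 * later, when a fast-register WR is posted against the MR.
 */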
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret = 0;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp) {
                ret = -ENOMEM;
                goto err;
        }

        mhp->rhp = rhp;
        ret = alloc_pbl(mhp, pbl_depth);
        if (ret)
                goto err1;
        mhp->attr.pbl_size = pbl_depth;
        ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
                            mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                goto err2;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_NSMR;
        mhp->attr.stag = stag;
        mhp->attr.state = 1;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                ret = -ENOMEM;
                goto err3;
        }

        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmr);
err3:
        dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
err2:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);
err1:
        kfree(mhp);
err:
        return ERR_PTR(ret);
}

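/*
 * The fast-register page list is allocated as a single DMA-coherent buffer
 * holding both the c4iw_fr_page_list header and the page-list array that
 * follows it; the DMA mapping is recorded so c4iw_free_fastreg_pbl() can
 * hand the same address back to dma_free_coherent().
 */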
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
                                                     int page_list_len)
{
        struct c4iw_fr_page_list *c4pl;
        struct c4iw_dev *dev = to_c4iw_dev(device);
        dma_addr_t dma_addr;
        int size = sizeof *c4pl + page_list_len * sizeof(u64);

        c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
                                  &dma_addr, GFP_KERNEL);
        if (!c4pl)
                return ERR_PTR(-ENOMEM);

        dma_unmap_addr_set(c4pl, mapping, dma_addr);
        c4pl->dma_addr = dma_addr;
        c4pl->dev = dev;
        c4pl->size = size;
        c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
        c4pl->ibpl.max_page_list_len = page_list_len;

        return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
        struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

        dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
                          c4pl, dma_unmap_addr(c4pl, mapping));
}

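/*
 * c4iw_dereg_mr() tears an MR down in the reverse order of registration:
 * invalidate the TPT entry first, then release the PBL allocation, drop the
 * mmid mapping, and finally release any pinned umem or kernel buffer before
 * freeing the MR itself.
 */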
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
        struct c4iw_dev *rhp;
        struct c4iw_mr *mhp;
        u32 mmid;

        PDBG("%s ib_mr %p\n", __func__, ib_mr);
        /* There can be no memory windows */
        if (atomic_read(&ib_mr->usecnt))
                return -EINVAL;

        mhp = to_c4iw_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
        if (mhp->attr.pbl_size)
                c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                                  mhp->attr.pbl_size << 3);
        remove_handle(rhp, &rhp->mmidr, mmid);
        if (mhp->kva)
                kfree((void *) (unsigned long) mhp->kva);
        if (mhp->umem)
                ib_umem_release(mhp->umem);
        PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
        kfree(mhp);
        return 0;
}