drivers/staging/rdma/hfi1/mr.c
/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "hfi.h"

/* Fast memory region */
struct hfi1_fmr {
        struct ib_fmr ibfmr;
        struct hfi1_mregion mr;        /* must be last */
};

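/* Convert an ib_fmr handle back to the containing hfi1_fmr. */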
static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
        return container_of(ibfmr, struct hfi1_fmr, ibfmr);
}

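/*
 * Allocate the first-level segment arrays needed to hold @count segments
 * and take the initial reference that is handed back to the caller.
 * Returns 0 on success or -ENOMEM if an allocation fails.
 */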
static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
                        int count)
{
        int m, i = 0;
        int rval = 0;

        m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
        for (; i < m; i++) {
                mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
        mr->mapsz = m;
        init_completion(&mr->comp);
        /* count returning the ptr to user */
        atomic_set(&mr->refcount, 1);
        mr->pd = pd;
        mr->max_segs = count;
out:
        return rval;
bail:
        while (i)
                kfree(mr->map[--i]);
        rval = -ENOMEM;
        goto out;
}

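/* Free the segment arrays allocated by init_mregion(). */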
static void deinit_mregion(struct hfi1_mregion *mr)
{
        int i = mr->mapsz;

        mr->mapsz = 0;
        while (i)
                kfree(mr->map[--i]);
}

/**
 * hfi1_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct hfi1_mr *mr = NULL;
        struct ib_mr *ret;
        int rval;

        if (to_ipd(pd)->user) {
                ret = ERR_PTR(-EPERM);
                goto bail;
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        rval = init_mregion(&mr->mr, pd, 0);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail;
        }

        rval = hfi1_alloc_lkey(&mr->mr, 1);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail_mregion;
        }

        mr->mr.access_flags = acc;
        ret = &mr->ibmr;
done:
        return ret;

bail_mregion:
        deinit_mregion(&mr->mr);
bail:
        kfree(mr);
        goto done;
}

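/*
 * Allocate an hfi1_mr with enough first-level map pointers for @count
 * segments and reserve an LKEY for it.  Returns the new region or an
 * ERR_PTR() on failure.
 */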
static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
{
        struct hfi1_mr *mr;
        int rval = -ENOMEM;
        int m;

        /* Allocate struct plus pointers to first level page tables. */
        m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
        mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
        if (!mr)
                goto bail;

        rval = init_mregion(&mr->mr, pd, count);
        if (rval)
                goto bail;
        /*
         * ib_reg_phys_mr() will initialize mr->ibmr except for
         * lkey and rkey.
         */
        rval = hfi1_alloc_lkey(&mr->mr, 0);
        if (rval)
                goto bail_mregion;
        mr->ibmr.lkey = mr->mr.lkey;
        mr->ibmr.rkey = mr->mr.lkey;
done:
        return mr;

bail_mregion:
        deinit_mregion(&mr->mr);
bail:
        kfree(mr);
        mr = ERR_PTR(rval);
        goto done;
}

/**
 * hfi1_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
                               struct ib_phys_buf *buffer_list,
                               int num_phys_buf, int acc, u64 *iova_start)
{
        struct hfi1_mr *mr;
        int n, m, i;
        struct ib_mr *ret;

        mr = alloc_mr(num_phys_buf, pd);
        if (IS_ERR(mr)) {
                ret = (struct ib_mr *)mr;
                goto bail;
        }

        mr->mr.user_base = *iova_start;
        mr->mr.iova = *iova_start;
        mr->mr.access_flags = acc;

        m = 0;
        n = 0;
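        /* Copy the caller's buffer list into the region's segment arrays. */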
        for (i = 0; i < num_phys_buf; i++) {
                mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
                mr->mr.map[m]->segs[n].length = buffer_list[i].size;
                mr->mr.length += buffer_list[i].size;
                n++;
                if (n == HFI1_SEGSZ) {
                        m++;
                        n = 0;
                }
        }

        ret = &mr->ibmr;

bail:
        return ret;
}

/**
 * hfi1_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: the virtual address to associate with this memory region
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt_addr, int mr_access_flags,
                               struct ib_udata *udata)
{
        struct hfi1_mr *mr;
        struct ib_umem *umem;
        struct scatterlist *sg;
        int n, m, entry;
        struct ib_mr *ret;

        if (length == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        umem = ib_umem_get(pd->uobject->context, start, length,
                           mr_access_flags, 0);
        if (IS_ERR(umem))
                return (void *) umem;

        n = umem->nmap;

        mr = alloc_mr(n, pd);
        if (IS_ERR(mr)) {
                ret = (struct ib_mr *)mr;
                ib_umem_release(umem);
                goto bail;
        }

        mr->mr.user_base = start;
        mr->mr.iova = virt_addr;
        mr->mr.length = length;
        mr->mr.offset = ib_umem_offset(umem);
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;

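        /* Record the page shift when the umem page size is a power of two. */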
        if (is_power_of_2(umem->page_size))
                mr->mr.page_shift = ilog2(umem->page_size);
        m = 0;
        n = 0;
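        /* Record one segment per mapped scatterlist entry, one page each. */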
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                void *vaddr;

                vaddr = page_address(sg_page(sg));
                if (!vaddr) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
                mr->mr.map[m]->segs[n].vaddr = vaddr;
                mr->mr.map[m]->segs[n].length = umem->page_size;
                n++;
                if (n == HFI1_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        ret = &mr->ibmr;

bail:
        return ret;
}

/**
 * hfi1_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by hfi1_get_dma_mr(),
 * hfi1_reg_phys_mr(), hfi1_reg_user_mr(), or hfi1_alloc_mr().
 */
int hfi1_dereg_mr(struct ib_mr *ibmr)
{
        struct hfi1_mr *mr = to_imr(ibmr);
        int ret = 0;
        unsigned long timeout;

        hfi1_free_lkey(&mr->mr);

        hfi1_put_mr(&mr->mr); /* will set completion if last */
        timeout = wait_for_completion_timeout(&mr->mr.comp,
                                              5 * HZ);
        if (!timeout) {
                dd_dev_err(
                        dd_from_ibdev(mr->mr.pd->device),
                        "hfi1_dereg_mr timeout mr %p pd %p refcount %u\n",
                        mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
                hfi1_get_mr(&mr->mr);
                ret = -EBUSY;
                goto out;
        }
        deinit_mregion(&mr->mr);
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);
out:
        return ret;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
                            enum ib_mr_type mr_type,
                            u32 max_num_sg)
{
        struct hfi1_mr *mr;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        mr = alloc_mr(max_num_sg, pd);
        if (IS_ERR(mr))
                return (struct ib_mr *)mr;

        return &mr->ibmr;
}

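/*
 * Allocate a page list for use with IB_WR_FAST_REG_MR work requests.
 * The list is limited to one page worth of u64 addresses.
 */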
struct ib_fast_reg_page_list *
hfi1_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
        unsigned size = page_list_len * sizeof(u64);
        struct ib_fast_reg_page_list *pl;

        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);

        pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return ERR_PTR(-ENOMEM);

        pl->page_list = kzalloc(size, GFP_KERNEL);
        if (!pl->page_list)
                goto err_free;

        return pl;

err_free:
        kfree(pl);
        return ERR_PTR(-ENOMEM);
}

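/* Free a page list allocated by hfi1_alloc_fast_reg_page_list(). */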
void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
        kfree(pl->page_list);
        kfree(pl);
}

/**
 * hfi1_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                              struct ib_fmr_attr *fmr_attr)
{
        struct hfi1_fmr *fmr;
        int m;
        struct ib_fmr *ret;
        int rval = -ENOMEM;

        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
        fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
        if (!fmr)
                goto bail;

        rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
        if (rval)
                goto bail;

        /*
         * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
         * rkey.
         */
        rval = hfi1_alloc_lkey(&fmr->mr, 0);
        if (rval)
                goto bail_mregion;
        fmr->ibfmr.rkey = fmr->mr.lkey;
        fmr->ibfmr.lkey = fmr->mr.lkey;
        /*
         * Resources are allocated but no valid mapping (RKEY can't be
         * used).
         */
        fmr->mr.access_flags = mr_access_flags;
        fmr->mr.max_segs = fmr_attr->max_pages;
        fmr->mr.page_shift = fmr_attr->page_shift;

        ret = &fmr->ibfmr;
done:
        return ret;

bail_mregion:
        deinit_mregion(&fmr->mr);
bail:
        kfree(fmr);
        ret = ERR_PTR(rval);
        goto done;
}

/**
 * hfi1_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                      int list_len, u64 iova)
{
        struct hfi1_fmr *fmr = to_ifmr(ibfmr);
        struct hfi1_lkey_table *rkt;
        unsigned long flags;
        int m, n, i;
        u32 ps;
        int ret;

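        /* Reject the remap if the FMR has users beyond its allocation and lkey table references. */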
        i = atomic_read(&fmr->mr.refcount);
        if (i > 2)
                return -EBUSY;

        if (list_len > fmr->mr.max_segs) {
                ret = -EINVAL;
                goto bail;
        }
        rkt = &to_idev(ibfmr->device)->lk_table;
        spin_lock_irqsave(&rkt->lock, flags);
        fmr->mr.user_base = iova;
        fmr->mr.iova = iova;
        ps = 1 << fmr->mr.page_shift;
        fmr->mr.length = list_len * ps;
        m = 0;
        n = 0;
        for (i = 0; i < list_len; i++) {
                fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
                fmr->mr.map[m]->segs[n].length = ps;
                if (++n == HFI1_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        spin_unlock_irqrestore(&rkt->lock, flags);
        ret = 0;

bail:
        return ret;
}

/**
 * hfi1_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int hfi1_unmap_fmr(struct list_head *fmr_list)
{
        struct hfi1_fmr *fmr;
        struct hfi1_lkey_table *rkt;
        unsigned long flags;

        list_for_each_entry(fmr, fmr_list, ibfmr.list) {
                rkt = &to_idev(fmr->ibfmr.device)->lk_table;
                spin_lock_irqsave(&rkt->lock, flags);
                fmr->mr.user_base = 0;
                fmr->mr.iova = 0;
                fmr->mr.length = 0;
                spin_unlock_irqrestore(&rkt->lock, flags);
        }
        return 0;
}

/**
 * hfi1_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int hfi1_dealloc_fmr(struct ib_fmr *ibfmr)
{
        struct hfi1_fmr *fmr = to_ifmr(ibfmr);
        int ret = 0;
        unsigned long timeout;

        hfi1_free_lkey(&fmr->mr);
        hfi1_put_mr(&fmr->mr); /* will set completion if last */
        timeout = wait_for_completion_timeout(&fmr->mr.comp,
                                              5 * HZ);
        if (!timeout) {
                hfi1_get_mr(&fmr->mr);
                ret = -EBUSY;
                goto out;
        }
        deinit_mregion(&fmr->mr);
        kfree(fmr);
out:
        return ret;
}