/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "hfi.h"

/* Fast memory region */
struct hfi1_fmr {
	struct ib_fmr ibfmr;
	struct hfi1_mregion mr;        /* must be last */
};

static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct hfi1_fmr, ibfmr);
}
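
/*
 * The container_of() pattern above, sketched generically (hypothetical
 * example_* names, illustration only):
 *
 *	struct example_outer {
 *		int tag;
 *		struct list_head inner;
 *	};
 *
 *	static struct example_outer *outer_of(struct list_head *p)
 *	{
 *		return container_of(p, struct example_outer, inner);
 *	}
 *
 * Because struct hfi1_fmr embeds its struct ib_fmr by value, the member
 * offset is a compile-time constant and to_ifmr() is just a pointer
 * subtraction.
 */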

static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
			int count)
{
	int m, i = 0;
	int rval = 0;

	m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
out:
	return rval;

bail:
	while (i)
		kfree(mr->map[--i]);
	rval = -ENOMEM;
	goto out;
}
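
/*
 * A worked sketch of the sizing arithmetic above, assuming only that
 * HFI1_SEGSZ is the per-block segment count defined in the driver
 * headers: if HFI1_SEGSZ were 32 and count were 100, then
 *
 *	m = (100 + 32 - 1) / 32 = 4
 *
 * i.e. round-up integer division, with the last map block partially
 * used.  The refcount starts at 1 to account for the pointer handed
 * back to the caller; teardown drops that reference and waits on
 * mr->comp for any remaining users.
 */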

static void deinit_mregion(struct hfi1_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}

/**
 * hfi1_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hfi1_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = hfi1_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
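
/*
 * Minimal usage sketch (illustration only, not driver code): callers
 * normally reach this through the core verbs entry point rather than
 * calling the driver directly, e.g.
 *
 *	struct ib_mr *dma_mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(dma_mr))
 *		return PTR_ERR(dma_mr);
 *
 * The to_ipd(pd)->user test above rejects userspace PDs with -EPERM
 * because a DMA MR covers all of kernel memory.
 */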

static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct hfi1_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = hfi1_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}
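
/*
 * Design note with a generic sketch (hypothetical example_* names):
 * alloc_mr() sizes one allocation to hold the structure plus the
 * first-level map pointer array that trails struct hfi1_mregion, which
 * is why that member "must be last" in its containers:
 *
 *	struct example_table {
 *		int nmaps;
 *		struct example_map *map[];
 *	};
 *
 *	tbl = kzalloc(sizeof(*tbl) + n * sizeof(tbl->map[0]),
 *		      GFP_KERNEL);
 *
 * The sizeof(*mr) + m * sizeof(mr->mr.map[0]) arithmetic above is the
 * same idiom.
 */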

/**
 * hfi1_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *buffer_list,
			       int num_phys_buf, int acc, u64 *iova_start)
{
	struct hfi1_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail;
	}

	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.access_flags = acc;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *)buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == HFI1_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}
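
/*
 * The (m, n) bookkeeping above, restated: buffer i lands in
 * map[i / HFI1_SEGSZ]->segs[i % HFI1_SEGSZ].  Carrying the running
 * pair and wrapping n when it reaches HFI1_SEGSZ avoids a divide per
 * iteration but is equivalent to
 *
 *	m = i / HFI1_SEGSZ;
 *	n = i % HFI1_SEGSZ;
 */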

/**
 * hfi1_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: the virtual address passed over IB which maps to this MR
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt_addr, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct hfi1_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *)umem;

	n = umem->nmap;

	mr = alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == HFI1_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
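
/*
 * Sketch of the path that lands here (illustration; the exact plumbing
 * is in the IB core and uverbs layers): a user process calls
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE);
 *
 * libibverbs issues the uverbs REG_MR command, the core pins the pages
 * with ib_umem_get(), and the loop above copies each pinned page into
 * the two-level segment map so lkey/rkey lookups never have to touch
 * the umem again.
 */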

/**
 * hfi1_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by hfi1_get_dma_mr()
 * or hfi1_reg_user_mr().
 */
int hfi1_dereg_mr(struct ib_mr *ibmr)
{
	struct hfi1_mr *mr = to_imr(ibmr);
	int ret = 0;
	unsigned long timeout;

	hfi1_free_lkey(&mr->mr);

	hfi1_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp,
					      5 * HZ);
	if (!timeout) {
		dd_dev_err(
			dd_from_ibdev(mr->mr.pd->device),
			"hfi1_dereg_mr timeout mr %p pd %p refcount %u\n",
			mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
		hfi1_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}
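
/*
 * The teardown pattern above in miniature (hypothetical sketch, not
 * driver code): pair an atomic refcount with a completion so the
 * destroyer can wait out in-flight users instead of freeing under
 * them.
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		complete(&obj->comp);	(last user signals)
 *
 *	if (!wait_for_completion_timeout(&obj->comp, 5 * HZ))
 *		return -EBUSY;		(destroyer gives up)
 *
 * hfi1_put_mr()/hfi1_get_mr() wrap this refcounting for struct
 * hfi1_mregion.
 */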

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct hfi1_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

struct ib_fast_reg_page_list *
hfi1_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kzalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}

void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}
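
/*
 * A worked check of the PAGE_SIZE guard in
 * hfi1_alloc_fast_reg_page_list(), assuming 4 KiB pages: each entry is
 * a u64, so at most
 *
 *	PAGE_SIZE / sizeof(u64) = 4096 / 8 = 512
 *
 * pages fit before size > PAGE_SIZE rejects the request with -EINVAL,
 * keeping the fast-register page list within a single kernel page.
 */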

/**
 * hfi1_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct hfi1_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = hfi1_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}
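
/*
 * Usage sketch (illustration only; the attribute values are
 * arbitrary): kernel ULPs of this era allocate FMRs through the core
 * helper rather than calling the driver directly:
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE |
 *					  IB_ACCESS_REMOTE_WRITE, &attr);
 *
 * As the comment above notes, the rkey is not usable until
 * hfi1_map_phys_fmr() installs a mapping.
 */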

/**
 * hfi1_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		      int list_len, u64 iova)
{
	struct hfi1_fmr *fmr = to_ifmr(ibfmr);
	struct hfi1_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == HFI1_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
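
/*
 * Why the refcount probe above treats > 2 as busy (reasoning recap,
 * not new logic): an idle FMR holds the base reference from
 * init_mregion() plus the reference taken when its lkey was published,
 * so a count above 2 means a send or receive path is still walking the
 * current mapping and remapping would pull segments out from under it.
 * The caller sees -EBUSY and must retry later.
 */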

/**
 * hfi1_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int hfi1_unmap_fmr(struct list_head *fmr_list)
{
	struct hfi1_fmr *fmr;
	struct hfi1_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * hfi1_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int hfi1_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct hfi1_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	hfi1_free_lkey(&fmr->mr);
	hfi1_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp,
					      5 * HZ);
	if (!timeout) {
		hfi1_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}