/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	int i, k, entry;
	int n;
	int len;
	int err = 0;
	struct scatterlist *sg;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				umem->page_size * k;
			/*
			 * Be friendly to mlx4_write_mtt() and
			 * pass it chunks of appropriate size.
			 */
			if (i == PAGE_SIZE / sizeof(u64)) {
				err = mlx4_write_mtt(dev->dev, mtt, n,
						     i, pages);
				if (err)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/*
	 * Force registering the memory as writable: this is needed for
	 * memory re-registration, and the HCA still protects the access.
	 */
	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently. Otherwise, a
	 * race exists.
	 */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);
		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));
		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_TRANS) {
		int shift;
		int n;

		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
					mr_access_flags |
					IB_ACCESS_LOCAL_WRITE,
					0);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		shift = ilog2(mmr->umem->page_size);

		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
					      virt_addr, length, n, shift,
					      *pmpt_entry);
		if (err) {
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
		mmr->mmr.iova = virt_addr;
		mmr->mmr.size = length;

		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
		if (err) {
			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
	}

	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure. But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

	return err;
}

static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int size = max_pages * sizeof(u64);
	int add_size;
	int ret;

	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->pages_alloc)
		return -ENOMEM;

	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);

	mr->page_map = dma_map_single(device->dma_device, mr->pages,
				      size, DMA_TO_DEVICE);

	if (dma_mapping_error(device->dma_device, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->pages_alloc);

	return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_pages * sizeof(u64);

		dma_unmap_single(device->dma_device, mr->page_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->pages_alloc);
		mr->pages = NULL;
	}
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

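/* Allocate a type 1 or type 2 memory window on the given PD and enable it. */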
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
	if (!err)
		kfree(ifmr);

	return err;
}

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

	return 0;
}

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;

	mr->npages = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   sizeof(u64) * mr->max_pages,
				   DMA_TO_DEVICE);

	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      sizeof(u64) * mr->max_pages,
				      DMA_TO_DEVICE);

	return rc;
}