RDS: Implement atomic operations
[deliverable/linux.git] / net/rds/ib_rdma.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"
#include "ib.h"

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device *device;
        struct rds_ib_mr_pool *pool;
        struct ib_fmr *fmr;
        struct list_head list;
        unsigned int remap_count;

        struct scatterlist *sg;
        unsigned int sg_len;
        u64 *dma;
        int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        struct mutex flush_lock;        /* serialize fmr invalidate */
        struct work_struct flush_worker; /* flush worker */

        spinlock_t list_lock;           /* protect variables below */
        atomic_t item_count;            /* total # of MRs */
        atomic_t dirty_count;           /* # of dirty MRs */
        struct list_head drop_list;     /* MRs that have reached their max_maps limit */
        struct list_head free_list;     /* unused MRs */
        struct list_head clean_list;    /* unused & unmapped MRs */
        atomic_t free_pinned;           /* memory pinned by free MRs */
        unsigned long max_items;
        unsigned long max_items_soft;
        unsigned long max_free_pinned;
        struct ib_fmr_attr fmr_attr;
};
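
/*
 * MR lifecycle, in brief: rds_ib_get_mr() hands out an MR, reusing one
 * from clean_list when possible; rds_ib_free_mr() parks it on free_list,
 * or on drop_list once remap_count reaches fmr_attr.max_maps; and
 * rds_ib_flush_mr_pool() unmaps everything on those lists, destroying
 * some MRs and moving the rest back to clean_list for reuse.
 */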

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                spin_lock_irq(&rds_ibdev->spinlock);
                list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                spin_unlock_irq(&rds_ibdev->spinlock);
                                return rds_ibdev;
                        }
                }
                spin_unlock_irq(&rds_ibdev->spinlock);
        }

        return NULL;
}
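
/*
 * Note that rds_ib_get_device() returns the matching device without
 * taking a reference on it; the rds_ib_devices list is assumed to stay
 * stable while the result is in use.
 */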

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr, *next;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del(&i_ipaddr->list);
                        kfree(i_ipaddr);
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (rds_ibdev_old)
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

        return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        /* irqs are already off; a nested spin_unlock_irq here would
         * re-enable them while ib_nodev_conns_lock is still held */
        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(list_lock);
        list_splice(list, &tmp_list);
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->drop_list);
        INIT_LIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        spin_lock_init(&pool->list_lock);
        INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

        /* We never allow more than max_items MRs to be allocated.
         * When we exceed max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
        pool->max_items = rds_ibdev->max_fmrs;
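
        /*
         * For example, a device reporting max_fmrs = 2048 gets
         * max_items = 2048 and max_items_soft = 1536, which satisfies
         * max_items > max_items_soft > max_items / 2 above.
         */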

        return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        flush_workqueue(rds_wq);
        rds_ib_flush_mr_pool(pool, 1);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->list_lock, flags);
        if (!list_empty(&pool->clean_list)) {
                ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
                list_del_init(&ibmr->list);
        }
        spin_unlock_irqrestore(&pool->list_lock, flags);

        return ibmr;
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
                rds_ib_flush_mr_pool(pool, 0);
        }

        ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE |
                         IB_ACCESS_REMOTE_ATOMIC),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        rds_ib_stats_inc(s_ib_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}
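
/*
 * In short, rds_ib_alloc_fmr() first tries to recycle a clean MR, then
 * allocates a fresh FMR as long as item_count stays within max_items,
 * and otherwise flushes the pool and retries, giving up with -EAGAIN
 * after two flush attempts. IB_ACCESS_REMOTE_ATOMIC is requested so
 * that remote atomic operations can target memory mapped by this MR.
 */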

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
                          struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                   DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0) {
                                /* don't leak the DMA mapping on error */
                                ib_dma_unmap_sg(dev, sg, nents,
                                                DMA_BIDIRECTIONAL);
                                return -EINVAL;
                        } else {
                                ++page_cnt;
                        }
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1) {
                                ib_dma_unmap_sg(dev, sg, nents,
                                                DMA_BIDIRECTIONAL);
                                return -EINVAL;
                        } else {
                                ++page_cnt;
                        }
                }

                len += dma_len;
        }
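
        /*
         * The checks above allow only the first element to start off a
         * page boundary and only the last to end off one. With 4K pages,
         * for example, the pair 0x1800-0x2000, 0x3000-0x3800 passes,
         * while 0x1000-0x1800, 0x3000-0x4000 fails with -EINVAL because
         * the first element ends mid-page without being the last one.
         */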

        page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size) {
                /* undo the DMA mapping before bailing out */
                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -EINVAL;
        }

        dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
        if (!dma_pages) {
                ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -ENOMEM;
        }

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
                                (dma_addr & PAGE_MASK) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                              dma_pages, page_cnt, io_addr);
        if (ret)
                goto out;

        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        rds_ib_stats_inc(s_ib_rdma_mr_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        BUG_ON(irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_device *rds_ibdev = ibmr->device;
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
        struct rds_ib_mr *ibmr, *next;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned long flags;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

        mutex_lock(&pool->flush_lock);

        spin_lock_irqsave(&pool->list_lock, flags);
        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list. */
        list_splice_init(&pool->free_list, &unmap_list);
        list_splice_init(&pool->drop_list, &unmap_list);
        if (free_all)
                list_splice_init(&pool->clean_list, &unmap_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, list)
                list_add(&ibmr->fmr->list, &fmr_list);
        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        rds_ib_stats_inc(s_ib_rdma_mr_free);
                        list_del(&ibmr->list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
                ncleaned++;
        }

        spin_lock_irqsave(&pool->list_lock, flags);
        list_splice(&unmap_list, &pool->clean_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        return ret;
}
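
/*
 * Flush, step by step: splice the free and drop lists (plus clean_list
 * when free_all is set) onto a private unmap_list, invalidate every FMR
 * with a single ib_unmap_fmr() call, destroy MRs that have hit their
 * max_maps limit or that are needed to reach free_goal, and return the
 * survivors to clean_list for reuse.
 */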

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

        rds_ib_flush_mr_pool(pool, 0);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        unsigned long flags;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        spin_lock_irqsave(&pool->list_lock, flags);
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                list_add(&ibmr->list, &pool->drop_list);
        else
                list_add(&ibmr->list, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0);
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time. */
                        queue_work(rds_wq, &pool->flush_worker);
                }
        }
}
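
/*
 * An MR that has been remapped fmr_attr.max_maps times goes onto
 * drop_list rather than free_list: the device won't allow further
 * remaps, so the next flush must destroy it instead of recycling it.
 */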

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                if (pool)
                        rds_ib_flush_mr_pool(pool, 0);
        }
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev);
        if (IS_ERR(ibmr))
                return ibmr;

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;

out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        return ibmr;
}
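
/*
 * rds_ib_get_mr() above is the entry point the RDS core uses, via the
 * transport's get_mr hook, when an application registers memory for
 * RDMA: it looks up the device for the socket's bound address, grabs an
 * FMR from the pool, maps the caller's scatterlist, and returns the
 * rkey the peer will use. Because the FMR was allocated with
 * IB_ACCESS_REMOTE_ATOMIC, that rkey can also be the target of the
 * remote atomic operations this commit introduces.
 */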