/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	struct list_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct list_head	free_list;	/* unused MRs */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

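/*
 * MR lifecycle: rds_ib_free_mr() returns an MR to the free_list while
 * it is still mapped ("dirty"), or to the drop_list once remap_count
 * has reached fmr_attr.max_maps. rds_ib_flush_mr_pool() unmaps those
 * MRs and either destroys them or parks them on the clean_list, from
 * which rds_ib_reuse_fmr() hands them out again.
 */
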
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

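/*
 * Look up the rds_ib_device that owns @ipaddr. The per-device
 * ipaddr_list is walked under rcu_read_lock(); writers serialize on
 * the device spinlock (see rds_ib_add_ipaddr() below).
 */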
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		rcu_read_lock();
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
		rcu_read_unlock();
	}

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

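/*
 * Point @ipaddr at @rds_ibdev: remove it from whichever device
 * currently claims it (if any), then add it to this device's list.
 */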
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

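/*
 * Associate a connection with an IB device once the device is known:
 * move it from the global ib_nodev_conns list onto the device's
 * conn_list.
 */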
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* spin_lock_irq() is not nestable - the outer lock already has
	 * irqs disabled, so take the inner lock without touching them. */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

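/*
 * Create the per-device FMR pool. The pool sizes itself from the
 * device's FMR limits (max_fmrs, fmr_max_remaps) and the configured
 * fmr_message_size.
 */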
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

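/*
 * Example sizing (hypothetical device limit): with max_fmrs = 1024,
 * the pool gets max_items = 1024, max_items_soft = 768, and
 * max_free_pinned = 1024 * fmr_message_size / 4 pages.
 */
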
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_ib_flush_mr_pool(pool, 1);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

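/*
 * Pull an unused, already-unmapped MR off the clean_list, if one is
 * available. Returns NULL if the caller needs to allocate or flush
 * instead.
 */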
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	/* kzalloc_node() already returns zeroed memory */
	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

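/*
 * DMA-map a scatterlist and hand the resulting page list to
 * ib_map_phys_fmr(). FMRs map whole pages, so only the first segment
 * may start at a non-page boundary and only the last may end at one;
 * anything else is rejected with -EINVAL.
 */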
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret) {
		/* the MR never took ownership of this mapping; undo it */
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

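/*
 * Drop an MR's DMA mapping and unpin its pages. Pages are always
 * marked dirty on release because we cannot yet tell a r/w MR from
 * a r/o one (see the FIXME below).
 */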
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

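/*
 * Transport free_mr hook. The MR is not destroyed here: it goes back
 * on the pool's free_list for reuse, or on the drop_list once it has
 * exhausted its remap quota, and a flush is kicked off if too much
 * memory is pinned by unused MRs.
 */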
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

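/*
 * Transport get_mr hook: allocate (or reuse) an FMR for the socket's
 * bound device and map the caller's pages, returning the rkey through
 * *key_ret on success.
 */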
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}