/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

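/*
 * Look up the rds_ib_device that owns @ipaddr. The global device list
 * and each device's ipaddr_list are walked under RCU; on success the
 * device's refcount is taken and the caller must drop it with
 * rds_ib_dev_put().
 */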
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

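/*
 * Bind @ipaddr to @rds_ibdev. The entry is published with
 * list_add_tail_rcu() so rds_ib_get_device() can search locklessly.
 */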
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

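/*
 * Unbind @ipaddr from @rds_ibdev. The entry is unlinked under the
 * device spinlock and freed with kfree_rcu() so concurrent RCU readers
 * never touch freed memory.
 */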
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

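/*
 * Make @rds_ibdev the device that owns @ipaddr, migrating the address
 * away from any other device that currently claims it.
 */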
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

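/*
 * Move @conn from the global ib_nodev_conns list onto @rds_ibdev's
 * conn_list, taking a device reference on its behalf.
 */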
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

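/*
 * Detach @conn from @rds_ibdev and park it back on ib_nodev_conns,
 * dropping the reference taken in rds_ib_add_conn().
 */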
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

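/* Destroy every connection that is not currently bound to a device. */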
void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

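/* Fill the MR limits in @iinfo from the 1M pool's configuration. */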
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

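/*
 * Grab an unused MR off the pool's clean_list, if one is available.
 * CLEAN_LIST_BUSY_BIT marks this CPU as busy inside llist_del_first()
 * so that wait_clean_list_grace() can wait us out before the flusher
 * re-adds entries to the list.
 */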
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

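/*
 * Wait until no CPU has CLEAN_LIST_BUSY_BIT set, i.e. nobody is inside
 * llist_del_first() on a clean_list; see the comment in
 * rds_ib_flush_mr_pool() for the race this avoids.
 */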
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

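/* Sync an MR's pages for CPU or device access around an RDMA transfer. */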
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

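/*
 * DMA-unmap and unpin an MR's pages. Every page is dirtied before
 * release because we cannot tell whether the peer wrote to it (see the
 * FIXME below).
 */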
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

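/* Tear down an MR and credit its pinned pages back to the pool. */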
void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

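/* How many MRs a flush should free: everything for free_all, else none. */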
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into one long chain of
 * llist nodes, handing back the head and tail of that chain so the
 * caller can splice it onto a pool's clean_list with a single
 * llist_add_batch() call.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

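/*
 * Get an MR for a new registration. Try the clean_list first; if that
 * is empty, reserve a slot in item_count and return NULL so the caller
 * allocates a fresh MR; if the pool is full, flush it synchronously and
 * retry, giving up with ERR_PTR(-EAGAIN) after a few iterations.
 */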
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

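/*
 * Transport hook called when a client is done with an MR. The MR is
 * not destroyed here but returned to its pool, and a delayed flush is
 * scheduled when too many pages are pinned or too many MRs are dirty.
 */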
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	rds_ib_free_fmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

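/* Synchronously flush the 8K and 1M pools of every IB device. */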
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

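/*
 * Transport hook for registering a scatterlist as an MR. The device is
 * looked up from the socket's bound address and the resulting rkey is
 * returned through @key_ret.
 */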
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret = 0;	/* initialized so the warning below never prints garbage */

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
	if (ibmr)
		rds_ibdev = NULL;

out:
	if (!ibmr)
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);

	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ibmr;
}

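/*
 * Flush and free a pool. The WARN_ONs fire if MRs are still allocated
 * or pages are still accounted as pinned when the pool goes away.
 */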
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

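/* Allocate and initialize one MR pool of the given type (8K or 1M). */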
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = RDS_MR_1M_POOL_SIZE;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = RDS_MR_8K_POOL_SIZE;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = create_workqueue("rds_mr_flushd");
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}