/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <ib_fmr_pool.h>

#include "core_priv.h"

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In this
 * case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */

struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void               *arg);
        void                     *flush_arg;

        struct task_struct       *thread;

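        /*
         * req_ser is bumped by ib_flush_fmr_pool() to request a flush;
         * flush_ser is bumped by the cleanup thread each time it
         * completes a batch release.  Flush callers sleep on force_wait
         * until flush_ser catches up with their request.
         */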
        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                (IB_FMR_HASH_SIZE - 1);
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;
        struct hlist_node *pos;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, pos, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len == fmr->page_list_len &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

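/*
 * Unmap every FMR on the dirty list in a single batch, drop them from
 * the cache, and return them to the free list.  Called from the cleanup
 * thread and from pool destruction; callers must not hold pool_lock,
 * since this function takes it itself.
 */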
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
#endif
        }

        list_splice(&pool->dirty_list, &unmap_list);
        INIT_LIST_HEAD(&pool->dirty_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

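/*
 * Kernel thread that performs the batch releases: it runs whenever the
 * dirty list grows past the pool's watermark or a flush has been
 * requested via ib_flush_fmr_pool(), and sleeps otherwise.
 */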
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (pool->dirty_len >= pool->dirty_watermark ||
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (pool->dirty_len < pool->dirty_watermark &&
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_WARNING "Device %s does not support fast memory regions\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_create(ib_fmr_cleanup_thread,
                                      pool,
                                      "ib_fmr(%s)",
                                      device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr attr = {
                        .max_pages = params->max_pages_per_fmr,
                        .max_maps  = IB_FMR_MAX_REMAPS,
                        .page_size = PAGE_SHIFT
                };

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                                      GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING "failed to allocate fmr struct "
                                       "for FMR %d\n", i);
                                goto out_fail;
                        }

                        fmr->pool        = pool;
                        fmr->remap_count = 0;
                        fmr->ref_count   = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING "fmr_create failed for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
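
/*
 * Illustrative usage sketch (not part of this file; the variable names
 * and parameter values are hypothetical): a consumer holding a
 * protection domain "pd" might create a pool like this and check the
 * result with IS_ERR():
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_WRITE |
 *				     IB_ACCESS_REMOTE_READ,
 *		.pool_size         = 32,
 *		.dirty_watermark   = 4,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *fmr_pool = ib_create_fmr_pool(pd, &params);
 *
 *	if (IS_ERR(fmr_pool))
 *		return PTR_ERR(fmr_pool);
 */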

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);

        return 0;
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;

        atomic_inc(&pool->req_ser);
        /*
         * It's OK if someone else bumps req_ser again here -- we'll
         * just wait a little longer.
         */
        serial = atomic_read(&pool->req_ser);

        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
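
/*
 * Calling ib_flush_fmr_pool() forces the cleanup thread to run a batch
 * release of the dirty list immediately, instead of waiting for the
 * dirty watermark to be reached, and returns once that flush has
 * completed (or -EINTR if the wait is interrupted).
 */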

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                *io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  *io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 *io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = *io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
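
/*
 * Illustrative usage sketch (hypothetical names; the DMA addresses and
 * the work-request posting are assumed to exist in the caller): map a
 * page list through the pool, use the underlying FMR's keys, then
 * release it with ib_fmr_pool_unmap():
 *
 *	u64 pages[2] = { dma_addr0, dma_addr1 };
 *	u64 io_addr  = dma_addr0;
 *	struct ib_pool_fmr *pool_fmr;
 *
 *	pool_fmr = ib_fmr_pool_map_phys(fmr_pool, pages, 2, &io_addr);
 *	if (IS_ERR(pool_fmr))
 *		return PTR_ERR(pool_fmr);
 *
 *	... post work requests using pool_fmr->fmr->lkey / rkey ...
 *
 *	ib_fmr_pool_unmap(pool_fmr);
 */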

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        ++pool->dirty_len;
                        wake_up_process(pool->thread);
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                printk(KERN_WARNING "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);