drivers/infiniband/hw/mthca/mthca_memfree.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in chunks as big as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	} page[0];
};
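/*
 * Note: page[0] is an old-style (pre-C99) flexible array member; the
 * actual number of entries is uarc_size / MTHCA_ICM_PAGE_SIZE, sized
 * at kmalloc() time in mthca_init_user_db_tab() below.
 */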

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
	}
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

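/*
 * Build an ICM area of npages pages as a list of chunks, each chunk
 * holding up to MTHCA_ICM_CHUNK_LEN scatterlist entries.  We start
 * with 256 KB (MTHCA_ICM_ALLOC_SIZE) allocations and drop to the next
 * smaller order whenever the allocator fails, giving up only once
 * order 0 (a single page) also fails.
 */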
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

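/*
 * Table chunks are mapped and unmapped on demand and refcounted per
 * MTHCA_TABLE_CHUNK_SIZE chunk.  Callers pair get/put around the
 * lifetime of an object, roughly like this (a sketch with a
 * hypothetical table and object number):
 *
 *	if (mthca_table_get(dev, table, obj))
 *		return -ENOMEM;
 *	... use the ICM backing for obj ...
 *	mthca_table_put(dev, table, obj);
 */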
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

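/*
 * Translate an object number into the kernel virtual address of its
 * ICM backing (lowmem tables only), walking the chunk list page by
 * page.  If dma_handle is non-NULL, also report the bus address of
 * the page holding the object.
 */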
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

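/*
 * Take a reference on every chunk covering objects [start, end].  On
 * failure, drop the references taken so far so the table is left
 * unchanged.
 */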
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u8 status;

	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;
	u8 status;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
}

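/*
 * Each UAR owns uarc_size bytes of UAR context (UARC) starting at
 * uarc_base; this returns the ICM virtual address of the given
 * MTHCA_ICM_PAGE_SIZE page within that region.  For example, page 2
 * of UAR index 3 lives at
 *
 *	uarc_base + 3 * uarc_size + 2 * MTHCA_ICM_PAGE_SIZE
 */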
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

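/*
 * Pin one page of a userspace doorbell region with get_user_pages(),
 * DMA-map it and hand it to the HCA with MAP_ICM_page.  Pages are
 * refcounted, so repeated maps of the same uaddr just bump the count.
 * uaddr must be 4 KB aligned (hence the uaddr & 4095 check), and
 * index must fall inside the UARC region: each doorbell record is
 * 8 bytes, so the region holds uarc_size / 8 of them.
 */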
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	struct page *pages[1];
	int ret = 0;
	u8 status;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	if (index < 0 || index > dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
			     pages, NULL);
	if (ret < 0)
		goto out;

	sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
		    uaddr & ~PAGE_MASK);

	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
	if (ret < 0) {
		put_page(pages[0]);
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
		put_page(sg_page(&db_tab->page[i].mem));
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);
	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
		sg_init_table(&db_tab->page[i].mem, 1);
	}

	return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
			put_page(sg_page(&db_tab->page[i].mem));
		}
	}

	kfree(db_tab);
}

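/*
 * Kernel doorbell records are carved out of UARC pages from both ends:
 * group 0 (CQ arm and SQ doorbells) grows up from page 0, group 1
 * (CQ set_ci, RQ and SRQ doorbells) grows down from the last page,
 * and within a group-1 page the record bitmap is used back to front.
 * We first look for a partially used page, then for a never-allocated
 * page inside the group's current range, and only then extend the
 * range.
 */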
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;
	u8 status;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}

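/*
 * Clear a doorbell record; if its page becomes completely empty and
 * lies at or above max_group1 - 1 (i.e. at the edge of group 1 or
 * anywhere in group 2), unmap it from the UARC and free it, shrinking
 * max_group1 or growing min_group2 as appropriate.
 */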
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;
	u8 status;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because we don't always free our UARC pages when they
	 * become empty (to keep mthca_free_db() simpler), we need to
	 * sweep through the doorbell pages and free any leftover
	 * pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}