/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"
/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
        MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};
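/*
 * Added note (not in the original source): with MTHCA_ICM_ALLOC_SIZE =
 * 1 << 18 = 256 KB and 4 KB pages, get_order(MTHCA_ICM_ALLOC_SIZE) = 6,
 * so each allocation attempt starts by asking for a 2^6 = 64-page block
 * and falls back to smaller orders on failure.
 */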
struct mthca_user_db_table {
        struct mutex mutex;
        struct {
                u64                uvirt;
                struct scatterlist mem;
                int                refcount;
        }                  page[0];
};
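/*
 * Added commentary (a sketch of the intended bookkeeping): each entry of
 * the trailing page[] array tracks one pinned userspace doorbell page --
 * the user virtual address it was mapped from (uvirt), the single-entry
 * scatterlist used for the PCI mapping (mem), and how many doorbell
 * records on that page are in use (refcount). The array is sized at
 * allocation time in mthca_init_user_db_tab() from
 * uarc_size / MTHCA_ICM_PAGE_SIZE.
 */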
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}
static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i) {
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
        }
}
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
        struct mthca_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mthca_free_icm_coherent(dev, chunk);
                else
                        mthca_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}
static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        struct page *page;

        /*
         * Use __GFP_ZERO because buggy firmware assumes ICM pages are
         * cleared, and subtle failures are seen if they aren't.
         */
        page = alloc_pages(gfp_mask | __GFP_ZERO, order);
        if (!page)
                return -ENOMEM;

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}
static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                    int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
                                       gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}
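/*
 * Added note: the two allocators above are the non-coherent and coherent
 * halves of the same job -- filling one scatterlist entry with a
 * 2^order-page block. The non-coherent path takes pages from
 * alloc_pages() and relies on a later pci_map_sg(); the coherent path
 * gets its DMA mapping directly from dma_alloc_coherent(), so it fills
 * in sg_dma_address()/sg_dma_len() itself and is never passed to
 * pci_map_sg().
 */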
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                                  gfp_t gfp_mask, int coherent)
{
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return icm;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
                                                       &chunk->mem[chunk->npages],
                                                       cur_order, gfp_mask);
                else
                        ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                    cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;
                        else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;
                        }

                        if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
                                chunk = NULL;

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mthca_free_icm(dev, icm, coherent);
        return NULL;
}
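/*
 * Worked example (added for illustration, assuming 4 KB pages): a request
 * for npages = 70 starts at cur_order = get_order(MTHCA_ICM_ALLOC_SIZE) = 6.
 * The first allocation takes 64 pages, leaving 6, so the inner
 * "while (1 << cur_order > npages)" drops cur_order to 2; allocations of
 * 4 and then 2 pages (orders 2 and 1) finish the job. Allocation failures
 * also lower cur_order, so under memory pressure the function degrades to
 * smaller blocks instead of failing outright.
 */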
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
        int ret = 0;
        u8 status;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                        (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                        __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                          &status) || status) {
                mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}
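/*
 * Added example of the index math above: with obj_size = 64 bytes and
 * MTHCA_TABLE_CHUNK_SIZE = 256 KB, each ICM chunk covers 4096 objects,
 * so object number 10000 (after masking by num_obj - 1) lands in
 * table->icm[10000 * 64 / (1 << 18)] = table->icm[2]. The chunk's
 * refcount counts the live objects mapped through it.
 */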
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
                                &status);
                mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
        int idx, offset, dma_offset, i;
        struct mthca_icm_chunk *chunk;
        struct mthca_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /* DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to. */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}
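/*
 * Added note on the walk above: "offset" counts down through the CPU-side
 * lengths of the chunk list until the scatterlist entry containing the
 * object is found, while "dma_offset" counts down through the DMA lengths
 * so that *dma_handle can be set from the (possibly IOMMU-merged) bus
 * address. Since merging never splits an entry, once the CPU page is
 * found the DMA handle has already been written.
 */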
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                          int start, int end)
{
        int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err = 0;

        for (i = start; i <= end; i += inc) {
                err = mthca_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mthca_table_put(dev, table, i);
        }

        return err;
}
void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                           int start, int end)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
                mthca_table_put(dev, table, i);
}
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                              u64 virt, int obj_size,
                                              int nobj, int reserved,
                                              int use_lowmem, int use_coherent)
{
        struct mthca_icm_table *table;
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;
        u8 status;

        obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
        num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

        table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
        if (!table)
                return NULL;

        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i < num_icm; ++i)
                table->icm[i] = NULL;

        for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MTHCA_TABLE_CHUNK_SIZE;
                if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

                table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                                (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                                __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                  &status) || status) {
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return table;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
                                        &status);
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table);

        return NULL;
}
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
        int i;
        u8 status;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
                                        &status);
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table);
}
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
        return dev->uar_table.uarc_base +
                uar->index * dev->uar_table.uarc_size +
                page * MTHCA_ICM_PAGE_SIZE;
}
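/*
 * Added example with invented numbers: if uarc_base = 0x1000000,
 * uarc_size = 32 KB and MTHCA_ICM_PAGE_SIZE = 4 KB, then doorbell page 3
 * of UAR index 2 maps to 0x1000000 + 2 * 0x8000 + 3 * 0x1000 = 0x1013000
 * in the firmware's ICM virtual space.
 */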
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
        struct page *pages[1];
        int ret = 0;
        u8 status;
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        if (index < 0 || index > dev->uar_table.uarc_size / 8)
                return -EINVAL;

        mutex_lock(&db_tab->mutex);

        i = index / MTHCA_DB_REC_PER_PAGE;

        if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
            (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
            (uaddr & 4095)) {
                ret = -EINVAL;
                goto out;
        }

        if (db_tab->page[i].refcount) {
                ++db_tab->page[i].refcount;
                goto out;
        }

        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
                             pages, NULL);
        if (ret < 0)
                goto out;

        sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
                    uaddr & ~PAGE_MASK);

        ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
        if (ret < 0) {
                put_page(pages[0]);
                goto out;
        }

        ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                                 mthca_uarc_virt(dev, uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                put_page(sg_page(&db_tab->page[i].mem));
                goto out;
        }

        db_tab->page[i].uvirt    = uaddr;
        db_tab->page[i].refcount = 1;

out:
        mutex_unlock(&db_tab->mutex);
        return ret;
}
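/*
 * Added summary of the lifecycle above: get_user_pages() pins the user's
 * doorbell page, pci_map_sg() gives it a bus address, and
 * mthca_MAP_ICM_page() points the HCA's UARC at it; on failure the
 * PCI mapping and the page pin are unwound in reverse order. Repeat
 * mappings of the same page only bump the refcount, and the final
 * unmap/unpin happens in mthca_cleanup_user_db_tab().
 */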
void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                         struct mthca_user_db_table *db_tab, int index)
{
        if (!mthca_is_memfree(dev))
                return;

        /*
         * To make our bookkeeping simpler, we don't unmap DB
         * pages until we clean up the whole db table.
         */

        mutex_lock(&db_tab->mutex);

        --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

        mutex_unlock(&db_tab->mutex);
}
struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
        struct mthca_user_db_table *db_tab;
        int npages;
        int i;

        if (!mthca_is_memfree(dev))
                return NULL;

        npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
        if (!db_tab)
                return ERR_PTR(-ENOMEM);

        mutex_init(&db_tab->mutex);
        for (i = 0; i < npages; ++i) {
                db_tab->page[i].refcount = 0;
                db_tab->page[i].uvirt    = 0;
                sg_init_table(&db_tab->page[i].mem, 1);
        }

        return db_tab;
}
void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                               struct mthca_user_db_table *db_tab)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
                if (db_tab->page[i].uvirt) {
                        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
                        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                        put_page(sg_page(&db_tab->page[i].mem));
                }
        }

        kfree(db_tab);
}
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
                   u32 qn, __be32 **db)
{
        int group;
        int start, end, dir;
        int i, j;
        struct mthca_db_page *page;
        int ret = 0;
        u8 status;

        mutex_lock(&dev->db_tab->mutex);

        switch (type) {
        case MTHCA_DB_TYPE_CQ_ARM:
        case MTHCA_DB_TYPE_SQ:
                group = 0;
                start = 0;
                end   = dev->db_tab->max_group1;
                dir   = 1;
                break;

        case MTHCA_DB_TYPE_CQ_SET_CI:
        case MTHCA_DB_TYPE_RQ:
        case MTHCA_DB_TYPE_SRQ:
                group = 1;
                start = dev->db_tab->npages - 1;
                end   = dev->db_tab->min_group2;
                dir   = -1;
                break;

        default:
                ret = -EINVAL;
                goto out;
        }

        /* First look for an already-mapped page with a free record slot. */
        for (i = start; i != end; i += dir)
                if (dev->db_tab->page[i].db_rec &&
                    !bitmap_full(dev->db_tab->page[i].used,
                                 MTHCA_DB_REC_PER_PAGE)) {
                        page = dev->db_tab->page + i;
                        goto found;
                }

        /* Then look for a page inside the group that isn't mapped yet. */
        for (i = start; i != end; i += dir)
                if (!dev->db_tab->page[i].db_rec) {
                        page = dev->db_tab->page + i;
                        goto alloc;
                }

        /* Otherwise grow this group toward the other, if there is room. */
        if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
                ret = -ENOMEM;
                goto out;
        }

        if (group == 0)
                ++dev->db_tab->max_group1;
        else
                --dev->db_tab->min_group2;

        page = dev->db_tab->page + end;

alloc:
        page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                          &page->mapping, GFP_KERNEL);
        if (!page->db_rec) {
                ret = -ENOMEM;
                goto out;
        }
        memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

        ret = mthca_MAP_ICM_page(dev, page->mapping,
                                 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                goto out;
        }

        bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
        j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
        set_bit(j, page->used);

        if (group == 1)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;

        ret = i * MTHCA_DB_REC_PER_PAGE + j;

        page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

        *db = (__be32 *) &page->db_rec[j];

out:
        mutex_unlock(&dev->db_tab->mutex);

        return ret;
}
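/*
 * Added example of the record encoding: for a send queue with qn = 0x42,
 * the record becomes cpu_to_be64((0x42 << 8) | (MTHCA_DB_TYPE_SQ << 5)),
 * and *db points at the first 32-bit word of the record, which the
 * driver updates when ringing the doorbell. Group 1 (CQ arm / SQ) pages
 * grow up from page 0 while group 2 (CQ set_ci / RQ / SRQ) pages grow
 * down from the last page, with group-2 records allocated from the top
 * of each page (hence the "MTHCA_DB_REC_PER_PAGE - 1 - j" flip).
 */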
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
        int i, j;
        struct mthca_db_page *page;
        u8 status;

        i = db_index / MTHCA_DB_REC_PER_PAGE;
        j = db_index % MTHCA_DB_REC_PER_PAGE;

        page = dev->db_tab->page + i;

        mutex_lock(&dev->db_tab->mutex);

        page->db_rec[j] = 0;
        if (i >= dev->db_tab->min_group2)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;
        clear_bit(j, page->used);

        if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
            i >= dev->db_tab->max_group1 - 1) {
                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                page->db_rec = NULL;

                if (i == dev->db_tab->max_group1) {
                        --dev->db_tab->max_group1;
                        /* XXX may be able to unmap more pages now */
                }
                if (i == dev->db_tab->min_group2)
                        ++dev->db_tab->min_group2;
        }

        mutex_unlock(&dev->db_tab->mutex);
}
int mthca_init_db_tab(struct mthca_dev *dev)
{
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
        if (!dev->db_tab)
                return -ENOMEM;

        mutex_init(&dev->db_tab->mutex);

        dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        dev->db_tab->max_group1 = 0;
        dev->db_tab->min_group2 = dev->db_tab->npages - 1;

        dev->db_tab->page = kmalloc(dev->db_tab->npages *
                                    sizeof *dev->db_tab->page,
                                    GFP_KERNEL);
        if (!dev->db_tab->page) {
                kfree(dev->db_tab);
                return -ENOMEM;
        }

        for (i = 0; i < dev->db_tab->npages; ++i)
                dev->db_tab->page[i].db_rec = NULL;

        return 0;
}
void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        /*
         * Because we don't always free our UARC pages when they
         * become empty to make mthca_free_db() simpler we need to
         * make a sweep through the doorbell pages and free any
         * leftover pages now.
         */
        for (i = 0; i < dev->db_tab->npages; ++i) {
                if (!dev->db_tab->page[i].db_rec)
                        continue;

                if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
                        mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  dev->db_tab->page[i].db_rec,
                                  dev->db_tab->page[i].mapping);
        }

        kfree(dev->db_tab->page);
        kfree(dev->db_tab);
}