/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
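
/*
 * NUM_PAGES_TO_ALLOC is sized so that the scratch array of page pointers
 * used for batched allocation and freeing fills exactly one page
 * (512 entries with 4K pages on 64-bit).
 */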

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
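 * @name: Name of the pool, shown in debugfs.
 * @nfrees: Stat counter: number of pages freed from this pool.
 * @nrefills: Stat counter: number of times this pool has been refilled.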
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read only object for the pool code, so it doesn't need locking.
 *
 * @kobj: kobject exposing the pool limits in sysfs.
 * @mm_shrink: Shrinker through which mm can ask the pools to release pages.
 * @options: Limits used when filling and shrinking the pools.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};
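
/*
 * The anonymous union above lets the same four pools be reached both by
 * name (wc_pool, uc_pool, ...) and as the pools[] array that
 * ttm_get_pool() and the shrinker index into.
 */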

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);
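	/*
	 * The division above uses PAGE_SIZE >> 10, the page size expressed
	 * in KiB, so the KiB value written by the user is rounded down to
	 * whole pages.
	 */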

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

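/*
 * On x86 the set_pages_array_*() helpers come from the arch's PAT code.
 * The fallbacks below only (un)map pages through AGP when TTM_HAS_AGP is
 * set and are otherwise no-ops, which matches the historic behaviour on
 * non-x86 architectures.
 */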
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

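	/*
	 * pool_index now encodes: bit 0 = uc (vs wc), bit 1 = dma32
	 * variant, i.e. 0 -> wc_pool, 1 -> uc_pool, 2 -> wc_pool_dma32,
	 * 3 -> uc_pool_dma32, matching the pools[] union layout.
	 */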
	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES means free every page
 * in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
		freed += nr_free - shrink_pages;
	}
	return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change the caching state. If there are any
 * pages that have changed their caching state already, put them back in
 * the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

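		/*
		 * Highmem pages are excluded from the caching transition
		 * below: they have no permanent kernel mapping, so the
		 * attribute change on the kernel linear map does not apply
		 * to them.
		 */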
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool.
			 * Count the partially allocated pages, not the
			 * whole pool list. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is nearer to halve the expected
	 * search length. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * into the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
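
/*
 * Note: ttm_page_alloc_init()/ttm_page_alloc_fini() are expected to be
 * called once per system, from the ttm_mem_global setup and teardown
 * paths respectively.
 */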

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
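
/*
 * Drivers that don't need the coherent DMA pool typically wire this
 * straight into their ttm_tt backend. An illustrative (not verbatim)
 * hook, with a hypothetical driver function name:
 *
 *	static int my_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 */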

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
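
/*
 * Example debugfs output (values are illustrative only):
 *
 *   pool      refills   pages freed     size
 *     wc          240         10240      512
 *     uc            2             0       64
 * wc dma            0             0        0
 * uc dma            0             0        0
 */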