/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Unit grows as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into a rb tree
 * to ease address to chunk mapping during free.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
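
/*
 * Illustrative sketch (not part of the allocator): with the layout
 * above, the per-cpu copy of an allocated area is found by adding the
 * cpu's unit offset to the chunk base.  The helper below is a made-up
 * example and only mirrors what the arch-provided
 * __addr_to_pcpu_ptr()/__pcpu_ptr_to_addr() and percpu base registers
 * achieve:
 *
 *	static void *example_cpu_copy(void *chunk_base, unsigned int cpu,
 *				      size_t unit_size, size_t off)
 *	{
 *		return chunk_base + cpu * unit_size + off;
 *	}
 */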

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* optional reserved chunk, only accessible for reserved allocations */
static struct pcpu_chunk *pcpu_reserved_chunk;

/* offset limit of the reserved chunk */
static int pcpu_reserved_chunk_limit;

/*
 * One mutex to rule them all.
 *
 * The following mutex is grabbed in the outermost public alloc/free
 * interface functions and released only when the operation is
 * complete.  As such, every function in this file other than the
 * outermost functions are called under pcpu_mutex.
 *
 * It can easily be switched to use spinlock such that only the area
 * allocation and page population commit are protected with it doing
 * actual [de]allocation without holding any lock.  However, given
 * what this allocator does, I think it's better to let them run
 * under the single sleepable mutex.
 */
static DEFINE_MUTEX(pcpu_mutex);

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
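
/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot() maps sizes to list slots as
 *
 *	__pcpu_size_to_slot(4)    == max(fls(4)    - 5 + 2, 1) == 1
 *	__pcpu_size_to_slot(64)   == max(fls(64)   - 5 + 2, 1) == 4
 *	__pcpu_size_to_slot(1024) == max(fls(1024) - 5 + 2, 1) == 8
 *
 * so each slot roughly covers one power-of-two size class, and a
 * completely free chunk always lands in the last slot
 * (pcpu_nr_slots - 1) via pcpu_size_to_slot().
 */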

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}
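
/*
 * Illustrative example: with pcpu_unit_pages == 8, the page backing
 * cpu 2, page 3 lives at chunk->page[pcpu_page_idx(2, 3)] ==
 * chunk->page[19] and its virtual address is
 * chunk->vm->addr + (19 << PAGE_SHIFT).
 */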

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}

/**
 * pcpu_realloc - versatile realloc
 * @p: the current pointer (can be NULL for new allocations)
 * @size: the current size in bytes (can be 0 for new allocations)
 * @new_size: the wanted new size in bytes (can be 0 for free)
 *
 * More robust realloc which can be used to allocate, resize or free a
 * memory area of arbitrary size.  If the needed size goes over
 * PAGE_SIZE, kernel VM is used.
 *
 * RETURNS:
 * The new pointer on success, NULL on failure.
 */
static void *pcpu_realloc(void *p, size_t size, size_t new_size)
{
	void *new;

	if (new_size <= PAGE_SIZE)
		new = kmalloc(new_size, GFP_KERNEL);
	else
		new = vmalloc(new_size);
	if (new_size && !new)
		return NULL;

	memcpy(new, p, min(size, new_size));
	if (new_size > size)
		memset(new + size, 0, new_size - size);

	if (size <= PAGE_SIZE)
		kfree(p);
	else
		vfree(p);

	return new;
}
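
/*
 * Usage sketch (illustrative only): the three modes of pcpu_realloc()
 * used throughout this file are
 *
 *	p = pcpu_realloc(NULL, 0, new_size);	// plain allocation
 *	p = pcpu_realloc(p, size, new_size);	// resize, grown part zeroed
 *	pcpu_realloc(p, size, 0);		// free
 *
 * Callers pass the current size themselves; no size information is
 * stored with the area.
 */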

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &parent->rb_left;
		else if (addr > chunk->vm->addr)
			p = &parent->rb_right;
		else
			break;	/* found */
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	/* is it in the reserved chunk? */
	if (pcpu_reserved_chunk) {
		void *start = pcpu_reserved_chunk->vm->addr;

		if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
	}

	/* nah... search the regular ones */
	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}

/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
{
	int nr_extra = !!head + !!tail;
	int target = chunk->map_used + nr_extra;

	/* reallocation required? */
	if (chunk->map_alloc < target) {
		int new_alloc;
		int *new;

		new_alloc = PCPU_DFL_MAP_ALLOC;
		while (new_alloc < target)
			new_alloc *= 2;

		if (chunk->map_alloc < PCPU_DFL_MAP_ALLOC) {
			/*
			 * map_alloc smaller than the default size
			 * indicates that the chunk is one of the
			 * first chunks and still using static map.
			 * Allocate a dynamic one and copy.
			 */
			new = pcpu_realloc(NULL, 0, new_alloc * sizeof(new[0]));
			if (new)
				memcpy(new, chunk->map,
				       chunk->map_alloc * sizeof(new[0]));
		} else
			new = pcpu_realloc(chunk->map,
					   chunk->map_alloc * sizeof(new[0]),
					   new_alloc * sizeof(new[0]));
		if (!new)
			return -ENOMEM;

		chunk->map_alloc = new_alloc;
		chunk->map = new;
	}

	/* insert a new subblock */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
	return 0;
}
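
/*
 * Illustrative example of the allocation map encoding (positive ==
 * free, negative == allocated, offsets are the running sum of
 * absolute values): with a 4096 byte unit,
 *
 *	fresh chunk:			map[] = {  4096 }
 *	after a 512 byte allocation:	map[] = { -512, 3584 }
 *	after another 1024 bytes:	map[] = { -512, -1024, 2560 }
 *
 * Freeing the 512 byte area flips its entry back to positive and
 * pcpu_free_area() merges it with any free neighbour.
 */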

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -errno on failure.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			if (pcpu_split_block(chunk, i, head, tail))
				return -ENOMEM;
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/*
	 * Tell the upper layer that this chunk has no area left.
	 * Note that this is not an error condition but a notification
	 * to the upper layer that it needs to look at other chunks.
	 * -ENOSPC is chosen as it isn't used in the memory subsystem and
	 * matches the meaning in a way.
	 */
	return -ENOSPC;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_realloc(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]), 0);
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_realloc(NULL, 0,
				  PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	void *ptr = NULL;
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_mutex);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint)
			goto out_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto out_unlock;
	}

	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;
			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto out_unlock;
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);

	off = pcpu_alloc_area(chunk, size, align);
	if (off < 0)
		goto out_unlock;

area_found:
	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		pcpu_free_area(chunk, off);
		goto out_unlock;
	}

	ptr = __addr_to_pcpu_ptr(chunk->vm->addr + off);
out_unlock:
	mutex_unlock(&pcpu_mutex);
	return ptr;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
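
/*
 * Typical usage sketch (illustrative; struct foo_stats is a made-up
 * example type and the accessors come from <linux/percpu.h>):
 *
 *	struct foo_stats *stats;
 *
 *	stats = __alloc_percpu(sizeof(*stats), __alignof__(*stats));
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	per_cpu_ptr(stats, cpu)->count++;
 *	...
 *	free_percpu(stats);
 */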

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
{
	WARN_ON(chunk->immutable);
	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
	list_del(&chunk->list);
	rb_erase(&chunk->rb_node, &pcpu_addr_root);
	free_pcpu_chunk(chunk);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.  Might sleep.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	int off;

	if (!ptr)
		return;

	mutex_lock(&pcpu_mutex);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* the chunk became fully free, kill one if there are other free ones */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos,
				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
			if (pos != chunk) {
				pcpu_kill_chunk(pos);
				break;
			}
	}

	mutex_unlock(&pcpu_mutex);
}
EXPORT_SYMBOL_GPL(free_percpu);
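
/*
 * Illustrative first chunk layout produced by pcpu_setup_first_chunk()
 * below (all numbers made up): with static_size = 64k, reserved_size =
 * 8k, dyn_size = 20k and a unit_size of 92k, every unit of the first
 * chunk looks like
 *
 *	| static 64k | reserved 8k | dynamic 20k |
 *
 * schunk serves the static + reserved part (map = { -64k, 8k }) while
 * dchunk, sharing the same vm area and page map, serves the dynamic
 * tail (map = { -72k, 20k }) and is the only one of the two circulated
 * in the regular chunk slots.
 */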

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @unit_size, if non-negative, specifies unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
 * @reserved_size + @dyn_size.
 *
 * @dyn_size, if non-negative, limits the number of bytes available
 * for dynamic allocation in the first chunk.  Specifying a
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t unit_size, ssize_t dyn_size,
				     void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	if (unit_size >= 0) {
		BUG_ON(unit_size < static_size + reserved_size +
				   (dyn_size >= 0 ? dyn_size : 0));
		BUG_ON(unit_size & ~PAGE_MASK);
	} else {
		BUG_ON(dyn_size >= 0);
		BUG_ON(base_addr);
	}
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(static_size + reserved_size));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;	/* not for dynamic alloc */
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	pcpu_reserved_chunk_limit = static_size + schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));
	}

	err = pcpu_map(schunk, 0, nr_pages);
	if (err)
		panic("failed to setup static percpu area, err=%d\n", err);

	/* link the first chunk in */
	if (!dchunk) {
		pcpu_chunk_relocate(schunk, -1);
		pcpu_chunk_addr_insert(schunk);
	} else {
		pcpu_chunk_relocate(dchunk, -1);
		pcpu_chunk_addr_insert(dchunk);
	}

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}