/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and negative allocated.  Allocation inside a chunk is done
 * by scanning this map sequentially and serving the first matching
 * entry.  This is mostly copied from the percpu_modalloc() allocator.
 * Chunks are also linked into an rb tree to ease address to chunk
 * mapping during free.
 *
 * To use this allocator, arch code should do the following.
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif
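
/*
 * Example (illustrative, not part of the original source): with the
 * default mapping above, an address pcpu_base_addr + X, i.e. offset X
 * into unit 0 of the first chunk, translates to the percpu pointer
 * __per_cpu_start + X:
 *
 *	void *addr = pcpu_base_addr + X;
 *	void *ptr  = __addr_to_pcpu_ptr(addr);	(equals __per_cpu_start + X)
 *
 * and __pcpu_ptr_to_addr() undoes the shift.  Accessors such as
 * per_cpu_ptr() are expected to add each cpu's offset back on top of
 * such a pointer.
 */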

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, rbtree, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from circulation
 * as the allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
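
/*
 * Example (illustrative, not part of the original source): with
 * PCPU_SLOT_BASE_SHIFT == 5 the slot index grows with the log2 of the
 * size, e.g.
 *
 *	__pcpu_size_to_slot(4)    == max(fls(4) - 5 + 2, 1)    == 1
 *	__pcpu_size_to_slot(1024) == max(fls(1024) - 5 + 2, 1) == 8
 *
 * while a completely free chunk (free_size == pcpu_unit_size) always
 * goes to the last slot, pcpu_nr_slots - 1.
 */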

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}
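
/*
 * Example (illustrative, not part of the original source): with
 * pcpu_unit_pages == 8, page 3 of cpu 2 sits at linear page index
 * pcpu_page_idx(2, 3) == 2 * 8 + 3 == 19, so its mapping address is
 *
 *	pcpu_chunk_addr(chunk, 2, 3) == chunk->vm->addr + (19 << PAGE_SHIFT)
 *
 * and its backing page pointer is chunk->page[19].
 */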

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->vm->addr;
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/* nah... search the regular ones */
	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}

/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 1;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
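
/*
 * Example (illustrative, not part of the original source): for a chunk
 * whose map is { -512, 1024 } (512 bytes allocated followed by 1024
 * free bytes), pcpu_alloc_area(chunk, 128, 64) finds the free block at
 * off == 512, needs no head (512 is already 64 byte aligned) and
 * leaves a 896 byte tail, so after pcpu_split_block() and marking the
 * area allocated the map becomes
 *
 *	{ -512, -128, 896 }
 *
 * and a later pcpu_free_area(chunk, 512) flips the sign back and
 * merges with the following free block, restoring { -512, 1024 }.
 */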

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
			struct page *page = *pagep;

			if (!page)
				continue;

			__free_page(page);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
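
/*
 * Example (illustrative, not part of the original source): a typical
 * caller pairs the returned percpu pointer with per_cpu_ptr() for
 * access and free_percpu() for release, e.g. for a hypothetical
 * struct my_stats counter:
 *
 *	struct my_stats *stats;
 *	unsigned int cpu;
 *	unsigned long total = 0;
 *
 *	stats = __alloc_percpu(sizeof(*stats), __alignof__(*stats));
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->hits;
 *	free_percpu(stats);
 */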

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * @unit_size, if non-negative, specifies unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
 * @reserved_size + if non-negative, @dyn_size.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t dyn_size, ssize_t unit_size,
				     void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t size_sum = static_size + reserved_size +
			  (dyn_size >= 0 ? dyn_size : 0);
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	if (unit_size >= 0) {
		BUG_ON(unit_size < size_sum);
		BUG_ON(unit_size & ~PAGE_MASK);
		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
	} else
		BUG_ON(base_addr);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(size_sum));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = static_size + reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));

		err = pcpu_map(schunk, 0, nr_pages);
		if (err)
			panic("failed to setup static percpu area, err=%d\n",
			      err);
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}

/*
 * Embedding first chunk setup helper.
 */
static void *pcpue_ptr __initdata;
static size_t pcpue_size __initdata;
static size_t pcpue_unit_size __initdata;

static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpue_size)
		return NULL;

	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
}

/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * as a contiguous area using bootmem allocator and used as-is without
 * being mapped into vmalloc area.  This enables the first chunk to
 * piggy back on the linear physical mapping which often uses larger
 * page size.
 *
 * When @dyn_size is positive, dynamic area might be larger than
 * specified to fill page alignment.  Also, when @dyn_size is auto,
 * @dyn_size does not fill the whole first chunk but only what's
 * necessary for page alignment after static and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned to the bootmem allocator.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
				      ssize_t dyn_size, ssize_t unit_size)
{
	unsigned int cpu;

	/* determine parameters and allocate */
	pcpue_size = PFN_ALIGN(static_size + reserved_size +
			       (dyn_size >= 0 ? dyn_size : 0));
	if (dyn_size != 0)
		dyn_size = pcpue_size - static_size - reserved_size;

	if (unit_size >= 0) {
		BUG_ON(unit_size < pcpue_size);
		pcpue_unit_size = unit_size;
	} else
		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);

	pcpue_ptr = __alloc_bootmem_nopanic(
					num_possible_cpus() * pcpue_unit_size,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!pcpue_ptr)
		return -ENOMEM;

	/* return the leftover and copy */
	for_each_possible_cpu(cpu) {
		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;

		free_bootmem(__pa(ptr + pcpue_size),
			     pcpue_unit_size - pcpue_size);
		memcpy(ptr, __per_cpu_load, static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);

	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
				      reserved_size, dyn_size,
				      pcpue_unit_size, pcpue_ptr, NULL);
}
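
/*
 * Example (illustrative sketch, not part of the original source): an
 * arch using the embedding helper above could wire percpu up from its
 * setup path roughly as follows, assuming an arch-defined
 * __per_cpu_offset[] array:
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		size_t static_size = __per_cpu_end - __per_cpu_start;
 *		ssize_t unit_size;
 *		unsigned long delta;
 *		unsigned int cpu;
 *
 *		unit_size = pcpu_embed_first_chunk(static_size, 0, -1, -1);
 *		if (unit_size < 0)
 *			panic("percpu: failed to setup first chunk");
 *
 *		delta = (unsigned long)pcpu_base_addr -
 *			(unsigned long)__per_cpu_start;
 *		for_each_possible_cpu(cpu)
 *			__per_cpu_offset[cpu] = delta + cpu * unit_size;
 *	}
 */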