percpu: make @dyn_size always mean min dyn_size in first chunk init functions
1 /*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is percpu allocator which can handle both static and dynamic
10 * areas.  Percpu areas are allocated in chunks.  Each chunk consists
11 * of a boot-time determined number of units, and the first chunk is
12 * used for static percpu variables in the kernel image
13 * (special boot time alloc/init handling necessary as these areas
14 * need to be brought up before allocation services are running).
15 * Units grow as necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated.
17 *
18 *  c0                           c1                         c2
19 *  -------------------          -------------------        ------------
20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21 *  -------------------  ......  -------------------  ....  ------------
22 *
23 * Allocation is done in offset-size areas of a single unit's space.
24 * I.e., an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
25 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
26 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
29 *
30 * There are usually many small percpu allocations, many of them as
31 * small as 4 bytes.  The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator not to iterate the
36 * chunk maps unnecessarily.
37 *
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map. A positive value in the map represents a free
40 * region and negative allocated. Allocation inside a chunk is done
41 * by scanning this map sequentially and serving the first matching
42 * entry. This is mostly copied from the percpu_modalloc() allocator.
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
45 *
46 * To use this allocator, arch code should do the following:
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be
50 * different from the default
51 *
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 * setup the first chunk containing the kernel static percpu area
54 */
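/*
 * Editor's sketch (not part of the allocator): the area map encoding
 * described above, with values assumed purely for illustration.  For
 * a hypothetical 64-byte unit,
 *
 *	int map[] = { -16, 8, -24, 16 };
 *
 * reads as 16 bytes allocated at offset 0, 8 bytes free at offset 16,
 * 24 bytes allocated at offset 24 and 16 bytes free at offset 48; the
 * absolute values of the entries always sum to the unit size.
 */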
55
56 #include <linux/bitmap.h>
57 #include <linux/bootmem.h>
58 #include <linux/err.h>
59 #include <linux/list.h>
60 #include <linux/log2.h>
61 #include <linux/mm.h>
62 #include <linux/module.h>
63 #include <linux/mutex.h>
64 #include <linux/percpu.h>
65 #include <linux/pfn.h>
66 #include <linux/slab.h>
67 #include <linux/spinlock.h>
68 #include <linux/vmalloc.h>
69 #include <linux/workqueue.h>
70
71 #include <asm/cacheflush.h>
72 #include <asm/sections.h>
73 #include <asm/tlbflush.h>
74 #include <asm/io.h>
75
76 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 bytes share the same slot */
77 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
78
79 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
80 #ifndef __addr_to_pcpu_ptr
81 #define __addr_to_pcpu_ptr(addr) \
82 (void __percpu *)((unsigned long)(addr) - \
83 (unsigned long)pcpu_base_addr + \
84 (unsigned long)__per_cpu_start)
85 #endif
86 #ifndef __pcpu_ptr_to_addr
87 #define __pcpu_ptr_to_addr(ptr) \
88 (void __force *)((unsigned long)(ptr) + \
89 (unsigned long)pcpu_base_addr - \
90 (unsigned long)__per_cpu_start)
91 #endif
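/*
 * Illustration of the default translation above (addresses are made
 * up; the real values are boot- and arch-dependent): if
 * pcpu_base_addr were 0xffffc20000000000 and __per_cpu_start were
 * 0xffffffff80a00000, then the unit0 address 0xffffc20000000100
 * would translate to the percpu pointer 0xffffffff80a00100, and
 * __pcpu_ptr_to_addr() performs the exact inverse shift.
 */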
92
93 struct pcpu_chunk {
94 struct list_head list; /* linked to pcpu_slot lists */
95 int free_size; /* free bytes in the chunk */
96 int contig_hint; /* max contiguous size hint */
97 void *base_addr; /* base address of this chunk */
98 int map_used; /* # of map entries used */
99 int map_alloc; /* # of map entries allocated */
100 int *map; /* allocation map */
101 void *data; /* chunk data */
102 bool immutable; /* no [de]population allowed */
103 unsigned long populated[]; /* populated bitmap */
104 };
105
106 static int pcpu_unit_pages __read_mostly;
107 static int pcpu_unit_size __read_mostly;
108 static int pcpu_nr_units __read_mostly;
109 static int pcpu_atom_size __read_mostly;
110 static int pcpu_nr_slots __read_mostly;
111 static size_t pcpu_chunk_struct_size __read_mostly;
112
113 /* cpus with the lowest and highest unit numbers */
114 static unsigned int pcpu_first_unit_cpu __read_mostly;
115 static unsigned int pcpu_last_unit_cpu __read_mostly;
116
117 /* the address of the first chunk which starts with the kernel static area */
118 void *pcpu_base_addr __read_mostly;
119 EXPORT_SYMBOL_GPL(pcpu_base_addr);
120
121 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
122 const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
123
124 /* group information, used for vm allocation */
125 static int pcpu_nr_groups __read_mostly;
126 static const unsigned long *pcpu_group_offsets __read_mostly;
127 static const size_t *pcpu_group_sizes __read_mostly;
128
129 /*
130 * The first chunk which always exists. Note that unlike other
131 * chunks, this one can be allocated and mapped in several different
132 * ways and thus often doesn't live in the vmalloc area.
133 */
134 static struct pcpu_chunk *pcpu_first_chunk;
135
136 /*
137 * Optional reserved chunk. This chunk reserves part of the first
138 * chunk and serves it for reserved allocations.  The reserved
139 * region ends at offset pcpu_reserved_chunk_limit.  When the reserved
140 * area doesn't exist, the following variables contain NULL and 0
141 * respectively.
142 */
143 static struct pcpu_chunk *pcpu_reserved_chunk;
144 static int pcpu_reserved_chunk_limit;
145
146 /*
147 * Synchronization rules.
148 *
149 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
150 * protects allocation/reclaim paths, chunks, populated bitmap and
151 * vmalloc mapping. The latter is a spinlock and protects the index
152 * data structures - chunk slots, chunks and area maps in chunks.
153 *
154 * During allocation, pcpu_alloc_mutex is kept locked all the time and
155 * pcpu_lock is grabbed and released as necessary. All actual memory
156 * allocations are done using GFP_KERNEL with pcpu_lock released. In
157 * general, percpu memory can't be allocated with irq off but
158 * irqsave/restore are still used in alloc path so that it can be used
159 * from early init path - sched_init() specifically.
160 *
161 * Free path accesses and alters only the index data structures, so it
162 * can be safely called from atomic context. When memory needs to be
163 * returned to the system, free path schedules reclaim_work which
164 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
165 * reclaimed, releases both locks and frees the chunks.  Note that it's
166 * necessary to grab both locks to remove a chunk from circulation as
167 * allocation path might be referencing the chunk with only
168 * pcpu_alloc_mutex locked.
169 */
170 static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
171 static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
172
173 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
174
175 /* reclaim work to release fully free chunks, scheduled from free path */
176 static void pcpu_reclaim(struct work_struct *work);
177 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
178
179 static bool pcpu_addr_in_first_chunk(void *addr)
180 {
181 void *first_start = pcpu_first_chunk->base_addr;
182
183 return addr >= first_start && addr < first_start + pcpu_unit_size;
184 }
185
186 static bool pcpu_addr_in_reserved_chunk(void *addr)
187 {
188 void *first_start = pcpu_first_chunk->base_addr;
189
190 return addr >= first_start &&
191 addr < first_start + pcpu_reserved_chunk_limit;
192 }
193
194 static int __pcpu_size_to_slot(int size)
195 {
196 int highbit = fls(size); /* size is in bytes */
197 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
198 }
199
200 static int pcpu_size_to_slot(int size)
201 {
202 if (size == pcpu_unit_size)
203 return pcpu_nr_slots - 1;
204 return __pcpu_size_to_slot(size);
205 }
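/*
 * Worked example of the slot math above: for size == 12, fls(12) == 4
 * and __pcpu_size_to_slot() returns max(4 - 5 + 2, 1) == 1; a 1024
 * byte area gives fls(1024) == 11 and lands in slot 8.  A fully free
 * chunk (free_size == pcpu_unit_size) is always kept in the last
 * slot, pcpu_nr_slots - 1.
 */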
206
207 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
208 {
209 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
210 return 0;
211
212 return pcpu_size_to_slot(chunk->free_size);
213 }
214
215 /* set the pointer to a chunk in a page struct */
216 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
217 {
218 page->index = (unsigned long)pcpu;
219 }
220
221 /* obtain pointer to a chunk from a page struct */
222 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
223 {
224 return (struct pcpu_chunk *)page->index;
225 }
226
227 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
228 {
229 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
230 }
231
232 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
233 unsigned int cpu, int page_idx)
234 {
235 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
236 (page_idx << PAGE_SHIFT);
237 }
238
239 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
240 int *rs, int *re, int end)
241 {
242 *rs = find_next_zero_bit(chunk->populated, end, *rs);
243 *re = find_next_bit(chunk->populated, end, *rs + 1);
244 }
245
246 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
247 int *rs, int *re, int end)
248 {
249 *rs = find_next_bit(chunk->populated, end, *rs);
250 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
251 }
252
253 /*
254 * (Un)populated page region iterators. Iterate over (un)populated
255 * page regions between @start and @end in @chunk. @rs and @re should
256 * be integer variables and will be set to start and end page index of
257 * the current region.
258 */
259 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
260 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
261 (rs) < (re); \
262 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
263
264 #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
265 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
266 (rs) < (re); \
267 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
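/*
 * Usage sketch (hypothetical caller): walk every unpopulated page
 * region of a chunk.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d, %d) need populating\n", rs, re);
 */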
268
269 /**
270 * pcpu_mem_alloc - allocate memory
271 * @size: bytes to allocate
272 *
273 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
274 * kzalloc() is used; otherwise, vmalloc() is used. The returned
275 * memory is always zeroed.
276 *
277 * CONTEXT:
278 * Does GFP_KERNEL allocation.
279 *
280 * RETURNS:
281 * Pointer to the allocated area on success, NULL on failure.
282 */
283 static void *pcpu_mem_alloc(size_t size)
284 {
285 if (size <= PAGE_SIZE)
286 return kzalloc(size, GFP_KERNEL);
287 else {
288 void *ptr = vmalloc(size);
289 if (ptr)
290 memset(ptr, 0, size);
291 return ptr;
292 }
293 }
294
295 /**
296 * pcpu_mem_free - free memory
297 * @ptr: memory to free
298 * @size: size of the area
299 *
300 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
301 */
302 static void pcpu_mem_free(void *ptr, size_t size)
303 {
304 if (size <= PAGE_SIZE)
305 kfree(ptr);
306 else
307 vfree(ptr);
308 }
309
310 /**
311 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
312 * @chunk: chunk of interest
313 * @oslot: the previous slot it was on
314 *
315 * This function is called after an allocation or free changed @chunk.
316 * New slot according to the changed state is determined and @chunk is
317 * moved to the slot. Note that the reserved chunk is never put on
318 * chunk slots.
319 *
320 * CONTEXT:
321 * pcpu_lock.
322 */
323 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
324 {
325 int nslot = pcpu_chunk_slot(chunk);
326
327 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
328 if (oslot < nslot)
329 list_move(&chunk->list, &pcpu_slot[nslot]);
330 else
331 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
332 }
333 }
334
335 /**
336 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
337 * @chunk: chunk of interest
338 *
339 * Determine whether area map of @chunk needs to be extended to
340 * accommodate a new allocation.
341 *
342 * CONTEXT:
343 * pcpu_lock.
344 *
345 * RETURNS:
346 * New target map allocation length if extension is necessary, 0
347 * otherwise.
348 */
349 static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
350 {
351 int new_alloc;
352
353 if (chunk->map_alloc >= chunk->map_used + 2)
354 return 0;
355
356 new_alloc = PCPU_DFL_MAP_ALLOC;
357 while (new_alloc < chunk->map_used + 2)
358 new_alloc *= 2;
359
360 return new_alloc;
361 }
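/*
 * Example: a chunk with map_used == 15 and map_alloc == 16 can't
 * guarantee room for the up-to-two entries a split may add
 * (16 < 15 + 2), so pcpu_need_to_extend() doubles from
 * PCPU_DFL_MAP_ALLOC and returns 32.
 */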
362
363 /**
364 * pcpu_extend_area_map - extend area map of a chunk
365 * @chunk: chunk of interest
366 * @new_alloc: new target allocation length of the area map
367 *
368 * Extend area map of @chunk to have @new_alloc entries.
369 *
370 * CONTEXT:
371 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
372 *
373 * RETURNS:
374 * 0 on success, -errno on failure.
375 */
376 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
377 {
378 int *old = NULL, *new = NULL;
379 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
380 unsigned long flags;
381
382 new = pcpu_mem_alloc(new_size);
383 if (!new)
384 return -ENOMEM;
385
386 /* acquire pcpu_lock and switch to new area map */
387 spin_lock_irqsave(&pcpu_lock, flags);
388
389 if (new_alloc <= chunk->map_alloc)
390 goto out_unlock;
391
392 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
393 memcpy(new, chunk->map, old_size);
394
395 /*
396 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
397 * one of the first chunks and still using static map.
398 */
399 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
400 old = chunk->map;
401
402 chunk->map_alloc = new_alloc;
403 chunk->map = new;
404 new = NULL;
405
406 out_unlock:
407 spin_unlock_irqrestore(&pcpu_lock, flags);
408
409 /*
410 * pcpu_mem_free() might end up calling vfree() which uses
411 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
412 */
413 pcpu_mem_free(old, old_size);
414 pcpu_mem_free(new, new_size);
415
416 return 0;
417 }
418
419 /**
420 * pcpu_split_block - split a map block
421 * @chunk: chunk of interest
422 * @i: index of map block to split
423 * @head: head size in bytes (can be 0)
424 * @tail: tail size in bytes (can be 0)
425 *
426 * Split the @i'th map block into two or three blocks. If @head is
427 * non-zero, @head bytes block is inserted before block @i moving it
428 * to @i+1 and reducing its size by @head bytes.
429 *
430 * If @tail is non-zero, the target block, which can be @i or @i+1
431 * depending on @head, is reduced by @tail bytes and @tail byte block
432 * is inserted after the target block.
433 *
434 * @chunk->map must have enough free slots to accommodate the split.
435 *
436 * CONTEXT:
437 * pcpu_lock.
438 */
439 static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
440 int head, int tail)
441 {
442 int nr_extra = !!head + !!tail;
443
444 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
445
446 /* insert new subblocks */
447 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
448 sizeof(chunk->map[0]) * (chunk->map_used - i));
449 chunk->map_used += nr_extra;
450
451 if (head) {
452 chunk->map[i + 1] = chunk->map[i] - head;
453 chunk->map[i++] = head;
454 }
455 if (tail) {
456 chunk->map[i++] -= tail;
457 chunk->map[i] = tail;
458 }
459 }
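/*
 * Illustration (hypothetical map state): splitting block i == 1 of
 *
 *	map = { -64, 96 }	(96 bytes free at offset 64)
 *
 * with head == 32 and tail == 16 yields
 *
 *	map = { -64, 32, 48, 16 }
 *
 * i.e. a 32-byte free head, the 48-byte target block and a 16-byte
 * free tail, still summing to the original 96 bytes.
 */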
460
461 /**
462 * pcpu_alloc_area - allocate area from a pcpu_chunk
463 * @chunk: chunk of interest
464 * @size: wanted size in bytes
465 * @align: wanted align
466 *
467 * Try to allocate @size bytes area aligned at @align from @chunk.
468 * Note that this function only allocates the offset. It doesn't
469 * populate or map the area.
470 *
471 * @chunk->map must have at least two free slots.
472 *
473 * CONTEXT:
474 * pcpu_lock.
475 *
476 * RETURNS:
477 * Allocated offset in @chunk on success, -1 if no matching area is
478 * found.
479 */
480 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
481 {
482 int oslot = pcpu_chunk_slot(chunk);
483 int max_contig = 0;
484 int i, off;
485
486 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
487 bool is_last = i + 1 == chunk->map_used;
488 int head, tail;
489
490 /* extra for alignment requirement */
491 head = ALIGN(off, align) - off;
492 BUG_ON(i == 0 && head != 0);
493
494 if (chunk->map[i] < 0)
495 continue;
496 if (chunk->map[i] < head + size) {
497 max_contig = max(chunk->map[i], max_contig);
498 continue;
499 }
500
501 /*
502 * If head is small or the previous block is free,
503 * merge'em. Note that 'small' is defined as smaller
504 * than sizeof(int), which is very small but isn't too
505 * uncommon for percpu allocations.
506 */
507 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
508 if (chunk->map[i - 1] > 0)
509 chunk->map[i - 1] += head;
510 else {
511 chunk->map[i - 1] -= head;
512 chunk->free_size -= head;
513 }
514 chunk->map[i] -= head;
515 off += head;
516 head = 0;
517 }
518
519 /* if tail is small, just keep it around */
520 tail = chunk->map[i] - head - size;
521 if (tail < sizeof(int))
522 tail = 0;
523
524 /* split if warranted */
525 if (head || tail) {
526 pcpu_split_block(chunk, i, head, tail);
527 if (head) {
528 i++;
529 off += head;
530 max_contig = max(chunk->map[i - 1], max_contig);
531 }
532 if (tail)
533 max_contig = max(chunk->map[i + 1], max_contig);
534 }
535
536 /* update hint and mark allocated */
537 if (is_last)
538 chunk->contig_hint = max_contig; /* fully scanned */
539 else
540 chunk->contig_hint = max(chunk->contig_hint,
541 max_contig);
542
543 chunk->free_size -= chunk->map[i];
544 chunk->map[i] = -chunk->map[i];
545
546 pcpu_chunk_relocate(chunk, oslot);
547 return off;
548 }
549
550 chunk->contig_hint = max_contig; /* fully scanned */
551 pcpu_chunk_relocate(chunk, oslot);
552
553 /* tell the upper layer that this chunk has no matching area */
554 return -1;
555 }
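/*
 * Worked example (assumed state): allocating size == 40, align == 8
 * from map = { -64, 96 } picks block 1 at offset 64, which is already
 * aligned (head == 0); tail == 96 - 40 == 56 >= sizeof(int), so the
 * block is split to { -64, 40, 56 }, marked allocated as
 * { -64, -40, 56 } and offset 64 is returned.
 */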
556
557 /**
558 * pcpu_free_area - free area to a pcpu_chunk
559 * @chunk: chunk of interest
560 * @freeme: offset of area to free
561 *
562 * Free area starting from @freeme to @chunk. Note that this function
563 * only modifies the allocation map. It doesn't depopulate or unmap
564 * the area.
565 *
566 * CONTEXT:
567 * pcpu_lock.
568 */
569 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
570 {
571 int oslot = pcpu_chunk_slot(chunk);
572 int i, off;
573
574 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
575 if (off == freeme)
576 break;
577 BUG_ON(off != freeme);
578 BUG_ON(chunk->map[i] > 0);
579
580 chunk->map[i] = -chunk->map[i];
581 chunk->free_size += chunk->map[i];
582
583 /* merge with previous? */
584 if (i > 0 && chunk->map[i - 1] >= 0) {
585 chunk->map[i - 1] += chunk->map[i];
586 chunk->map_used--;
587 memmove(&chunk->map[i], &chunk->map[i + 1],
588 (chunk->map_used - i) * sizeof(chunk->map[0]));
589 i--;
590 }
591 /* merge with next? */
592 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
593 chunk->map[i] += chunk->map[i + 1];
594 chunk->map_used--;
595 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
596 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
597 }
598
599 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
600 pcpu_chunk_relocate(chunk, oslot);
601 }
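/*
 * Continuing the example above: freeing offset 64 in
 * map = { -64, -40, 56 } flips the entry back to 40 and merges it
 * with the free block on its right, restoring map = { -64, 96 }.
 */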
602
603 static struct pcpu_chunk *pcpu_alloc_chunk(void)
604 {
605 struct pcpu_chunk *chunk;
606
607 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
608 if (!chunk)
609 return NULL;
610
611 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
612 if (!chunk->map) {
613 kfree(chunk);
614 return NULL;
615 }
616
617 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
618 chunk->map[chunk->map_used++] = pcpu_unit_size;
619
620 INIT_LIST_HEAD(&chunk->list);
621 chunk->free_size = pcpu_unit_size;
622 chunk->contig_hint = pcpu_unit_size;
623
624 return chunk;
625 }
626
627 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
628 {
629 if (!chunk)
630 return;
631 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
632 kfree(chunk);
633 }
634
635 /*
636 * Chunk management implementation.
637 *
638 * To allow different implementations, chunk alloc/free and
639 * [de]population are implemented in a separate file which is pulled
640 * into this file and compiled together. The following functions
641 * should be implemented.
642 *
643 * pcpu_populate_chunk - populate the specified range of a chunk
644 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
645 * pcpu_create_chunk - create a new chunk
646 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
647 * pcpu_addr_to_page - translate address to the backing page
648 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
649 */
650 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
651 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
652 static struct pcpu_chunk *pcpu_create_chunk(void);
653 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
654 static struct page *pcpu_addr_to_page(void *addr);
655 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
656
657 #ifdef CONFIG_NEED_PER_CPU_KM
658 #include "percpu-km.c"
659 #else
660 #include "percpu-vm.c"
661 #endif
662
663 /**
664 * pcpu_chunk_addr_search - determine chunk containing specified address
665 * @addr: address for which the chunk needs to be determined.
666 *
667 * RETURNS:
668 * The address of the found chunk.
669 */
670 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
671 {
672 /* is it in the first chunk? */
673 if (pcpu_addr_in_first_chunk(addr)) {
674 /* is it in the reserved area? */
675 if (pcpu_addr_in_reserved_chunk(addr))
676 return pcpu_reserved_chunk;
677 return pcpu_first_chunk;
678 }
679
680 /*
681 * The address is relative to unit0 which might be unused and
682 * thus unmapped. Offset the address to the unit space of the
683 * current processor before looking it up in the vmalloc
684 * space. Note that any possible cpu id can be used here, so
685 * there's no need to worry about preemption or cpu hotplug.
686 */
687 addr += pcpu_unit_offsets[raw_smp_processor_id()];
688 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
689 }
690
691 /**
692 * pcpu_alloc - the percpu allocator
693 * @size: size of area to allocate in bytes
694 * @align: alignment of area (max PAGE_SIZE)
695 * @reserved: allocate from the reserved chunk if available
696 *
697 * Allocate percpu area of @size bytes aligned at @align.
698 *
699 * CONTEXT:
700 * Does GFP_KERNEL allocation.
701 *
702 * RETURNS:
703 * Percpu pointer to the allocated area on success, NULL on failure.
704 */
705 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
706 {
707 static int warn_limit = 10;
708 struct pcpu_chunk *chunk;
709 const char *err;
710 int slot, off, new_alloc;
711 unsigned long flags;
712
713 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
714 WARN(true, "illegal size (%zu) or align (%zu) for "
715 "percpu allocation\n", size, align);
716 return NULL;
717 }
718
719 mutex_lock(&pcpu_alloc_mutex);
720 spin_lock_irqsave(&pcpu_lock, flags);
721
722 /* serve reserved allocations from the reserved chunk if available */
723 if (reserved && pcpu_reserved_chunk) {
724 chunk = pcpu_reserved_chunk;
725
726 if (size > chunk->contig_hint) {
727 err = "alloc from reserved chunk failed";
728 goto fail_unlock;
729 }
730
731 while ((new_alloc = pcpu_need_to_extend(chunk))) {
732 spin_unlock_irqrestore(&pcpu_lock, flags);
733 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
734 err = "failed to extend area map of reserved chunk";
735 goto fail_unlock_mutex;
736 }
737 spin_lock_irqsave(&pcpu_lock, flags);
738 }
739
740 off = pcpu_alloc_area(chunk, size, align);
741 if (off >= 0)
742 goto area_found;
743
744 err = "alloc from reserved chunk failed";
745 goto fail_unlock;
746 }
747
748 restart:
749 /* search through normal chunks */
750 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
751 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
752 if (size > chunk->contig_hint)
753 continue;
754
755 new_alloc = pcpu_need_to_extend(chunk);
756 if (new_alloc) {
757 spin_unlock_irqrestore(&pcpu_lock, flags);
758 if (pcpu_extend_area_map(chunk,
759 new_alloc) < 0) {
760 err = "failed to extend area map";
761 goto fail_unlock_mutex;
762 }
763 spin_lock_irqsave(&pcpu_lock, flags);
764 /*
765 * pcpu_lock has been dropped, need to
766 * restart cpu_slot list walking.
767 */
768 goto restart;
769 }
770
771 off = pcpu_alloc_area(chunk, size, align);
772 if (off >= 0)
773 goto area_found;
774 }
775 }
776
777 /* hmmm... no space left, create a new chunk */
778 spin_unlock_irqrestore(&pcpu_lock, flags);
779
780 chunk = pcpu_create_chunk();
781 if (!chunk) {
782 err = "failed to allocate new chunk";
783 goto fail_unlock_mutex;
784 }
785
786 spin_lock_irqsave(&pcpu_lock, flags);
787 pcpu_chunk_relocate(chunk, -1);
788 goto restart;
789
790 area_found:
791 spin_unlock_irqrestore(&pcpu_lock, flags);
792
793 /* populate, map and clear the area */
794 if (pcpu_populate_chunk(chunk, off, size)) {
795 spin_lock_irqsave(&pcpu_lock, flags);
796 pcpu_free_area(chunk, off);
797 err = "failed to populate";
798 goto fail_unlock;
799 }
800
801 mutex_unlock(&pcpu_alloc_mutex);
802
803 /* return address relative to base address */
804 return __addr_to_pcpu_ptr(chunk->base_addr + off);
805
806 fail_unlock:
807 spin_unlock_irqrestore(&pcpu_lock, flags);
808 fail_unlock_mutex:
809 mutex_unlock(&pcpu_alloc_mutex);
810 if (warn_limit) {
811 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
812 "%s\n", size, align, err);
813 dump_stack();
814 if (!--warn_limit)
815 pr_info("PERCPU: limit reached, disable warning\n");
816 }
817 return NULL;
818 }
819
820 /**
821 * __alloc_percpu - allocate dynamic percpu area
822 * @size: size of area to allocate in bytes
823 * @align: alignment of area (max PAGE_SIZE)
824 *
825 * Allocate percpu area of @size bytes aligned at @align. Might
826 * sleep. Might trigger writeouts.
827 *
828 * CONTEXT:
829 * Does GFP_KERNEL allocation.
830 *
831 * RETURNS:
832 * Percpu pointer to the allocated area on success, NULL on failure.
833 */
834 void __percpu *__alloc_percpu(size_t size, size_t align)
835 {
836 return pcpu_alloc(size, align, false);
837 }
838 EXPORT_SYMBOL_GPL(__alloc_percpu);
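/*
 * Usage sketch (hypothetical caller, error handling and preemption
 * control elided; real code would typically use the type-safe
 * alloc_percpu() wrapper and get_cpu()/put_cpu()):
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), sizeof(int));
 *
 *	if (cnt) {
 *		*per_cpu_ptr(cnt, smp_processor_id()) += 1;
 *		free_percpu(cnt);
 *	}
 */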
839
840 /**
841 * __alloc_reserved_percpu - allocate reserved percpu area
842 * @size: size of area to allocate in bytes
843 * @align: alignment of area (max PAGE_SIZE)
844 *
845 * Allocate percpu area of @size bytes aligned at @align from reserved
846 * percpu area if arch has set it up; otherwise, allocation is served
847 * from the same dynamic area. Might sleep. Might trigger writeouts.
848 *
849 * CONTEXT:
850 * Does GFP_KERNEL allocation.
851 *
852 * RETURNS:
853 * Percpu pointer to the allocated area on success, NULL on failure.
854 */
855 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
856 {
857 return pcpu_alloc(size, align, true);
858 }
859
860 /**
861 * pcpu_reclaim - reclaim fully free chunks, workqueue function
862 * @work: unused
863 *
864 * Reclaim all fully free chunks except for the first one.
865 *
866 * CONTEXT:
867 * workqueue context.
868 */
869 static void pcpu_reclaim(struct work_struct *work)
870 {
871 LIST_HEAD(todo);
872 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
873 struct pcpu_chunk *chunk, *next;
874
875 mutex_lock(&pcpu_alloc_mutex);
876 spin_lock_irq(&pcpu_lock);
877
878 list_for_each_entry_safe(chunk, next, head, list) {
879 WARN_ON(chunk->immutable);
880
881 /* spare the first one */
882 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
883 continue;
884
885 list_move(&chunk->list, &todo);
886 }
887
888 spin_unlock_irq(&pcpu_lock);
889
890 list_for_each_entry_safe(chunk, next, &todo, list) {
891 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
892 pcpu_destroy_chunk(chunk);
893 }
894
895 mutex_unlock(&pcpu_alloc_mutex);
896 }
897
898 /**
899 * free_percpu - free percpu area
900 * @ptr: pointer to area to free
901 *
902 * Free percpu area @ptr.
903 *
904 * CONTEXT:
905 * Can be called from atomic context.
906 */
907 void free_percpu(void __percpu *ptr)
908 {
909 void *addr;
910 struct pcpu_chunk *chunk;
911 unsigned long flags;
912 int off;
913
914 if (!ptr)
915 return;
916
917 addr = __pcpu_ptr_to_addr(ptr);
918
919 spin_lock_irqsave(&pcpu_lock, flags);
920
921 chunk = pcpu_chunk_addr_search(addr);
922 off = addr - chunk->base_addr;
923
924 pcpu_free_area(chunk, off);
925
926 /* if there is more than one fully free chunk, wake up the grim reaper */
927 if (chunk->free_size == pcpu_unit_size) {
928 struct pcpu_chunk *pos;
929
930 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
931 if (pos != chunk) {
932 schedule_work(&pcpu_reclaim_work);
933 break;
934 }
935 }
936
937 spin_unlock_irqrestore(&pcpu_lock, flags);
938 }
939 EXPORT_SYMBOL_GPL(free_percpu);
940
941 /**
942 * is_kernel_percpu_address - test whether address is from static percpu area
943 * @addr: address to test
944 *
945 * Test whether @addr belongs to in-kernel static percpu area. Module
946 * static percpu areas are not considered. For those, use
947 * is_module_percpu_address().
948 *
949 * RETURNS:
950 * %true if @addr is from in-kernel static percpu area, %false otherwise.
951 */
952 bool is_kernel_percpu_address(unsigned long addr)
953 {
954 const size_t static_size = __per_cpu_end - __per_cpu_start;
955 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
956 unsigned int cpu;
957
958 for_each_possible_cpu(cpu) {
959 void *start = per_cpu_ptr(base, cpu);
960
961 if ((void *)addr >= start && (void *)addr < start + static_size)
962 return true;
963 }
964 return false;
965 }
966
967 /**
968 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
969 * @addr: the address to be converted to physical address
970 *
971 * Given @addr which is dereferenceable address obtained via one of
972 * percpu access macros, this function translates it into its physical
973 * address. The caller is responsible for ensuring @addr stays valid
974 * until this function finishes.
975 *
976 * RETURNS:
977 * The physical address for @addr.
978 */
979 phys_addr_t per_cpu_ptr_to_phys(void *addr)
980 {
981 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
982 bool in_first_chunk = false;
983 unsigned long first_start, first_end;
984 unsigned int cpu;
985
986 /*
987 * The following test on first_start/end isn't strictly
988 * necessary but will speed up lookups of addresses which
989 * aren't in the first chunk.
990 */
991 first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
992 first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
993 pcpu_unit_pages);
994 if ((unsigned long)addr >= first_start &&
995 (unsigned long)addr < first_end) {
996 for_each_possible_cpu(cpu) {
997 void *start = per_cpu_ptr(base, cpu);
998
999 if (addr >= start && addr < start + pcpu_unit_size) {
1000 in_first_chunk = true;
1001 break;
1002 }
1003 }
1004 }
1005
1006 if (in_first_chunk) {
1007 if ((unsigned long)addr < VMALLOC_START ||
1008 (unsigned long)addr >= VMALLOC_END)
1009 return __pa(addr);
1010 else
1011 return page_to_phys(vmalloc_to_page(addr));
1012 } else
1013 return page_to_phys(pcpu_addr_to_page(addr));
1014 }
1015
1016 /**
1017 * pcpu_alloc_alloc_info - allocate percpu allocation info
1018 * @nr_groups: the number of groups
1019 * @nr_units: the number of units
1020 *
1021 * Allocate ai which is large enough for @nr_groups groups containing
1022 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1023 * cpu_map array which is long enough for @nr_units and filled with
1024 * NR_CPUS.  It's the caller's responsibility to initialize the
1025 * cpu_map pointers of the other groups.
1026 *
1027 * RETURNS:
1028 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1029 * failure.
1030 */
1031 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1032 int nr_units)
1033 {
1034 struct pcpu_alloc_info *ai;
1035 size_t base_size, ai_size;
1036 void *ptr;
1037 int unit;
1038
1039 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1040 __alignof__(ai->groups[0].cpu_map[0]));
1041 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1042
1043 ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1044 if (!ptr)
1045 return NULL;
1046 ai = ptr;
1047 ptr += base_size;
1048
1049 ai->groups[0].cpu_map = ptr;
1050
1051 for (unit = 0; unit < nr_units; unit++)
1052 ai->groups[0].cpu_map[unit] = NR_CPUS;
1053
1054 ai->nr_groups = nr_groups;
1055 ai->__ai_size = PFN_ALIGN(ai_size);
1056
1057 return ai;
1058 }
1059
1060 /**
1061 * pcpu_free_alloc_info - free percpu allocation info
1062 * @ai: pcpu_alloc_info to free
1063 *
1064 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1065 */
1066 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1067 {
1068 free_bootmem(__pa(ai), ai->__ai_size);
1069 }
1070
1071 /**
1072 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1073 * @reserved_size: the size of reserved percpu area in bytes
1074 * @dyn_size: minimum free size for dynamic allocation in bytes
1075 * @atom_size: allocation atom size
1076 * @cpu_distance_fn: callback to determine distance between cpus, optional
1077 *
1078 * This function determines grouping of units, their mappings to cpus
1079 * and other parameters considering needed percpu size, allocation
1080 * atom size and distances between CPUs.
1081 *
1082 * Groups are always multiples of atom size and CPUs which are of
1083 * LOCAL_DISTANCE both ways are grouped together and share space for
1084 * units in the same group. The returned configuration is guaranteed
1085 * to have CPUs on different nodes on different groups and >=75% usage
1086 * of allocated virtual address space.
1087 *
1088 * RETURNS:
1089 * On success, pointer to the new allocation_info is returned. On
1090 * failure, ERR_PTR value is returned.
1091 */
1092 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1093 size_t reserved_size, size_t dyn_size,
1094 size_t atom_size,
1095 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1096 {
1097 static int group_map[NR_CPUS] __initdata;
1098 static int group_cnt[NR_CPUS] __initdata;
1099 const size_t static_size = __per_cpu_end - __per_cpu_start;
1100 int nr_groups = 1, nr_units = 0;
1101 size_t size_sum, min_unit_size, alloc_size;
1102 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1103 int last_allocs, group, unit;
1104 unsigned int cpu, tcpu;
1105 struct pcpu_alloc_info *ai;
1106 unsigned int *cpu_map;
1107
1108 /* this function may be called multiple times */
1109 memset(group_map, 0, sizeof(group_map));
1110 memset(group_cnt, 0, sizeof(group_cnt));
1111
1112 size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
1113 dyn_size = size_sum - static_size - reserved_size;
1114
1115 /*
1116 * Determine min_unit_size, alloc_size and max_upa such that
1117 * alloc_size is multiple of atom_size and is the smallest
1118 * which can accommodate 4k aligned segments which are equal to
1119 * or larger than min_unit_size.
1120 */
1121 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1122
1123 alloc_size = roundup(min_unit_size, atom_size);
1124 upa = alloc_size / min_unit_size;
1125 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1126 upa--;
1127 max_upa = upa;
1128
1129 /* group cpus according to their proximity */
1130 for_each_possible_cpu(cpu) {
1131 group = 0;
1132 next_group:
1133 for_each_possible_cpu(tcpu) {
1134 if (cpu == tcpu)
1135 break;
1136 if (group_map[tcpu] == group && cpu_distance_fn &&
1137 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1138 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1139 group++;
1140 nr_groups = max(nr_groups, group + 1);
1141 goto next_group;
1142 }
1143 }
1144 group_map[cpu] = group;
1145 group_cnt[group]++;
1146 }
1147
1148 /*
1149 * Expand unit size until address space usage goes over 75%
1150 * and then as much as possible without using more address
1151 * space.
1152 */
1153 last_allocs = INT_MAX;
1154 for (upa = max_upa; upa; upa--) {
1155 int allocs = 0, wasted = 0;
1156
1157 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1158 continue;
1159
1160 for (group = 0; group < nr_groups; group++) {
1161 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1162 allocs += this_allocs;
1163 wasted += this_allocs * upa - group_cnt[group];
1164 }
1165
1166 /*
1167 * Don't accept if wastage is over 25%. The
1168 * greater-than comparison ensures upa==1 always
1169 * passes the following check.
1170 */
1171 if (wasted > num_possible_cpus() / 3)
1172 continue;
1173
1174 /* and then don't consume more memory */
1175 if (allocs > last_allocs)
1176 break;
1177 last_allocs = allocs;
1178 best_upa = upa;
1179 }
1180 upa = best_upa;
1181
1182 /* allocate and fill alloc_info */
1183 for (group = 0; group < nr_groups; group++)
1184 nr_units += roundup(group_cnt[group], upa);
1185
1186 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1187 if (!ai)
1188 return ERR_PTR(-ENOMEM);
1189 cpu_map = ai->groups[0].cpu_map;
1190
1191 for (group = 0; group < nr_groups; group++) {
1192 ai->groups[group].cpu_map = cpu_map;
1193 cpu_map += roundup(group_cnt[group], upa);
1194 }
1195
1196 ai->static_size = static_size;
1197 ai->reserved_size = reserved_size;
1198 ai->dyn_size = dyn_size;
1199 ai->unit_size = alloc_size / upa;
1200 ai->atom_size = atom_size;
1201 ai->alloc_size = alloc_size;
1202
1203 for (group = 0, unit = 0; group_cnt[group]; group++) {
1204 struct pcpu_group_info *gi = &ai->groups[group];
1205
1206 /*
1207 * Initialize base_offset as if all groups are located
1208 * back-to-back. The caller should update this to
1209 * reflect actual allocation.
1210 */
1211 gi->base_offset = unit * ai->unit_size;
1212
1213 for_each_possible_cpu(cpu)
1214 if (group_map[cpu] == group)
1215 gi->cpu_map[gi->nr_units++] = cpu;
1216 gi->nr_units = roundup(gi->nr_units, upa);
1217 unit += gi->nr_units;
1218 }
1219 BUG_ON(unit != nr_units);
1220
1221 return ai;
1222 }
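/*
 * Worked example of the sizing above (numbers assumed): with
 * size_sum == 44k, PCPU_MIN_UNIT_SIZE == 32k and atom_size == 2M,
 * min_unit_size is 44k and alloc_size rounds up to 2M.  upa starts at
 * 2M / 44k == 46 and is walked down until alloc_size divides evenly
 * into page-aligned units; since 2M has only power-of-two divisors,
 * that first happens at upa == 32, i.e. a 64k unit size.
 */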
1223
1224 /**
1225 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1226 * @lvl: loglevel
1227 * @ai: allocation info to dump
1228 *
1229 * Print out information about @ai using loglevel @lvl.
1230 */
1231 static void pcpu_dump_alloc_info(const char *lvl,
1232 const struct pcpu_alloc_info *ai)
1233 {
1234 int group_width = 1, cpu_width = 1, width;
1235 char empty_str[] = "--------";
1236 int alloc = 0, alloc_end = 0;
1237 int group, v;
1238 int upa, apl; /* units per alloc, allocs per line */
1239
1240 v = ai->nr_groups;
1241 while (v /= 10)
1242 group_width++;
1243
1244 v = num_possible_cpus();
1245 while (v /= 10)
1246 cpu_width++;
1247 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1248
1249 upa = ai->alloc_size / ai->unit_size;
1250 width = upa * (cpu_width + 1) + group_width + 3;
1251 apl = rounddown_pow_of_two(max(60 / width, 1));
1252
1253 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1254 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1255 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1256
1257 for (group = 0; group < ai->nr_groups; group++) {
1258 const struct pcpu_group_info *gi = &ai->groups[group];
1259 int unit = 0, unit_end = 0;
1260
1261 BUG_ON(gi->nr_units % upa);
1262 for (alloc_end += gi->nr_units / upa;
1263 alloc < alloc_end; alloc++) {
1264 if (!(alloc % apl)) {
1265 printk("\n");
1266 printk("%spcpu-alloc: ", lvl);
1267 }
1268 printk("[%0*d] ", group_width, group);
1269
1270 for (unit_end += upa; unit < unit_end; unit++)
1271 if (gi->cpu_map[unit] != NR_CPUS)
1272 printk("%0*d ", cpu_width,
1273 gi->cpu_map[unit]);
1274 else
1275 printk("%s ", empty_str);
1276 }
1277 }
1278 printk("\n");
1279 }
1280
1281 /**
1282 * pcpu_setup_first_chunk - initialize the first percpu chunk
1283 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1284 * @base_addr: mapped address
1285 *
1286 * Initialize the first percpu chunk which contains the kernel static
1287 * percpu area. This function is to be called from arch percpu area
1288 * setup path.
1289 *
1290 * @ai contains all information necessary to initialize the first
1291 * chunk and prime the dynamic percpu allocator.
1292 *
1293 * @ai->static_size is the size of static percpu area.
1294 *
1295 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1296 * reserve after the static area in the first chunk. This reserves
1297 * the first chunk such that it's available only through reserved
1298 * percpu allocation. This is primarily used to serve module percpu
1299 * static areas on architectures where the addressing model has
1300 * limited offset range for symbol relocations to guarantee module
1301 * percpu symbols fall inside the relocatable range.
1302 *
1303 * @ai->dyn_size determines the number of bytes available for dynamic
1304 * allocation in the first chunk. The area between @ai->static_size +
1305 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1306 *
1307 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1308 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1309 * @ai->dyn_size.
1310 *
1311 * @ai->atom_size is the allocation atom size and used as alignment
1312 * for vm areas.
1313 *
1314 * @ai->alloc_size is the allocation size and always multiple of
1315 * @ai->atom_size. This is larger than @ai->atom_size if
1316 * @ai->unit_size is larger than @ai->atom_size.
1317 *
1318 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1319 * percpu areas. Units which should be colocated are put into the
1320 * same group. Dynamic VM areas will be allocated according to these
1321 * groupings. If @ai->nr_groups is zero, a single group containing
1322 * all units is assumed.
1323 *
1324 * The caller should have mapped the first chunk at @base_addr and
1325 * copied static data to each unit.
1326 *
1327 * If the first chunk ends up with both reserved and dynamic areas, it
1328 * is served by two chunks - one to serve the core static and reserved
1329 * areas and the other for the dynamic area. They share the same vm
1330 * and page map but use different area allocation maps to stay away
1331 * from each other. The latter chunk is circulated in the chunk slots
1332 * and available for dynamic allocation like any other chunks.
1333 *
1334 * RETURNS:
1335 * 0 on success, -errno on failure.
1336 */
1337 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1338 void *base_addr)
1339 {
1340 static char cpus_buf[4096] __initdata;
1341 static int smap[2], dmap[2];
1342 size_t dyn_size = ai->dyn_size;
1343 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1344 struct pcpu_chunk *schunk, *dchunk = NULL;
1345 unsigned long *group_offsets;
1346 size_t *group_sizes;
1347 unsigned long *unit_off;
1348 unsigned int cpu;
1349 int *unit_map;
1350 int group, unit, i;
1351
1352 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1353
1354 #define PCPU_SETUP_BUG_ON(cond) do { \
1355 if (unlikely(cond)) { \
1356 pr_emerg("PERCPU: failed to initialize, %s\n", #cond); \
1357 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1358 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1359 BUG(); \
1360 } \
1361 } while (0)
1362
1363 /* sanity checks */
1364 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1365 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1366 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1367 PCPU_SETUP_BUG_ON(!ai->static_size);
1368 PCPU_SETUP_BUG_ON(!base_addr);
1369 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1370 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1371 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1372 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1373
1374 /* process group information and build config tables accordingly */
1375 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1376 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1377 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1378 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1379
1380 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1381 unit_map[cpu] = UINT_MAX;
1382 pcpu_first_unit_cpu = NR_CPUS;
1383
1384 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1385 const struct pcpu_group_info *gi = &ai->groups[group];
1386
1387 group_offsets[group] = gi->base_offset;
1388 group_sizes[group] = gi->nr_units * ai->unit_size;
1389
1390 for (i = 0; i < gi->nr_units; i++) {
1391 cpu = gi->cpu_map[i];
1392 if (cpu == NR_CPUS)
1393 continue;
1394
1395 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1396 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1397 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1398
1399 unit_map[cpu] = unit + i;
1400 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1401
1402 if (pcpu_first_unit_cpu == NR_CPUS)
1403 pcpu_first_unit_cpu = cpu;
1404 }
1405 }
1406 pcpu_last_unit_cpu = cpu;
1407 pcpu_nr_units = unit;
1408
1409 for_each_possible_cpu(cpu)
1410 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1411
1412 /* we're done parsing the input, undefine BUG macro and dump config */
1413 #undef PCPU_SETUP_BUG_ON
1414 pcpu_dump_alloc_info(KERN_INFO, ai);
1415
1416 pcpu_nr_groups = ai->nr_groups;
1417 pcpu_group_offsets = group_offsets;
1418 pcpu_group_sizes = group_sizes;
1419 pcpu_unit_map = unit_map;
1420 pcpu_unit_offsets = unit_off;
1421
1422 /* determine basic parameters */
1423 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1424 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1425 pcpu_atom_size = ai->atom_size;
1426 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1427 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1428
1429 /*
1430 * Allocate chunk slots. The additional last slot is for
1431 * empty chunks.
1432 */
1433 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1434 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1435 for (i = 0; i < pcpu_nr_slots; i++)
1436 INIT_LIST_HEAD(&pcpu_slot[i]);
1437
1438 /*
1439 * Initialize static chunk. If reserved_size is zero, the
1440 * static chunk covers static area + dynamic allocation area
1441 * in the first chunk. If reserved_size is not zero, it
1442 * covers static area + reserved area (mostly used for module
1443 * static percpu allocation).
1444 */
1445 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1446 INIT_LIST_HEAD(&schunk->list);
1447 schunk->base_addr = base_addr;
1448 schunk->map = smap;
1449 schunk->map_alloc = ARRAY_SIZE(smap);
1450 schunk->immutable = true;
1451 bitmap_fill(schunk->populated, pcpu_unit_pages);
1452
1453 if (ai->reserved_size) {
1454 schunk->free_size = ai->reserved_size;
1455 pcpu_reserved_chunk = schunk;
1456 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1457 } else {
1458 schunk->free_size = dyn_size;
1459 dyn_size = 0; /* dynamic area covered */
1460 }
1461 schunk->contig_hint = schunk->free_size;
1462
1463 schunk->map[schunk->map_used++] = -ai->static_size;
1464 if (schunk->free_size)
1465 schunk->map[schunk->map_used++] = schunk->free_size;
1466
1467 /* init dynamic chunk if necessary */
1468 if (dyn_size) {
1469 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1470 INIT_LIST_HEAD(&dchunk->list);
1471 dchunk->base_addr = base_addr;
1472 dchunk->map = dmap;
1473 dchunk->map_alloc = ARRAY_SIZE(dmap);
1474 dchunk->immutable = true;
1475 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1476
1477 dchunk->contig_hint = dchunk->free_size = dyn_size;
1478 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1479 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1480 }
1481
1482 /* link the first chunk in */
1483 pcpu_first_chunk = dchunk ?: schunk;
1484 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1485
1486 /* we're done */
1487 pcpu_base_addr = base_addr;
1488 return 0;
1489 }
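/*
 * Layout sketch (sizes assumed): with 32k static, 8k reserved, 20k
 * dynamic and a 64k unit, the static chunk gets map { -32k, 8k } and
 * serves reserved allocations, the dynamic chunk gets
 * map { -40k, 20k } and is circulated like any other chunk, and the
 * 4k above 60k stays unused.
 */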
1490
1491 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1492 [PCPU_FC_AUTO] = "auto",
1493 [PCPU_FC_EMBED] = "embed",
1494 [PCPU_FC_PAGE] = "page",
1495 };
1496
1497 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1498
1499 static int __init percpu_alloc_setup(char *str)
1500 {
1501 if (0)
1502 /* nada */;
1503 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1504 else if (!strcmp(str, "embed"))
1505 pcpu_chosen_fc = PCPU_FC_EMBED;
1506 #endif
1507 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1508 else if (!strcmp(str, "page"))
1509 pcpu_chosen_fc = PCPU_FC_PAGE;
1510 #endif
1511 else
1512 pr_warning("PERCPU: unknown allocator %s specified\n", str);
1513
1514 return 0;
1515 }
1516 early_param("percpu_alloc", percpu_alloc_setup);
1517
1518 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1519 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1520 /**
1521 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1522 * @reserved_size: the size of reserved percpu area in bytes
1523 * @dyn_size: minimum free size for dynamic allocation in bytes
1524 * @atom_size: allocation atom size
1525 * @cpu_distance_fn: callback to determine distance between cpus, optional
1526 * @alloc_fn: function to allocate percpu page
1527 * @free_fn: function to free percpu page
1528 *
1529 * This is a helper to ease setting up embedded first percpu chunk and
1530 * can be called where pcpu_setup_first_chunk() is expected.
1531 *
1532 * If this function is used to setup the first chunk, it is allocated
1533 * by calling @alloc_fn and used as-is without being mapped into
1534 * vmalloc area. Allocations are always whole multiples of @atom_size
1535 * aligned to @atom_size.
1536 *
1537 * This enables the first chunk to piggy back on the linear physical
1538 * mapping which often uses larger page size. Please note that this
1539 * can result in very sparse cpu->unit mapping on NUMA machines thus
1540 * requiring large vmalloc address space. Don't use this allocator if
1541 * vmalloc space is not orders of magnitude larger than distances
1542 * between node memory addresses (ie. 32bit NUMA machines).
1543 *
1544 * @dyn_size specifies the minimum dynamic area size.
1545 *
1546 * If the needed size is smaller than the minimum or specified unit
1547 * size, the leftover is returned using @free_fn.
1548 *
1549 * RETURNS:
1550 * 0 on success, -errno on failure.
1551 */
1552 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1553 size_t atom_size,
1554 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1555 pcpu_fc_alloc_fn_t alloc_fn,
1556 pcpu_fc_free_fn_t free_fn)
1557 {
1558 void *base = (void *)ULONG_MAX;
1559 void **areas = NULL;
1560 struct pcpu_alloc_info *ai;
1561 size_t size_sum, areas_size, max_distance;
1562 int group, i, rc;
1563
1564 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1565 cpu_distance_fn);
1566 if (IS_ERR(ai))
1567 return PTR_ERR(ai);
1568
1569 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1570 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1571
1572 areas = alloc_bootmem_nopanic(areas_size);
1573 if (!areas) {
1574 rc = -ENOMEM;
1575 goto out_free;
1576 }
1577
1578 /* allocate, copy and determine base address */
1579 for (group = 0; group < ai->nr_groups; group++) {
1580 struct pcpu_group_info *gi = &ai->groups[group];
1581 unsigned int cpu = NR_CPUS;
1582 void *ptr;
1583
1584 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1585 cpu = gi->cpu_map[i];
1586 BUG_ON(cpu == NR_CPUS);
1587
1588 /* allocate space for the whole group */
1589 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1590 if (!ptr) {
1591 rc = -ENOMEM;
1592 goto out_free_areas;
1593 }
1594 areas[group] = ptr;
1595
1596 base = min(ptr, base);
1597
1598 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1599 if (gi->cpu_map[i] == NR_CPUS) {
1600 /* unused unit, free whole */
1601 free_fn(ptr, ai->unit_size);
1602 continue;
1603 }
1604 /* copy and return the unused part */
1605 memcpy(ptr, __per_cpu_load, ai->static_size);
1606 free_fn(ptr + size_sum, ai->unit_size - size_sum);
1607 }
1608 }
1609
1610 /* base address is now known, determine group base offsets */
1611 max_distance = 0;
1612 for (group = 0; group < ai->nr_groups; group++) {
1613 ai->groups[group].base_offset = areas[group] - base;
1614 max_distance = max_t(size_t, max_distance,
1615 ai->groups[group].base_offset);
1616 }
1617 max_distance += ai->unit_size;
1618
1619 /* warn if maximum distance is further than 75% of vmalloc space */
1620 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1621 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1622 "space 0x%lx\n",
1623 max_distance, VMALLOC_END - VMALLOC_START);
1624 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1625 /* and fail if we have fallback */
1626 rc = -EINVAL;
1627 goto out_free;
1628 #endif
1629 }
1630
1631 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1632 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1633 ai->dyn_size, ai->unit_size);
1634
1635 rc = pcpu_setup_first_chunk(ai, base);
1636 goto out_free;
1637
1638 out_free_areas:
1639 for (group = 0; group < ai->nr_groups; group++)
1640 free_fn(areas[group],
1641 ai->groups[group].nr_units * ai->unit_size);
1642 out_free:
1643 pcpu_free_alloc_info(ai);
1644 if (areas)
1645 free_bootmem(__pa(areas), areas_size);
1646 return rc;
1647 }
1648 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1649 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1650
1651 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1652 /**
1653 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1654 * @reserved_size: the size of reserved percpu area in bytes
1655 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1656 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1657 * @populate_pte_fn: function to populate pte
1658 *
1659 * This is a helper to ease setting up page-remapped first percpu
1660 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1661 *
1662 * This is the basic allocator. Static percpu area is allocated
1663 * page-by-page into vmalloc area.
1664 *
1665 * RETURNS:
1666 * 0 on success, -errno on failure.
1667 */
1668 int __init pcpu_page_first_chunk(size_t reserved_size,
1669 pcpu_fc_alloc_fn_t alloc_fn,
1670 pcpu_fc_free_fn_t free_fn,
1671 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1672 {
1673 static struct vm_struct vm;
1674 struct pcpu_alloc_info *ai;
1675 char psize_str[16];
1676 int unit_pages;
1677 size_t pages_size;
1678 struct page **pages;
1679 int unit, i, j, rc;
1680
1681 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1682
1683 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1684 if (IS_ERR(ai))
1685 return PTR_ERR(ai);
1686 BUG_ON(ai->nr_groups != 1);
1687 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1688
1689 unit_pages = ai->unit_size >> PAGE_SHIFT;
1690
1691 /* unaligned allocations can't be freed, round up to page size */
1692 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1693 sizeof(pages[0]));
1694 pages = alloc_bootmem(pages_size);
1695
1696 /* allocate pages */
1697 j = 0;
1698 for (unit = 0; unit < num_possible_cpus(); unit++)
1699 for (i = 0; i < unit_pages; i++) {
1700 unsigned int cpu = ai->groups[0].cpu_map[unit];
1701 void *ptr;
1702
1703 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1704 if (!ptr) {
1705 pr_warning("PERCPU: failed to allocate %s page "
1706 "for cpu%u\n", psize_str, cpu);
1707 goto enomem;
1708 }
1709 pages[j++] = virt_to_page(ptr);
1710 }
1711
1712 /* allocate vm area, map the pages and copy static data */
1713 vm.flags = VM_ALLOC;
1714 vm.size = num_possible_cpus() * ai->unit_size;
1715 vm_area_register_early(&vm, PAGE_SIZE);
1716
1717 for (unit = 0; unit < num_possible_cpus(); unit++) {
1718 unsigned long unit_addr =
1719 (unsigned long)vm.addr + unit * ai->unit_size;
1720
1721 for (i = 0; i < unit_pages; i++)
1722 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1723
1724 /* pte already populated, the following shouldn't fail */
1725 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1726 unit_pages);
1727 if (rc < 0)
1728 panic("failed to map percpu area, err=%d\n", rc);
1729
1730 /*
1731 * FIXME: Archs with virtual cache should flush local
1732 * cache for the linear mapping here - something
1733 * equivalent to flush_cache_vmap() on the local cpu.
1734 * flush_cache_vmap() can't be used as most supporting
1735 * data structures are not set up yet.
1736 */
1737
1738 /* copy static data */
1739 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1740 }
1741
1742 /* we're ready, commit */
1743 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1744 unit_pages, psize_str, vm.addr, ai->static_size,
1745 ai->reserved_size, ai->dyn_size);
1746
1747 rc = pcpu_setup_first_chunk(ai, vm.addr);
1748 goto out_free_ar;
1749
1750 enomem:
1751 while (--j >= 0)
1752 free_fn(page_address(pages[j]), PAGE_SIZE);
1753 rc = -ENOMEM;
1754 out_free_ar:
1755 free_bootmem(__pa(pages), pages_size);
1756 pcpu_free_alloc_info(ai);
1757 return rc;
1758 }
1759 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1760
1761 /*
1762 * Generic percpu area setup.
1763 *
1764 * The embedding helper is used because its behavior closely resembles
1765 * the original non-dynamic generic percpu area setup. This is
1766 * important because many archs have addressing restrictions and might
1767 * fail if the percpu area is located far away from the previous
1768 * location. As an added bonus, in non-NUMA cases, embedding is
1769 * generally a good idea TLB-wise because percpu area can piggy back
1770 * on the physical linear memory mapping which uses large page
1771 * mappings on applicable archs.
1772 */
1773 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1774 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1775 EXPORT_SYMBOL(__per_cpu_offset);
1776
1777 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1778 size_t align)
1779 {
1780 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1781 }
1782
1783 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1784 {
1785 free_bootmem(__pa(ptr), size);
1786 }
1787
1788 void __init setup_per_cpu_areas(void)
1789 {
1790 unsigned long delta;
1791 unsigned int cpu;
1792 int rc;
1793
1794 /*
1795 * Always reserve area for module percpu variables. That's
1796 * what the legacy allocator did.
1797 */
1798 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1799 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1800 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1801 if (rc < 0)
1802 panic("Failed to initialize percpu areas.");
1803
1804 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1805 for_each_possible_cpu(cpu)
1806 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1807 }
1808 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */