1/*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks. Each chunk is
11 * made up of a boot-time determined number of units and the first
12 * chunk is used for static percpu variables in the kernel image
13 * (special boot time alloc/init handling necessary as these areas
14 * need to be brought up before allocation services are running).
15 * Unit grows as necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated.
17 *
18 * c0 c1 c2
19 * ------------------- ------------------- ------------
20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 * ------------------- ...... ------------------- .... ------------
22 *
23 * Allocation is done in offset-size areas of single unit space. Ie,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25 * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
26 * cpus. On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
29 *
30 * There are usually many small percpu allocations, many of them being
31 * as small as 4 bytes. The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator not to iterate the
36 * chunk maps unnecessarily.
37 *
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map. A positive value in the map represents a free
40 * region and negative allocated. Allocation inside a chunk is done
41 * by scanning this map sequentially and serving the first matching
42 * entry. This is mostly copied from the percpu_modalloc() allocator.
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
45 *
46 * To use this allocator, arch code should do the following:
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be
50 * different from the default
51 *
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 * setup the first chunk containing the kernel static percpu area
54 */
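/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the chunk->map encoding described above stores byte sizes, positive for
 * free regions and negative for allocated ones.  A unit that starts with a
 * 128-byte allocation followed by 896 free bytes could be described by a
 * hypothetical map such as
 *
 *	int map[] = { -128, 896 };	(map_used == 2)
 *
 * Allocating 512 bytes from it splits the free entry into { -128, -512, 384 },
 * and freeing the 512-byte area flips its sign back and merges it with the
 * adjacent free entry.
 */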
55
56#include <linux/bitmap.h>
57#include <linux/bootmem.h>
58#include <linux/err.h>
59#include <linux/list.h>
60#include <linux/log2.h>
61#include <linux/mm.h>
62#include <linux/module.h>
63#include <linux/mutex.h>
64#include <linux/percpu.h>
65#include <linux/pfn.h>
66#include <linux/slab.h>
67#include <linux/spinlock.h>
68#include <linux/vmalloc.h>
69#include <linux/workqueue.h>
70#include <linux/kmemleak.h>
71
72#include <asm/cacheflush.h>
73#include <asm/sections.h>
74#include <asm/tlbflush.h>
75#include <asm/io.h>
76
77#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
78#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
79
80#ifdef CONFIG_SMP
81/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
82#ifndef __addr_to_pcpu_ptr
83#define __addr_to_pcpu_ptr(addr) \
84 (void __percpu *)((unsigned long)(addr) - \
85 (unsigned long)pcpu_base_addr + \
86 (unsigned long)__per_cpu_start)
87#endif
88#ifndef __pcpu_ptr_to_addr
89#define __pcpu_ptr_to_addr(ptr) \
90 (void __force *)((unsigned long)(ptr) + \
91 (unsigned long)pcpu_base_addr - \
92 (unsigned long)__per_cpu_start)
93#endif
94#else /* CONFIG_SMP */
95/* on UP, it's always identity mapped */
96#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
97#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
98#endif /* CONFIG_SMP */
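/*
 * Example (illustrative, not part of the original source): the two macros
 * above are inverses of each other.  For any address inside a percpu chunk,
 *
 *	void *addr = chunk->base_addr + off;
 *	void __percpu *p = __addr_to_pcpu_ptr(addr);
 *
 * subtracts pcpu_base_addr and adds __per_cpu_start, so that
 * __pcpu_ptr_to_addr(p) == addr again.  On UP both directions are the
 * identity cast.
 */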
99
100struct pcpu_chunk {
101 struct list_head list; /* linked to pcpu_slot lists */
102 int free_size; /* free bytes in the chunk */
103 int contig_hint; /* max contiguous size hint */
104 void *base_addr; /* base address of this chunk */
105 int map_used; /* # of map entries used */
106 int map_alloc; /* # of map entries allocated */
107 int *map; /* allocation map */
108 void *data; /* chunk data */
109 bool immutable; /* no [de]population allowed */
110 unsigned long populated[]; /* populated bitmap */
111};
112
113static int pcpu_unit_pages __read_mostly;
114static int pcpu_unit_size __read_mostly;
115static int pcpu_nr_units __read_mostly;
116static int pcpu_atom_size __read_mostly;
117static int pcpu_nr_slots __read_mostly;
118static size_t pcpu_chunk_struct_size __read_mostly;
119
120/* cpus with the lowest and highest unit addresses */
121static unsigned int pcpu_low_unit_cpu __read_mostly;
122static unsigned int pcpu_high_unit_cpu __read_mostly;
123
124/* the address of the first chunk which starts with the kernel static area */
125void *pcpu_base_addr __read_mostly;
126EXPORT_SYMBOL_GPL(pcpu_base_addr);
127
128static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
129const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
130
131/* group information, used for vm allocation */
132static int pcpu_nr_groups __read_mostly;
133static const unsigned long *pcpu_group_offsets __read_mostly;
134static const size_t *pcpu_group_sizes __read_mostly;
135
136/*
137 * The first chunk which always exists. Note that unlike other
138 * chunks, this one can be allocated and mapped in several different
139 * ways and thus often doesn't live in the vmalloc area.
140 */
141static struct pcpu_chunk *pcpu_first_chunk;
142
143/*
144 * Optional reserved chunk. This chunk reserves part of the first
145 * chunk and serves it for reserved allocations. The amount of
146 * reserved offset is in pcpu_reserved_chunk_limit. When reserved
147 * area doesn't exist, the following variables contain NULL and 0
148 * respectively.
149 */
150static struct pcpu_chunk *pcpu_reserved_chunk;
151static int pcpu_reserved_chunk_limit;
152
153/*
154 * Synchronization rules.
155 *
156 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
157 * protects allocation/reclaim paths, chunks, populated bitmap and
158 * vmalloc mapping. The latter is a spinlock and protects the index
159 * data structures - chunk slots, chunks and area maps in chunks.
160 *
161 * During allocation, pcpu_alloc_mutex is kept locked all the time and
162 * pcpu_lock is grabbed and released as necessary. All actual memory
163 * allocations are done using GFP_KERNEL with pcpu_lock released. In
164 * general, percpu memory can't be allocated with irq off but
165 * irqsave/restore are still used in alloc path so that it can be used
166 * from early init path - sched_init() specifically.
167 *
168 * Free path accesses and alters only the index data structures, so it
169 * can be safely called from atomic context. When memory needs to be
170 * returned to the system, free path schedules reclaim_work which
171 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
172 * reclaimed, release both locks and frees the chunks. Note that it's
173 * necessary to grab both locks to remove a chunk from circulation as
174 * allocation path might be referencing the chunk with only
175 * pcpu_alloc_mutex locked.
176 */
177static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
178static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
179
180static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
181
182/* reclaim work to release fully free chunks, scheduled from free path */
183static void pcpu_reclaim(struct work_struct *work);
184static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
185
186static bool pcpu_addr_in_first_chunk(void *addr)
187{
188 void *first_start = pcpu_first_chunk->base_addr;
189
190 return addr >= first_start && addr < first_start + pcpu_unit_size;
191}
192
193static bool pcpu_addr_in_reserved_chunk(void *addr)
194{
195 void *first_start = pcpu_first_chunk->base_addr;
196
197 return addr >= first_start &&
198 addr < first_start + pcpu_reserved_chunk_limit;
199}
200
201static int __pcpu_size_to_slot(int size)
202{
203 int highbit = fls(size); /* size is in bytes */
204 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
205}
206
207static int pcpu_size_to_slot(int size)
208{
209 if (size == pcpu_unit_size)
210 return pcpu_nr_slots - 1;
211 return __pcpu_size_to_slot(size);
212}
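/*
 * Worked example (illustrative, not part of the original source): with
 * PCPU_SLOT_BASE_SHIFT == 5, a 64-byte area has fls(64) == 7, so
 * __pcpu_size_to_slot() returns max(7 - 5 + 2, 1) == 4, and a 1024-byte
 * area lands in slot max(11 - 5 + 2, 1) == 8.  A request of exactly
 * pcpu_unit_size is short-circuited to the last slot above.
 */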
213
214static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
215{
216 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
217 return 0;
218
219 return pcpu_size_to_slot(chunk->free_size);
220}
221
222/* set the pointer to a chunk in a page struct */
223static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
224{
225 page->index = (unsigned long)pcpu;
226}
227
228/* obtain pointer to a chunk from a page struct */
229static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
230{
231 return (struct pcpu_chunk *)page->index;
232}
233
234static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
235{
236 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
237}
238
239static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
240 unsigned int cpu, int page_idx)
241{
242 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
243 (page_idx << PAGE_SHIFT);
244}
245
246static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
247 int *rs, int *re, int end)
248{
249 *rs = find_next_zero_bit(chunk->populated, end, *rs);
250 *re = find_next_bit(chunk->populated, end, *rs + 1);
251}
252
253static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
254 int *rs, int *re, int end)
255{
256 *rs = find_next_bit(chunk->populated, end, *rs);
257 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
258}
259
260/*
261 * (Un)populated page region iterators. Iterate over (un)populated
262 * page regions between @start and @end in @chunk. @rs and @re should
263 * be integer variables and will be set to start and end page index of
264 * the current region.
265 */
266#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
267 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
268 (rs) < (re); \
269 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
270
271#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
272 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
273 (rs) < (re); \
274 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
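/*
 * Usage sketch (illustrative, not part of the original source): walk the
 * unpopulated page ranges of a chunk, e.g.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d, %d) need populating\n", rs, re);
 *
 * Each iteration yields one maximal run of clear bits in chunk->populated
 * within [0, pcpu_unit_pages).
 */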
275
276/**
277 * pcpu_mem_zalloc - allocate memory
278 * @size: bytes to allocate
279 *
280 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
281 * kzalloc() is used; otherwise, vzalloc() is used. The returned
282 * memory is always zeroed.
283 *
284 * CONTEXT:
285 * Does GFP_KERNEL allocation.
286 *
287 * RETURNS:
288 * Pointer to the allocated area on success, NULL on failure.
289 */
290static void *pcpu_mem_zalloc(size_t size)
291{
292 if (WARN_ON_ONCE(!slab_is_available()))
293 return NULL;
294
295 if (size <= PAGE_SIZE)
296 return kzalloc(size, GFP_KERNEL);
297 else
298 return vzalloc(size);
299}
300
301/**
302 * pcpu_mem_free - free memory
303 * @ptr: memory to free
304 * @size: size of the area
305 *
306 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
307 */
308static void pcpu_mem_free(void *ptr, size_t size)
309{
310 if (size <= PAGE_SIZE)
311 kfree(ptr);
312 else
313 vfree(ptr);
314}
315
316/**
317 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
318 * @chunk: chunk of interest
319 * @oslot: the previous slot it was on
320 *
321 * This function is called after an allocation or free changed @chunk.
322 * New slot according to the changed state is determined and @chunk is
323 * moved to the slot. Note that the reserved chunk is never put on
324 * chunk slots.
325 *
326 * CONTEXT:
327 * pcpu_lock.
328 */
329static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
330{
331 int nslot = pcpu_chunk_slot(chunk);
332
333 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
334 if (oslot < nslot)
335 list_move(&chunk->list, &pcpu_slot[nslot]);
336 else
337 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
338 }
339}
340
341/**
342 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
343 * @chunk: chunk of interest
344 *
345 * Determine whether area map of @chunk needs to be extended to
346 * accommodate a new allocation.
347 *
348 * CONTEXT:
349 * pcpu_lock.
350 *
351 * RETURNS:
352 * New target map allocation length if extension is necessary, 0
353 * otherwise.
354 */
355static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
356{
357 int new_alloc;
358
359 if (chunk->map_alloc >= chunk->map_used + 2)
360 return 0;
361
362 new_alloc = PCPU_DFL_MAP_ALLOC;
363 while (new_alloc < chunk->map_used + 2)
364 new_alloc *= 2;
365
366 return new_alloc;
367}
368
369/**
370 * pcpu_extend_area_map - extend area map of a chunk
371 * @chunk: chunk of interest
372 * @new_alloc: new target allocation length of the area map
373 *
374 * Extend area map of @chunk to have @new_alloc entries.
375 *
376 * CONTEXT:
377 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
378 *
379 * RETURNS:
380 * 0 on success, -errno on failure.
381 */
382static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
383{
384 int *old = NULL, *new = NULL;
385 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
386 unsigned long flags;
387
388 new = pcpu_mem_zalloc(new_size);
389 if (!new)
390 return -ENOMEM;
391
392 /* acquire pcpu_lock and switch to new area map */
393 spin_lock_irqsave(&pcpu_lock, flags);
394
395 if (new_alloc <= chunk->map_alloc)
396 goto out_unlock;
397
398 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
399 old = chunk->map;
400
401 memcpy(new, old, old_size);
402
403 chunk->map_alloc = new_alloc;
404 chunk->map = new;
405 new = NULL;
406
407out_unlock:
408 spin_unlock_irqrestore(&pcpu_lock, flags);
409
410 /*
411 * pcpu_mem_free() might end up calling vfree() which uses
412 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
413 */
414 pcpu_mem_free(old, old_size);
415 pcpu_mem_free(new, new_size);
416
417 return 0;
418}
419
420/**
421 * pcpu_split_block - split a map block
422 * @chunk: chunk of interest
423 * @i: index of map block to split
424 * @head: head size in bytes (can be 0)
425 * @tail: tail size in bytes (can be 0)
426 *
427 * Split the @i'th map block into two or three blocks. If @head is
428 * non-zero, @head bytes block is inserted before block @i moving it
429 * to @i+1 and reducing its size by @head bytes.
430 *
431 * If @tail is non-zero, the target block, which can be @i or @i+1
432 * depending on @head, is reduced by @tail bytes and @tail byte block
433 * is inserted after the target block.
434 *
435 * @chunk->map must have enough free slots to accommodate the split.
436 *
437 * CONTEXT:
438 * pcpu_lock.
439 */
440static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
441 int head, int tail)
442{
443 int nr_extra = !!head + !!tail;
444
445 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
446
447 /* insert new subblocks */
448 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
449 sizeof(chunk->map[0]) * (chunk->map_used - i));
450 chunk->map_used += nr_extra;
451
452 if (head) {
453 chunk->map[i + 1] = chunk->map[i] - head;
454 chunk->map[i++] = head;
455 }
456 if (tail) {
457 chunk->map[i++] -= tail;
458 chunk->map[i] = tail;
459 }
460}
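/*
 * Worked example (illustrative, not part of the original source): splitting
 * a single 1024-byte free entry at index i with head == 8 and tail == 16
 * for a 1000-byte allocation turns
 *
 *	..., 1024, ...		into	..., 8, 1000, 16, ...
 *
 * The caller (pcpu_alloc_area() below) then negates the middle entry to
 * mark it allocated.
 */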
461
462/**
463 * pcpu_alloc_area - allocate area from a pcpu_chunk
464 * @chunk: chunk of interest
465 * @size: wanted size in bytes
466 * @align: wanted align
467 *
468 * Try to allocate @size bytes area aligned at @align from @chunk.
469 * Note that this function only allocates the offset. It doesn't
470 * populate or map the area.
471 *
472 * @chunk->map must have at least two free slots.
473 *
474 * CONTEXT:
475 * pcpu_lock.
476 *
477 * RETURNS:
478 * Allocated offset in @chunk on success, -1 if no matching area is
479 * found.
480 */
481static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
482{
483 int oslot = pcpu_chunk_slot(chunk);
484 int max_contig = 0;
485 int i, off;
486
487 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
488 bool is_last = i + 1 == chunk->map_used;
489 int head, tail;
490
491 /* extra for alignment requirement */
492 head = ALIGN(off, align) - off;
493 BUG_ON(i == 0 && head != 0);
494
495 if (chunk->map[i] < 0)
496 continue;
497 if (chunk->map[i] < head + size) {
498 max_contig = max(chunk->map[i], max_contig);
499 continue;
500 }
501
502 /*
503 * If head is small or the previous block is free,
504 * merge'em. Note that 'small' is defined as smaller
505 * than sizeof(int), which is very small but isn't too
506 * uncommon for percpu allocations.
507 */
508 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
509 if (chunk->map[i - 1] > 0)
510 chunk->map[i - 1] += head;
511 else {
512 chunk->map[i - 1] -= head;
513 chunk->free_size -= head;
514 }
515 chunk->map[i] -= head;
516 off += head;
517 head = 0;
518 }
519
520 /* if tail is small, just keep it around */
521 tail = chunk->map[i] - head - size;
522 if (tail < sizeof(int))
523 tail = 0;
524
525 /* split if warranted */
526 if (head || tail) {
527 pcpu_split_block(chunk, i, head, tail);
528 if (head) {
529 i++;
530 off += head;
531 max_contig = max(chunk->map[i - 1], max_contig);
532 }
533 if (tail)
534 max_contig = max(chunk->map[i + 1], max_contig);
535 }
536
537 /* update hint and mark allocated */
538 if (is_last)
539 chunk->contig_hint = max_contig; /* fully scanned */
540 else
541 chunk->contig_hint = max(chunk->contig_hint,
542 max_contig);
543
544 chunk->free_size -= chunk->map[i];
545 chunk->map[i] = -chunk->map[i];
546
547 pcpu_chunk_relocate(chunk, oslot);
548 return off;
549 }
550
551 chunk->contig_hint = max_contig; /* fully scanned */
552 pcpu_chunk_relocate(chunk, oslot);
553
554 /* tell the upper layer that this chunk has no matching area */
555 return -1;
556}
557
558/**
559 * pcpu_free_area - free area to a pcpu_chunk
560 * @chunk: chunk of interest
561 * @freeme: offset of area to free
562 *
563 * Free area starting from @freeme to @chunk. Note that this function
564 * only modifies the allocation map. It doesn't depopulate or unmap
565 * the area.
566 *
567 * CONTEXT:
568 * pcpu_lock.
569 */
570static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
571{
572 int oslot = pcpu_chunk_slot(chunk);
573 int i, off;
574
575 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
576 if (off == freeme)
577 break;
578 BUG_ON(off != freeme);
579 BUG_ON(chunk->map[i] > 0);
580
581 chunk->map[i] = -chunk->map[i];
582 chunk->free_size += chunk->map[i];
583
584 /* merge with previous? */
585 if (i > 0 && chunk->map[i - 1] >= 0) {
586 chunk->map[i - 1] += chunk->map[i];
587 chunk->map_used--;
588 memmove(&chunk->map[i], &chunk->map[i + 1],
589 (chunk->map_used - i) * sizeof(chunk->map[0]));
590 i--;
591 }
592 /* merge with next? */
593 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
594 chunk->map[i] += chunk->map[i + 1];
595 chunk->map_used--;
596 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
597 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
598 }
599
600 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
601 pcpu_chunk_relocate(chunk, oslot);
602}
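/*
 * Worked example (illustrative, not part of the original source): with
 * map = { -256, -128, 512 }, freeing the area at offset 256 flips the -128
 * entry to +128 and then merges it with the following free entry, leaving
 * map = { -256, 640 } with map_used reduced by one.
 */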
603
604static struct pcpu_chunk *pcpu_alloc_chunk(void)
605{
606 struct pcpu_chunk *chunk;
607
608 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
609 if (!chunk)
610 return NULL;
611
612 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
613 sizeof(chunk->map[0]));
614 if (!chunk->map) {
615 pcpu_mem_free(chunk, pcpu_chunk_struct_size);
616 return NULL;
617 }
618
619 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
620 chunk->map[chunk->map_used++] = pcpu_unit_size;
621
622 INIT_LIST_HEAD(&chunk->list);
623 chunk->free_size = pcpu_unit_size;
624 chunk->contig_hint = pcpu_unit_size;
625
626 return chunk;
627}
628
629static void pcpu_free_chunk(struct pcpu_chunk *chunk)
630{
631 if (!chunk)
632 return;
633 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
634 pcpu_mem_free(chunk, pcpu_chunk_struct_size);
635}
636
637/*
638 * Chunk management implementation.
639 *
640 * To allow different implementations, chunk alloc/free and
641 * [de]population are implemented in a separate file which is pulled
642 * into this file and compiled together. The following functions
643 * should be implemented.
644 *
645 * pcpu_populate_chunk - populate the specified range of a chunk
646 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
647 * pcpu_create_chunk - create a new chunk
648 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
649 * pcpu_addr_to_page - translate address to physical address
650 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
651 */
652static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
653static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
654static struct pcpu_chunk *pcpu_create_chunk(void);
655static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
656static struct page *pcpu_addr_to_page(void *addr);
657static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
658
659#ifdef CONFIG_NEED_PER_CPU_KM
660#include "percpu-km.c"
661#else
662#include "percpu-vm.c"
663#endif
664
665/**
666 * pcpu_chunk_addr_search - determine chunk containing specified address
667 * @addr: address for which the chunk needs to be determined.
668 *
669 * RETURNS:
670 * The address of the found chunk.
671 */
672static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
673{
674 /* is it in the first chunk? */
675 if (pcpu_addr_in_first_chunk(addr)) {
676 /* is it in the reserved area? */
677 if (pcpu_addr_in_reserved_chunk(addr))
678 return pcpu_reserved_chunk;
679 return pcpu_first_chunk;
680 }
681
682 /*
683 * The address is relative to unit0 which might be unused and
684 * thus unmapped. Offset the address to the unit space of the
685 * current processor before looking it up in the vmalloc
686 * space. Note that any possible cpu id can be used here, so
687 * there's no need to worry about preemption or cpu hotplug.
688 */
689 addr += pcpu_unit_offsets[raw_smp_processor_id()];
690 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
691}
692
693/**
694 * pcpu_alloc - the percpu allocator
695 * @size: size of area to allocate in bytes
696 * @align: alignment of area (max PAGE_SIZE)
697 * @reserved: allocate from the reserved chunk if available
698 *
699 * Allocate percpu area of @size bytes aligned at @align.
700 *
701 * CONTEXT:
702 * Does GFP_KERNEL allocation.
703 *
704 * RETURNS:
705 * Percpu pointer to the allocated area on success, NULL on failure.
706 */
707static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
708{
709 static int warn_limit = 10;
710 struct pcpu_chunk *chunk;
711 const char *err;
712 int slot, off, new_alloc;
713 unsigned long flags;
714 void __percpu *ptr;
715
716 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
717 WARN(true, "illegal size (%zu) or align (%zu) for "
718 "percpu allocation\n", size, align);
719 return NULL;
720 }
721
722 mutex_lock(&pcpu_alloc_mutex);
723 spin_lock_irqsave(&pcpu_lock, flags);
724
725 /* serve reserved allocations from the reserved chunk if available */
726 if (reserved && pcpu_reserved_chunk) {
727 chunk = pcpu_reserved_chunk;
728
729 if (size > chunk->contig_hint) {
730 err = "alloc from reserved chunk failed";
731 goto fail_unlock;
732 }
733
734 while ((new_alloc = pcpu_need_to_extend(chunk))) {
735 spin_unlock_irqrestore(&pcpu_lock, flags);
736 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
737 err = "failed to extend area map of reserved chunk";
738 goto fail_unlock_mutex;
739 }
740 spin_lock_irqsave(&pcpu_lock, flags);
741 }
742
743 off = pcpu_alloc_area(chunk, size, align);
744 if (off >= 0)
745 goto area_found;
746
747 err = "alloc from reserved chunk failed";
748 goto fail_unlock;
749 }
750
751restart:
752 /* search through normal chunks */
753 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
754 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
755 if (size > chunk->contig_hint)
756 continue;
757
758 new_alloc = pcpu_need_to_extend(chunk);
759 if (new_alloc) {
760 spin_unlock_irqrestore(&pcpu_lock, flags);
761 if (pcpu_extend_area_map(chunk,
762 new_alloc) < 0) {
763 err = "failed to extend area map";
764 goto fail_unlock_mutex;
765 }
766 spin_lock_irqsave(&pcpu_lock, flags);
767 /*
768 * pcpu_lock has been dropped, need to
769 * restart cpu_slot list walking.
770 */
771 goto restart;
772 }
773
774 off = pcpu_alloc_area(chunk, size, align);
775 if (off >= 0)
776 goto area_found;
777 }
778 }
779
780 /* hmmm... no space left, create a new chunk */
781 spin_unlock_irqrestore(&pcpu_lock, flags);
782
783 chunk = pcpu_create_chunk();
784 if (!chunk) {
785 err = "failed to allocate new chunk";
786 goto fail_unlock_mutex;
787 }
788
789 spin_lock_irqsave(&pcpu_lock, flags);
790 pcpu_chunk_relocate(chunk, -1);
791 goto restart;
792
793area_found:
794 spin_unlock_irqrestore(&pcpu_lock, flags);
795
796 /* populate, map and clear the area */
797 if (pcpu_populate_chunk(chunk, off, size)) {
798 spin_lock_irqsave(&pcpu_lock, flags);
799 pcpu_free_area(chunk, off);
800 err = "failed to populate";
801 goto fail_unlock;
802 }
803
804 mutex_unlock(&pcpu_alloc_mutex);
805
806 /* return address relative to base address */
807 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
808 kmemleak_alloc_percpu(ptr, size);
809 return ptr;
810
811fail_unlock:
812 spin_unlock_irqrestore(&pcpu_lock, flags);
813fail_unlock_mutex:
814 mutex_unlock(&pcpu_alloc_mutex);
815 if (warn_limit) {
816 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
817 "%s\n", size, align, err);
818 dump_stack();
819 if (!--warn_limit)
820 pr_info("PERCPU: limit reached, disable warning\n");
821 }
822 return NULL;
823}
824
825/**
826 * __alloc_percpu - allocate dynamic percpu area
827 * @size: size of area to allocate in bytes
828 * @align: alignment of area (max PAGE_SIZE)
829 *
830 * Allocate zero-filled percpu area of @size bytes aligned at @align.
831 * Might sleep. Might trigger writeouts.
832 *
833 * CONTEXT:
834 * Does GFP_KERNEL allocation.
835 *
836 * RETURNS:
837 * Percpu pointer to the allocated area on success, NULL on failure.
838 */
839void __percpu *__alloc_percpu(size_t size, size_t align)
840{
841 return pcpu_alloc(size, align, false);
842}
843EXPORT_SYMBOL_GPL(__alloc_percpu);
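/*
 * Usage sketch (illustrative, not part of this file): callers typically go
 * through the alloc_percpu() wrapper from <linux/percpu.h>; the struct and
 * variable names below are hypothetical.
 *
 *	struct foo_stats { u64 packets; u64 bytes; };
 *	struct foo_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->packets);
 *	...
 *	free_percpu(stats);
 */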
844
845/**
846 * __alloc_reserved_percpu - allocate reserved percpu area
847 * @size: size of area to allocate in bytes
848 * @align: alignment of area (max PAGE_SIZE)
849 *
850 * Allocate zero-filled percpu area of @size bytes aligned at @align
851 * from reserved percpu area if arch has set it up; otherwise,
852 * allocation is served from the same dynamic area. Might sleep.
853 * Might trigger writeouts.
854 *
855 * CONTEXT:
856 * Does GFP_KERNEL allocation.
857 *
858 * RETURNS:
859 * Percpu pointer to the allocated area on success, NULL on failure.
860 */
861void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
862{
863 return pcpu_alloc(size, align, true);
864}
865
866/**
867 * pcpu_reclaim - reclaim fully free chunks, workqueue function
868 * @work: unused
869 *
870 * Reclaim all fully free chunks except for the first one.
871 *
872 * CONTEXT:
873 * workqueue context.
874 */
875static void pcpu_reclaim(struct work_struct *work)
876{
877 LIST_HEAD(todo);
878 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
879 struct pcpu_chunk *chunk, *next;
880
881 mutex_lock(&pcpu_alloc_mutex);
882 spin_lock_irq(&pcpu_lock);
883
884 list_for_each_entry_safe(chunk, next, head, list) {
885 WARN_ON(chunk->immutable);
886
887 /* spare the first one */
888 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
889 continue;
890
891 list_move(&chunk->list, &todo);
892 }
893
894 spin_unlock_irq(&pcpu_lock);
895
896 list_for_each_entry_safe(chunk, next, &todo, list) {
897 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
898 pcpu_destroy_chunk(chunk);
899 }
900
901 mutex_unlock(&pcpu_alloc_mutex);
902}
903
904/**
905 * free_percpu - free percpu area
906 * @ptr: pointer to area to free
907 *
908 * Free percpu area @ptr.
909 *
910 * CONTEXT:
911 * Can be called from atomic context.
912 */
913void free_percpu(void __percpu *ptr)
914{
915 void *addr;
916 struct pcpu_chunk *chunk;
917 unsigned long flags;
918 int off;
919
920 if (!ptr)
921 return;
922
923 kmemleak_free_percpu(ptr);
924
925 addr = __pcpu_ptr_to_addr(ptr);
926
927 spin_lock_irqsave(&pcpu_lock, flags);
928
929 chunk = pcpu_chunk_addr_search(addr);
930 off = addr - chunk->base_addr;
931
932 pcpu_free_area(chunk, off);
933
934 /* if there is more than one fully free chunk, wake up the grim reaper */
935 if (chunk->free_size == pcpu_unit_size) {
936 struct pcpu_chunk *pos;
937
938 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
939 if (pos != chunk) {
940 schedule_work(&pcpu_reclaim_work);
941 break;
942 }
943 }
944
945 spin_unlock_irqrestore(&pcpu_lock, flags);
946}
947EXPORT_SYMBOL_GPL(free_percpu);
948
949/**
950 * is_kernel_percpu_address - test whether address is from static percpu area
951 * @addr: address to test
952 *
953 * Test whether @addr belongs to in-kernel static percpu area. Module
954 * static percpu areas are not considered. For those, use
955 * is_module_percpu_address().
956 *
957 * RETURNS:
958 * %true if @addr is from in-kernel static percpu area, %false otherwise.
959 */
960bool is_kernel_percpu_address(unsigned long addr)
961{
962#ifdef CONFIG_SMP
963 const size_t static_size = __per_cpu_end - __per_cpu_start;
964 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
965 unsigned int cpu;
966
967 for_each_possible_cpu(cpu) {
968 void *start = per_cpu_ptr(base, cpu);
969
970 if ((void *)addr >= start && (void *)addr < start + static_size)
971 return true;
972 }
973#endif
974 /* on UP, can't distinguish from other static vars, always false */
975 return false;
976}
977
978/**
979 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
980 * @addr: the address to be converted to physical address
981 *
982 * Given @addr which is dereferenceable address obtained via one of
983 * percpu access macros, this function translates it into its physical
984 * address. The caller is responsible for ensuring @addr stays valid
985 * until this function finishes.
986 *
987 * percpu allocator has special setup for the first chunk, which currently
988 * supports either embedding in linear address space or vmalloc mapping,
989 * and, from the second one, the backing allocator (currently either vm or
990 * km) provides translation.
991 *
992 * The addr can be translated simply without checking if it falls into the
993 * first chunk. But the current code reflects better how percpu allocator
994 * actually works, and the verification can discover both bugs in percpu
995 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
996 * code.
997 *
998 * RETURNS:
999 * The physical address for @addr.
1000 */
1001phys_addr_t per_cpu_ptr_to_phys(void *addr)
1002{
1003 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1004 bool in_first_chunk = false;
1005 unsigned long first_low, first_high;
1006 unsigned int cpu;
1007
1008 /*
1009 * The following test on unit_low/high isn't strictly
1010 * necessary but will speed up lookups of addresses which
1011 * aren't in the first chunk.
1012 */
1013 first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1014 first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1015 pcpu_unit_pages);
1016 if ((unsigned long)addr >= first_low &&
1017 (unsigned long)addr < first_high) {
1018 for_each_possible_cpu(cpu) {
1019 void *start = per_cpu_ptr(base, cpu);
1020
1021 if (addr >= start && addr < start + pcpu_unit_size) {
1022 in_first_chunk = true;
1023 break;
1024 }
1025 }
1026 }
1027
1028 if (in_first_chunk) {
1029 if (!is_vmalloc_addr(addr))
1030 return __pa(addr);
1031 else
1032 return page_to_phys(vmalloc_to_page(addr)) +
1033 offset_in_page(addr);
1034 } else
1035 return page_to_phys(pcpu_addr_to_page(addr)) +
1036 offset_in_page(addr);
1037}
1038
1039/**
1040 * pcpu_alloc_alloc_info - allocate percpu allocation info
1041 * @nr_groups: the number of groups
1042 * @nr_units: the number of units
1043 *
1044 * Allocate ai which is large enough for @nr_groups groups containing
1045 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1046 * cpu_map array which is long enough for @nr_units and filled with
1047 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1048 * pointer of other groups.
1049 *
1050 * RETURNS:
1051 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1052 * failure.
1053 */
1054struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1055 int nr_units)
1056{
1057 struct pcpu_alloc_info *ai;
1058 size_t base_size, ai_size;
1059 void *ptr;
1060 int unit;
1061
1062 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1063 __alignof__(ai->groups[0].cpu_map[0]));
1064 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1065
1066 ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1067 if (!ptr)
1068 return NULL;
1069 ai = ptr;
1070 ptr += base_size;
1071
1072 ai->groups[0].cpu_map = ptr;
1073
1074 for (unit = 0; unit < nr_units; unit++)
1075 ai->groups[0].cpu_map[unit] = NR_CPUS;
1076
1077 ai->nr_groups = nr_groups;
1078 ai->__ai_size = PFN_ALIGN(ai_size);
1079
1080 return ai;
1081}
1082
1083/**
1084 * pcpu_free_alloc_info - free percpu allocation info
1085 * @ai: pcpu_alloc_info to free
1086 *
1087 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1088 */
1089void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1090{
1091 free_bootmem(__pa(ai), ai->__ai_size);
1092}
1093
1094/**
1095 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1096 * @lvl: loglevel
1097 * @ai: allocation info to dump
1098 *
1099 * Print out information about @ai using loglevel @lvl.
1100 */
1101static void pcpu_dump_alloc_info(const char *lvl,
1102 const struct pcpu_alloc_info *ai)
1103{
1104 int group_width = 1, cpu_width = 1, width;
1105 char empty_str[] = "--------";
1106 int alloc = 0, alloc_end = 0;
1107 int group, v;
1108 int upa, apl; /* units per alloc, allocs per line */
1109
1110 v = ai->nr_groups;
1111 while (v /= 10)
1112 group_width++;
1113
1114 v = num_possible_cpus();
1115 while (v /= 10)
1116 cpu_width++;
1117 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1118
1119 upa = ai->alloc_size / ai->unit_size;
1120 width = upa * (cpu_width + 1) + group_width + 3;
1121 apl = rounddown_pow_of_two(max(60 / width, 1));
1122
1123 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1124 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1125 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1126
1127 for (group = 0; group < ai->nr_groups; group++) {
1128 const struct pcpu_group_info *gi = &ai->groups[group];
1129 int unit = 0, unit_end = 0;
1130
1131 BUG_ON(gi->nr_units % upa);
1132 for (alloc_end += gi->nr_units / upa;
1133 alloc < alloc_end; alloc++) {
1134 if (!(alloc % apl)) {
1135 printk(KERN_CONT "\n");
1136 printk("%spcpu-alloc: ", lvl);
1137 }
1138 printk(KERN_CONT "[%0*d] ", group_width, group);
1139
1140 for (unit_end += upa; unit < unit_end; unit++)
1141 if (gi->cpu_map[unit] != NR_CPUS)
1142 printk(KERN_CONT "%0*d ", cpu_width,
1143 gi->cpu_map[unit]);
1144 else
1145 printk(KERN_CONT "%s ", empty_str);
1146 }
1147 }
1148 printk(KERN_CONT "\n");
1149}
1150
1151/**
1152 * pcpu_setup_first_chunk - initialize the first percpu chunk
1153 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1154 * @base_addr: mapped address
1155 *
1156 * Initialize the first percpu chunk which contains the kernel static
1157 * percpu area. This function is to be called from arch percpu area
1158 * setup path.
1159 *
1160 * @ai contains all information necessary to initialize the first
1161 * chunk and prime the dynamic percpu allocator.
1162 *
1163 * @ai->static_size is the size of static percpu area.
1164 *
1165 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1166 * reserve after the static area in the first chunk. This reserves
1167 * the first chunk such that it's available only through reserved
1168 * percpu allocation. This is primarily used to serve module percpu
1169 * static areas on architectures where the addressing model has
1170 * limited offset range for symbol relocations to guarantee module
1171 * percpu symbols fall inside the relocatable range.
1172 *
1173 * @ai->dyn_size determines the number of bytes available for dynamic
1174 * allocation in the first chunk. The area between @ai->static_size +
1175 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1176 *
1177 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1178 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1179 * @ai->dyn_size.
1180 *
1181 * @ai->atom_size is the allocation atom size and used as alignment
1182 * for vm areas.
1183 *
1184 * @ai->alloc_size is the allocation size and always multiple of
1185 * @ai->atom_size. This is larger than @ai->atom_size if
1186 * @ai->unit_size is larger than @ai->atom_size.
1187 *
1188 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1189 * percpu areas. Units which should be colocated are put into the
1190 * same group. Dynamic VM areas will be allocated according to these
1191 * groupings. If @ai->nr_groups is zero, a single group containing
1192 * all units is assumed.
1193 *
1194 * The caller should have mapped the first chunk at @base_addr and
1195 * copied static data to each unit.
1196 *
1197 * If the first chunk ends up with both reserved and dynamic areas, it
1198 * is served by two chunks - one to serve the core static and reserved
1199 * areas and the other for the dynamic area. They share the same vm
1200 * and page map but uses different area allocation map to stay away
1201 * from each other. The latter chunk is circulated in the chunk slots
1202 * and available for dynamic allocation like any other chunks.
1203 *
1204 * RETURNS:
1205 * 0 on success, -errno on failure.
1206 */
1207int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1208 void *base_addr)
1209{
1210 static char cpus_buf[4096] __initdata;
1211 static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1212 static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1213 size_t dyn_size = ai->dyn_size;
1214 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1215 struct pcpu_chunk *schunk, *dchunk = NULL;
1216 unsigned long *group_offsets;
1217 size_t *group_sizes;
1218 unsigned long *unit_off;
1219 unsigned int cpu;
1220 int *unit_map;
1221 int group, unit, i;
1222
1223 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1224
1225#define PCPU_SETUP_BUG_ON(cond) do { \
1226 if (unlikely(cond)) { \
1227 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1228 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1229 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1230 BUG(); \
1231 } \
1232} while (0)
1233
1234 /* sanity checks */
1235 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1236#ifdef CONFIG_SMP
1237 PCPU_SETUP_BUG_ON(!ai->static_size);
1238 PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1239#endif
1240 PCPU_SETUP_BUG_ON(!base_addr);
1241 PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1242 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1243 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1244 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1245 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1246 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1247
1248 /* process group information and build config tables accordingly */
1249 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1250 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1251 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1252 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1253
1254 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1255 unit_map[cpu] = UINT_MAX;
1256
1257 pcpu_low_unit_cpu = NR_CPUS;
1258 pcpu_high_unit_cpu = NR_CPUS;
1259
1260 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1261 const struct pcpu_group_info *gi = &ai->groups[group];
1262
1263 group_offsets[group] = gi->base_offset;
1264 group_sizes[group] = gi->nr_units * ai->unit_size;
1265
1266 for (i = 0; i < gi->nr_units; i++) {
1267 cpu = gi->cpu_map[i];
1268 if (cpu == NR_CPUS)
1269 continue;
1270
1271 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1272 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1273 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1274
1275 unit_map[cpu] = unit + i;
1276 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1277
1278 /* determine low/high unit_cpu */
1279 if (pcpu_low_unit_cpu == NR_CPUS ||
1280 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1281 pcpu_low_unit_cpu = cpu;
1282 if (pcpu_high_unit_cpu == NR_CPUS ||
1283 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1284 pcpu_high_unit_cpu = cpu;
1285 }
1286 }
1287 pcpu_nr_units = unit;
1288
1289 for_each_possible_cpu(cpu)
1290 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1291
1292 /* we're done parsing the input, undefine BUG macro and dump config */
1293#undef PCPU_SETUP_BUG_ON
1294 pcpu_dump_alloc_info(KERN_DEBUG, ai);
1295
1296 pcpu_nr_groups = ai->nr_groups;
1297 pcpu_group_offsets = group_offsets;
1298 pcpu_group_sizes = group_sizes;
1299 pcpu_unit_map = unit_map;
1300 pcpu_unit_offsets = unit_off;
1301
1302 /* determine basic parameters */
1303 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1304 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1305 pcpu_atom_size = ai->atom_size;
1306 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1307 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1308
1309 /*
1310 * Allocate chunk slots. The additional last slot is for
1311 * empty chunks.
1312 */
1313 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1314 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1315 for (i = 0; i < pcpu_nr_slots; i++)
1316 INIT_LIST_HEAD(&pcpu_slot[i]);
1317
1318 /*
1319 * Initialize static chunk. If reserved_size is zero, the
1320 * static chunk covers static area + dynamic allocation area
1321 * in the first chunk. If reserved_size is not zero, it
1322 * covers static area + reserved area (mostly used for module
1323 * static percpu allocation).
1324 */
1325 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1326 INIT_LIST_HEAD(&schunk->list);
1327 schunk->base_addr = base_addr;
1328 schunk->map = smap;
1329 schunk->map_alloc = ARRAY_SIZE(smap);
1330 schunk->immutable = true;
1331 bitmap_fill(schunk->populated, pcpu_unit_pages);
1332
1333 if (ai->reserved_size) {
1334 schunk->free_size = ai->reserved_size;
1335 pcpu_reserved_chunk = schunk;
1336 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1337 } else {
1338 schunk->free_size = dyn_size;
1339 dyn_size = 0; /* dynamic area covered */
1340 }
1341 schunk->contig_hint = schunk->free_size;
1342
1343 schunk->map[schunk->map_used++] = -ai->static_size;
1344 if (schunk->free_size)
1345 schunk->map[schunk->map_used++] = schunk->free_size;
1346
1347 /* init dynamic chunk if necessary */
1348 if (dyn_size) {
1349 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1350 INIT_LIST_HEAD(&dchunk->list);
1351 dchunk->base_addr = base_addr;
1352 dchunk->map = dmap;
1353 dchunk->map_alloc = ARRAY_SIZE(dmap);
1354 dchunk->immutable = true;
1355 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1356
1357 dchunk->contig_hint = dchunk->free_size = dyn_size;
1358 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1359 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1360 }
1361
1362 /* link the first chunk in */
1363 pcpu_first_chunk = dchunk ?: schunk;
1364 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1365
1366 /* we're done */
1367 pcpu_base_addr = base_addr;
1368 return 0;
1369}
1370
1371#ifdef CONFIG_SMP
1372
1373const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1374 [PCPU_FC_AUTO] = "auto",
1375 [PCPU_FC_EMBED] = "embed",
1376 [PCPU_FC_PAGE] = "page",
1377};
1378
1379enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1380
1381static int __init percpu_alloc_setup(char *str)
1382{
1383 if (!str)
1384 return -EINVAL;
1385
1386 if (0)
1387 /* nada */;
1388#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1389 else if (!strcmp(str, "embed"))
1390 pcpu_chosen_fc = PCPU_FC_EMBED;
1391#endif
1392#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1393 else if (!strcmp(str, "page"))
1394 pcpu_chosen_fc = PCPU_FC_PAGE;
1395#endif
1396 else
1397 pr_warning("PERCPU: unknown allocator %s specified\n", str);
1398
1399 return 0;
1400}
1401early_param("percpu_alloc", percpu_alloc_setup);
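/*
 * Usage note (illustrative, not part of the original source): the early
 * parameter above selects the first-chunk allocator from the kernel
 * command line, e.g. booting with
 *
 *	percpu_alloc=page
 *
 * forces the page-based first chunk (when the arch provides it), and
 * "percpu_alloc=embed" selects the embedded variant.  Anything else keeps
 * the default and prints the warning above.
 */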
1402
1403/*
1404 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1405 * Build it if needed by the arch config or the generic setup is going
1406 * to be used.
1407 */
1408#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1409 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1410#define BUILD_EMBED_FIRST_CHUNK
1411#endif
1412
1413/* build pcpu_page_first_chunk() iff needed by the arch config */
1414#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1415#define BUILD_PAGE_FIRST_CHUNK
1416#endif
1417
1418/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1419#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1420/**
1421 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1422 * @reserved_size: the size of reserved percpu area in bytes
1423 * @dyn_size: minimum free size for dynamic allocation in bytes
1424 * @atom_size: allocation atom size
1425 * @cpu_distance_fn: callback to determine distance between cpus, optional
1426 *
1427 * This function determines grouping of units, their mappings to cpus
1428 * and other parameters considering needed percpu size, allocation
1429 * atom size and distances between CPUs.
1430 *
1431 * Groups are always multiples of atom size and CPUs which are of
1432 * LOCAL_DISTANCE both ways are grouped together and share space for
1433 * units in the same group. The returned configuration is guaranteed
1434 * to have CPUs on different nodes on different groups and >=75% usage
1435 * of allocated virtual address space.
1436 *
1437 * RETURNS:
1438 * On success, pointer to the new allocation_info is returned. On
1439 * failure, ERR_PTR value is returned.
1440 */
1441static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1442 size_t reserved_size, size_t dyn_size,
1443 size_t atom_size,
1444 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1445{
1446 static int group_map[NR_CPUS] __initdata;
1447 static int group_cnt[NR_CPUS] __initdata;
1448 const size_t static_size = __per_cpu_end - __per_cpu_start;
1449 int nr_groups = 1, nr_units = 0;
1450 size_t size_sum, min_unit_size, alloc_size;
1451 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1452 int last_allocs, group, unit;
1453 unsigned int cpu, tcpu;
1454 struct pcpu_alloc_info *ai;
1455 unsigned int *cpu_map;
1456
1457 /* this function may be called multiple times */
1458 memset(group_map, 0, sizeof(group_map));
1459 memset(group_cnt, 0, sizeof(group_cnt));
1460
1461 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1462 size_sum = PFN_ALIGN(static_size + reserved_size +
1463 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1464 dyn_size = size_sum - static_size - reserved_size;
1465
1466 /*
1467 * Determine min_unit_size, alloc_size and max_upa such that
1468 * alloc_size is multiple of atom_size and is the smallest
1469 * which can accommodate 4k aligned segments which are equal to
1470 * or larger than min_unit_size.
1471 */
1472 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1473
1474 alloc_size = roundup(min_unit_size, atom_size);
1475 upa = alloc_size / min_unit_size;
1476 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1477 upa--;
1478 max_upa = upa;
1479
1480 /* group cpus according to their proximity */
1481 for_each_possible_cpu(cpu) {
1482 group = 0;
1483 next_group:
1484 for_each_possible_cpu(tcpu) {
1485 if (cpu == tcpu)
1486 break;
1487 if (group_map[tcpu] == group && cpu_distance_fn &&
1488 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1489 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1490 group++;
1491 nr_groups = max(nr_groups, group + 1);
1492 goto next_group;
1493 }
1494 }
1495 group_map[cpu] = group;
1496 group_cnt[group]++;
1497 }
1498
1499 /*
1500 * Expand unit size until address space usage goes over 75%
1501 * and then as much as possible without using more address
1502 * space.
1503 */
1504 last_allocs = INT_MAX;
1505 for (upa = max_upa; upa; upa--) {
1506 int allocs = 0, wasted = 0;
1507
1508 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1509 continue;
1510
1511 for (group = 0; group < nr_groups; group++) {
1512 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1513 allocs += this_allocs;
1514 wasted += this_allocs * upa - group_cnt[group];
1515 }
1516
1517 /*
1518 * Don't accept if wastage is over 1/3. The
1519 * greater-than comparison ensures upa==1 always
1520 * passes the following check.
1521 */
1522 if (wasted > num_possible_cpus() / 3)
1523 continue;
1524
1525 /* and then don't consume more memory */
1526 if (allocs > last_allocs)
1527 break;
1528 last_allocs = allocs;
1529 best_upa = upa;
1530 }
1531 upa = best_upa;
1532
1533 /* allocate and fill alloc_info */
1534 for (group = 0; group < nr_groups; group++)
1535 nr_units += roundup(group_cnt[group], upa);
1536
1537 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1538 if (!ai)
1539 return ERR_PTR(-ENOMEM);
1540 cpu_map = ai->groups[0].cpu_map;
1541
1542 for (group = 0; group < nr_groups; group++) {
1543 ai->groups[group].cpu_map = cpu_map;
1544 cpu_map += roundup(group_cnt[group], upa);
1545 }
1546
1547 ai->static_size = static_size;
1548 ai->reserved_size = reserved_size;
1549 ai->dyn_size = dyn_size;
1550 ai->unit_size = alloc_size / upa;
1551 ai->atom_size = atom_size;
1552 ai->alloc_size = alloc_size;
1553
1554 for (group = 0, unit = 0; group_cnt[group]; group++) {
1555 struct pcpu_group_info *gi = &ai->groups[group];
1556
1557 /*
1558 * Initialize base_offset as if all groups are located
1559 * back-to-back. The caller should update this to
1560 * reflect actual allocation.
1561 */
1562 gi->base_offset = unit * ai->unit_size;
1563
1564 for_each_possible_cpu(cpu)
1565 if (group_map[cpu] == group)
1566 gi->cpu_map[gi->nr_units++] = cpu;
1567 gi->nr_units = roundup(gi->nr_units, upa);
1568 unit += gi->nr_units;
1569 }
1570 BUG_ON(unit != nr_units);
1571
1572 return ai;
1573}
1574#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1575
1576#if defined(BUILD_EMBED_FIRST_CHUNK)
1577/**
1578 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
66c3a757 1579 * @reserved_size: the size of reserved percpu area in bytes
4ba6ce25 1580 * @dyn_size: minimum free size for dynamic allocation in bytes
1581 * @atom_size: allocation atom size
1582 * @cpu_distance_fn: callback to determine distance between cpus, optional
1583 * @alloc_fn: function to allocate percpu page
25985edc 1584 * @free_fn: function to free percpu page
1585 *
1586 * This is a helper to ease setting up embedded first percpu chunk and
1587 * can be called where pcpu_setup_first_chunk() is expected.
1588 *
1589 * If this function is used to setup the first chunk, it is allocated
1590 * by calling @alloc_fn and used as-is without being mapped into
1591 * vmalloc area. Allocations are always whole multiples of @atom_size
1592 * aligned to @atom_size.
1593 *
1594 * This enables the first chunk to piggy back on the linear physical
1595 * mapping which often uses larger page size. Please note that this
1596 * can result in very sparse cpu->unit mapping on NUMA machines thus
1597 * requiring large vmalloc address space. Don't use this allocator if
1598 * vmalloc space is not orders of magnitude larger than distances
1599 * between node memory addresses (ie. 32bit NUMA machines).
66c3a757 1600 *
4ba6ce25 1601 * @dyn_size specifies the minimum dynamic area size.
1602 *
1603 * If the needed size is smaller than the minimum or specified unit
c8826dd5 1604 * size, the leftover is returned using @free_fn.
1605 *
1606 * RETURNS:
fb435d52 1607 * 0 on success, -errno on failure.
66c3a757 1608 */
4ba6ce25 1609int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1610 size_t atom_size,
1611 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1612 pcpu_fc_alloc_fn_t alloc_fn,
1613 pcpu_fc_free_fn_t free_fn)
66c3a757 1614{
1615 void *base = (void *)ULONG_MAX;
1616 void **areas = NULL;
fd1e8a1f 1617 struct pcpu_alloc_info *ai;
6ea529a2 1618 size_t size_sum, areas_size, max_distance;
c8826dd5 1619 int group, i, rc;
66c3a757 1620
1621 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1622 cpu_distance_fn);
1623 if (IS_ERR(ai))
1624 return PTR_ERR(ai);
66c3a757 1625
fd1e8a1f 1626 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
c8826dd5 1627 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
fa8a7094 1628
1629 areas = alloc_bootmem_nopanic(areas_size);
1630 if (!areas) {
fb435d52 1631 rc = -ENOMEM;
c8826dd5 1632 goto out_free;
fa8a7094 1633 }
66c3a757 1634
1635 /* allocate, copy and determine base address */
1636 for (group = 0; group < ai->nr_groups; group++) {
1637 struct pcpu_group_info *gi = &ai->groups[group];
1638 unsigned int cpu = NR_CPUS;
1639 void *ptr;
1640
1641 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1642 cpu = gi->cpu_map[i];
1643 BUG_ON(cpu == NR_CPUS);
1644
1645 /* allocate space for the whole group */
1646 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1647 if (!ptr) {
1648 rc = -ENOMEM;
1649 goto out_free_areas;
1650 }
1651 /* kmemleak tracks the percpu allocations separately */
1652 kmemleak_free(ptr);
c8826dd5 1653 areas[group] = ptr;
fd1e8a1f 1654
c8826dd5 1655 base = min(ptr, base);
1656 }
1657
1658 /*
1659 * Copy data and free unused parts. This should happen after all
1660 * allocations are complete; otherwise, we may end up with
1661 * overlapping groups.
1662 */
1663 for (group = 0; group < ai->nr_groups; group++) {
1664 struct pcpu_group_info *gi = &ai->groups[group];
1665 void *ptr = areas[group];
1666
1667 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1668 if (gi->cpu_map[i] == NR_CPUS) {
1669 /* unused unit, free whole */
1670 free_fn(ptr, ai->unit_size);
1671 continue;
1672 }
1673 /* copy and return the unused part */
1674 memcpy(ptr, __per_cpu_load, ai->static_size);
1675 free_fn(ptr + size_sum, ai->unit_size - size_sum);
1676 }
fa8a7094 1677 }
66c3a757 1678
c8826dd5 1679 /* base address is now known, determine group base offsets */
1680 max_distance = 0;
1681 for (group = 0; group < ai->nr_groups; group++) {
c8826dd5 1682 ai->groups[group].base_offset = areas[group] - base;
1683 max_distance = max_t(size_t, max_distance,
1684 ai->groups[group].base_offset);
1685 }
1686 max_distance += ai->unit_size;
1687
1688 /* warn if maximum distance is further than 75% of vmalloc space */
1689 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1a0c3298 1690 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1691 "space 0x%lx\n", max_distance,
1692 (unsigned long)(VMALLOC_END - VMALLOC_START));
1693#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1694 /* and fail if we have fallback */
1695 rc = -EINVAL;
1696 goto out_free;
1697#endif
1698 }
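	/*
	 * Editorial note: max_distance is the span a single percpu
	 * vmalloc area must cover for later dynamic chunks.  On a 32bit
	 * NUMA box whose nodes sit gigabytes apart, that span dwarfs the
	 * few hundred MiB of vmalloc space, so this path warns and, when
	 * the page-based fallback is built in, returns -EINVAL so the
	 * caller can fall back to pcpu_page_first_chunk().
	 */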
c8826dd5 1699
004018e2 1700 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1701 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1702 ai->dyn_size, ai->unit_size);
d4b95f80 1703
fb435d52 1704 rc = pcpu_setup_first_chunk(ai, base);
1705 goto out_free;
1706
1707out_free_areas:
1708 for (group = 0; group < ai->nr_groups; group++)
1709 free_fn(areas[group],
1710 ai->groups[group].nr_units * ai->unit_size);
1711out_free:
fd1e8a1f 1712 pcpu_free_alloc_info(ai);
1713 if (areas)
1714 free_bootmem(__pa(areas), areas_size);
fb435d52 1715 return rc;
d4b95f80 1716}
3c9a024f 1717#endif /* BUILD_EMBED_FIRST_CHUNK */
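
/*
 * Editorial sketch (not part of the original file): one way an arch with
 * NUMA topology information might wire pcpu_embed_first_chunk() into its
 * own setup_per_cpu_areas().  All example_* names are hypothetical, and a
 * real port would allocate from the node owning @cpu and pick an
 * atom_size matching its large-page size (PMD_SIZE is used here only as a
 * plausible value).
 */
#if 0	/* illustration only, never compiled */
static void * __init example_pcpu_alloc(unsigned int cpu, size_t size,
					size_t align)
{
	/* a NUMA-aware arch would use a node-local boot allocator here */
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init example_pcpu_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init example_cpu_distance(unsigned int from, unsigned int to)
{
	return cpu_to_node(from) == cpu_to_node(to) ? LOCAL_DISTANCE
						    : REMOTE_DISTANCE;
}

void __init example_setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
				    example_cpu_distance,
				    example_pcpu_alloc, example_pcpu_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif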
d4b95f80 1718
3c9a024f 1719#ifdef BUILD_PAGE_FIRST_CHUNK
d4b95f80 1720/**
00ae4064 1721 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1722 * @reserved_size: the size of reserved percpu area in bytes
1723 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
25985edc 1724 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1725 * @populate_pte_fn: function to populate pte
1726 *
1727 * This is a helper to ease setting up page-remapped first percpu
1728 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1729 *
1730 * This is the basic allocator. Static percpu area is allocated
1731 * page-by-page into vmalloc area.
1732 *
1733 * RETURNS:
fb435d52 1734 * 0 on success, -errno on failure.
d4b95f80 1735 */
1736int __init pcpu_page_first_chunk(size_t reserved_size,
1737 pcpu_fc_alloc_fn_t alloc_fn,
1738 pcpu_fc_free_fn_t free_fn,
1739 pcpu_fc_populate_pte_fn_t populate_pte_fn)
d4b95f80 1740{
8f05a6a6 1741 static struct vm_struct vm;
fd1e8a1f 1742 struct pcpu_alloc_info *ai;
00ae4064 1743 char psize_str[16];
ce3141a2 1744 int unit_pages;
d4b95f80 1745 size_t pages_size;
ce3141a2 1746 struct page **pages;
fb435d52 1747 int unit, i, j, rc;
d4b95f80 1748
1749 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1750
4ba6ce25 1751 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1752 if (IS_ERR(ai))
1753 return PTR_ERR(ai);
1754 BUG_ON(ai->nr_groups != 1);
1755 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1756
1757 unit_pages = ai->unit_size >> PAGE_SHIFT;
1758
1759 /* unaligned allocations can't be freed, round up to page size */
1760 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1761 sizeof(pages[0]));
ce3141a2 1762 pages = alloc_bootmem(pages_size);
d4b95f80 1763
8f05a6a6 1764 /* allocate pages */
d4b95f80 1765 j = 0;
fd1e8a1f 1766 for (unit = 0; unit < num_possible_cpus(); unit++)
ce3141a2 1767 for (i = 0; i < unit_pages; i++) {
fd1e8a1f 1768 unsigned int cpu = ai->groups[0].cpu_map[unit];
1769 void *ptr;
1770
3cbc8565 1771 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
d4b95f80 1772 if (!ptr) {
1773 pr_warning("PERCPU: failed to allocate %s page "
1774 "for cpu%u\n", psize_str, cpu);
1775 goto enomem;
1776 }
1777 /* kmemleak tracks the percpu allocations separately */
1778 kmemleak_free(ptr);
ce3141a2 1779 pages[j++] = virt_to_page(ptr);
1780 }
1781
1782 /* allocate vm area, map the pages and copy static data */
1783 vm.flags = VM_ALLOC;
fd1e8a1f 1784 vm.size = num_possible_cpus() * ai->unit_size;
1785 vm_area_register_early(&vm, PAGE_SIZE);
1786
fd1e8a1f 1787 for (unit = 0; unit < num_possible_cpus(); unit++) {
1d9d3257 1788 unsigned long unit_addr =
fd1e8a1f 1789 (unsigned long)vm.addr + unit * ai->unit_size;
8f05a6a6 1790
ce3141a2 1791 for (i = 0; i < unit_pages; i++)
1792 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1793
1794 /* pte already populated, the following shouldn't fail */
1795 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1796 unit_pages);
1797 if (rc < 0)
1798 panic("failed to map percpu area, err=%d\n", rc);
66c3a757 1799
1800 /*
1801 * FIXME: Archs with virtual cache should flush local
1802 * cache for the linear mapping here - something
1803 * equivalent to flush_cache_vmap() on the local cpu.
1804 * flush_cache_vmap() can't be used as most supporting
1805 * data structures are not set up yet.
1806 */
1807
1808 /* copy static data */
fd1e8a1f 1809 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1810 }
1811
1812 /* we're ready, commit */
1d9d3257 1813 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1814 unit_pages, psize_str, vm.addr, ai->static_size,
1815 ai->reserved_size, ai->dyn_size);
d4b95f80 1816
fb435d52 1817 rc = pcpu_setup_first_chunk(ai, vm.addr);
1818 goto out_free_ar;
1819
1820enomem:
1821 while (--j >= 0)
ce3141a2 1822 free_fn(page_address(pages[j]), PAGE_SIZE);
fb435d52 1823 rc = -ENOMEM;
d4b95f80 1824out_free_ar:
ce3141a2 1825 free_bootmem(__pa(pages), pages_size);
fd1e8a1f 1826 pcpu_free_alloc_info(ai);
fb435d52 1827 return rc;
d4b95f80 1828}
3c9a024f 1829#endif /* BUILD_PAGE_FIRST_CHUNK */
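
/*
 * Editorial sketch (not part of the original file): the page-based helper
 * only ever asks for PAGE_SIZE allocations, so a caller can reuse the
 * example_pcpu_alloc()/example_pcpu_free() helpers from the sketch above
 * and only has to supply a PTE populator.  example_populate_pte() is a
 * hypothetical stub; a real arch must make sure page tables covering
 * @addr exist before __pcpu_map_pages() runs.
 */
#if 0	/* illustration only, never compiled */
static void __init example_populate_pte(unsigned long addr)
{
	/* allocate and install pgd/pud/pmd entries covering @addr here */
}

void __init example_setup_per_cpu_areas_paged(void)
{
	int rc;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				   example_pcpu_alloc, example_pcpu_free,
				   example_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);
}
#endif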
d4b95f80 1830
bbddff05 1831#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
e74e3962 1832/*
bbddff05 1833 * Generic SMP percpu area setup.
1834 *
1835 * The embedding helper is used because its behavior closely resembles
1836 * the original non-dynamic generic percpu area setup. This is
1837 * important because many archs have addressing restrictions and might
1838 * fail if the percpu area is located far away from the previous
1839 * location. As an added bonus, in non-NUMA cases, embedding is
1840 * generally a good idea TLB-wise because percpu area can piggy back
1841 * on the physical linear memory mapping which uses large page
1842 * mappings on applicable archs.
1843 */
1844unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1845EXPORT_SYMBOL(__per_cpu_offset);
1846
1847static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1848 size_t align)
1849{
1850 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1851}
66c3a757 1852
1853static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1854{
1855 free_bootmem(__pa(ptr), size);
1856}
1857
1858void __init setup_per_cpu_areas(void)
1859{
1860 unsigned long delta;
1861 unsigned int cpu;
fb435d52 1862 int rc;
1863
1864 /*
1865 * Always reserve area for module percpu variables. That's
1866 * what the legacy allocator did.
1867 */
fb435d52 1868 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1869 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1870 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
fb435d52 1871 if (rc < 0)
bbddff05 1872 panic("Failed to initialize percpu areas.");
1873
1874 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1875 for_each_possible_cpu(cpu)
fb435d52 1876 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
66c3a757 1877}
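/*
 * Editorial note: per_cpu(var, cpu) resolves to the static address of
 * `var` plus __per_cpu_offset[cpu], so the delta computed above bridges
 * the gap between the kernel-image copy of the static area
 * (__per_cpu_start) and that cpu's unit inside the first chunk.
 */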
1878#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1879
1880#else /* CONFIG_SMP */
1881
1882/*
1883 * UP percpu area setup.
1884 *
1885 * UP always uses km-based percpu allocator with identity mapping.
1886 * Static percpu variables are indistinguishable from the usual static
1887 * variables and don't require any special preparation.
1888 */
1889void __init setup_per_cpu_areas(void)
1890{
1891 const size_t unit_size =
1892 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1893 PERCPU_DYNAMIC_RESERVE));
1894 struct pcpu_alloc_info *ai;
1895 void *fc;
1896
1897 ai = pcpu_alloc_alloc_info(1, 1);
1898 fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
1899 if (!ai || !fc)
1900 panic("Failed to allocate memory for percpu areas.");
1901 /* kmemleak tracks the percpu allocations separately */
1902 kmemleak_free(fc);
1903
1904 ai->dyn_size = unit_size;
1905 ai->unit_size = unit_size;
1906 ai->atom_size = unit_size;
1907 ai->alloc_size = unit_size;
1908 ai->groups[0].nr_units = 1;
1909 ai->groups[0].cpu_map[0] = 0;
1910
1911 if (pcpu_setup_first_chunk(ai, fc) < 0)
1912 panic("Failed to initialize percpu areas.");
1913}
1914
1915#endif /* CONFIG_SMP */
1916
1917/*
1918 * First and reserved chunks are initialized with temporary allocation
1919 * map in initdata so that they can be used before slab is online.
1920 * This function is called after slab is brought up and replaces those
1921 * with properly allocated maps.
1922 */
1923void __init percpu_init_late(void)
1924{
1925 struct pcpu_chunk *target_chunks[] =
1926 { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1927 struct pcpu_chunk *chunk;
1928 unsigned long flags;
1929 int i;
1930
1931 for (i = 0; (chunk = target_chunks[i]); i++) {
1932 int *map;
1933 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1934
1935 BUILD_BUG_ON(size > PAGE_SIZE);
1936
90459ce0 1937 map = pcpu_mem_zalloc(size);
1938 BUG_ON(!map);
1939
1940 spin_lock_irqsave(&pcpu_lock, flags);
1941 memcpy(map, chunk->map, size);
1942 chunk->map = map;
1943 spin_unlock_irqrestore(&pcpu_lock, flags);
1944 }
1945}