#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
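
/*
 * A minimal usage sketch (illustrative, not from this file): code that
 * parses firmware tables might tag a hotpluggable range and later skip it:
 *
 *	memblock_mark_hotplug(base, size);
 *	...
 *	for_each_memblock(memory, r)
 *		if (memblock_is_hotpluggable(r))
 *			continue;
 */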

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
	char *name;
};

struct memblock {
	bool bottom_up;		/* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
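
/*
 * memblock_dbg() expands to an unguarded if, so brace calls that sit in the
 * body of another if/else. A minimal usage sketch:
 *
 *	memblock_dbg("memblock_reserve: [%pa-%pa]\n", &base, &end);
 */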

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or through all of type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
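
/*
 * A minimal usage sketch (illustrative; start/end/i are the caller's
 * locals): walk every range of memblock.memory across all nodes:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
 *			   MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("range: [%pa-%pa]\n", &start, &end);
 */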

/**
 * for_each_mem_range_rev - iterate in reverse through memblock areas from
 * type_a that are not included in type_b, or through all of type_a if
 * type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
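
/*
 * A minimal usage sketch (illustrative): report every reserved range, e.g.
 * from an early-boot diagnostic pass:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */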

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
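
/*
 * A minimal usage sketch (illustrative): note that @i is a plain int here,
 * not the u64 cookie used by the phys_addr_t iterators above:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns %lu-%lu\n", nid, start_pfn, end_pfn);
 */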
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
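
/*
 * A minimal usage sketch (illustrative): total up free memory the way an
 * early-boot report might:
 *
 *	phys_addr_t start, end, total = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		total += end - start;
 */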

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock will allocate memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
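
/*
 * A minimal usage sketch (illustrative): a caller may switch the direction
 * temporarily so an early search is satisfied from low addresses:
 *
 *	memblock_set_bottom_up(true);
 *	addr = memblock_find_in_range(start, end, size, align);
 *	memblock_set_bottom_up(false);
 */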

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
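
/*
 * A minimal usage sketch (illustrative): carve out 64 KiB below 4 GiB.
 * Passing %MEMBLOCK_ALLOC_ACCESSIBLE as @max_addr instead limits the search
 * to memblock.current_limit. memblock_alloc_base() panics when the request
 * cannot be satisfied, while __memblock_alloc_base() returns 0:
 *
 *	phys_addr_t addr = __memblock_alloc_base(SZ_64K, SZ_4K, SZ_4G);
 *	if (!addr)
 *		pr_warn("low allocation failed\n");
 */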
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
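
/*
 * A worked example (illustrative): a reserved region with base 0x1800 and
 * size 0x1000 spans [0x1800, 0x2800). With 4 KiB pages the reserved
 * accessors round outward: base_pfn = PFN_DOWN(0x1800) = 1 and
 * end_pfn = PFN_UP(0x2800) = 3, so every byte of the region is covered.
 * The memory accessors round inward (PFN_UP of base, PFN_DOWN of end),
 * returning only pfns that lie entirely inside the region.
 */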

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)
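
/*
 * A minimal usage sketch (illustrative): @memblock_type is a field name
 * (e.g. memory or reserved) pasted into the expansion, not a pointer:
 *
 *	struct memblock_region *r;
 *
 *	for_each_memblock(memory, r)
 *		pr_info("memory: %pa + %pa\n", &r->base, &r->size);
 */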

/* Note: the caller must declare the loop index "idx" in the enclosing scope. */
#define for_each_memblock_type(memblock_type, rgn)			\
	for (idx = 0, rgn = &memblock_type->regions[0];			\
	     idx < memblock_type->cnt;					\
	     idx++, rgn = &memblock_type->regions[idx])

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
						     phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
							    phys_addr_t end_addr)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */