/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock();
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj))	// might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) {	// not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero-sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
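
/*
 * Editorial sketch (not part of the original header): a zero-length
 * allocation returns ZERO_SIZE_PTR rather than NULL, and kfree()
 * accepts it, so callers need not special-case size == 0:
 *
 *	char *buf = kmalloc(0, GFP_KERNEL);	// buf == ZERO_SIZE_PTR
 *	if (ZERO_OR_NULL_PTR(buf))
 *		;	// no usable memory behind buf; do not dereference
 *	kfree(buf);	// no-op for both ZERO_SIZE_PTR and NULL
 */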

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
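
/*
 * Illustrative cache lifecycle (editor's sketch; the "foo" names are
 * hypothetical, not defined elsewhere in the kernel):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 */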

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you,
 * e.g., add ____cacheline_aligned_in_smp to the struct declaration,
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
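
/*
 * Usage sketch (editorial; hypothetical struct name): the cache picks up
 * the struct's name and alignment automatically:
 *
 *	struct my_record {
 *		int id;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *my_cache;
 *	my_cache = KMEM_CACHE(my_record, SLAB_PANIC);
 */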

/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that
 * is less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators
 * have to do various tricks to work around compiler limitations in
 * order to ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
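
/*
 * Worked example (editorial): on a typical configuration with
 * PAGE_SHIFT == 12 and MAX_ORDER == 11, MAX_ORDER + PAGE_SHIFT - 1 == 22,
 * which is <= 25, so KMALLOC_SHIFT_HIGH == 22, KMALLOC_MAX_SIZE == 4 MB
 * and KMALLOC_MAX_ORDER == 10. The 32 MB cap only takes effect where the
 * page-order limit would otherwise allow allocations above 2^25 bytes.
 */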

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
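
/*
 * Illustrative krealloc() pattern (editor's sketch): assign the result
 * to a temporary so the original buffer is not leaked on failure:
 *
 *	void *tmp = krealloc(buf, new_size, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;	// buf is still valid and still owned
 *	buf = tmp;
 */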

/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags | __GFP_ZERO);
}
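
/*
 * Usage sketch (editorial): kcalloc() returns NULL, rather than a
 * truncated buffer, if n * size would overflow:
 *
 *	struct item *items;	// "item" is a hypothetical struct
 *
 *	items = kcalloc(n, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 */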

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
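
/*
 * Illustrative NUMA placement (editor's sketch): keep per-device data on
 * the node closest to the device; on !CONFIG_NUMA builds the stub above
 * simply falls back to kmalloc():
 *
 *	int node = dev_to_node(dev);
 *	struct priv *p = kmalloc_node(sizeof(*p), GFP_KERNEL, node);
 */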

/*
 * kmalloc_track_caller is a special version of kmalloc that records,
 * for slab leak tracking, the caller of the routine that invoked it
 * rather than its immediate caller.
 * It's useful when the call to kmalloc comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
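
/*
 * Typical use (editor's sketch): a thin wrapper allocator that should
 * attribute allocations to *its* caller rather than to itself:
 *
 *	void *my_alloc(size_t len)	// hypothetical helper
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */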

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records, for slab leak tracking, the caller of the routine that
 * invoked it rather than its immediate caller.
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
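
/*
 * Usage sketch (editorial; "foo" and foo_cache are hypothetical): the
 * common allocate-and-zero pattern, with and without a dedicated cache:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	struct foo *g = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 */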

void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLAB_H */