include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields; obj_offset below is
	 * the offset to the user object within it, and object_size its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
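
/*
 * Illustrative usage sketch (an assumption for documentation, not
 * something defined in this header): a dedicated cache for a
 * hypothetical "struct foo", managed through the API declared in
 * <linux/slab.h>:
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (foo_cache) {
 *		struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *		...
 *		kmem_cache_free(foo_cache, f);
 *		kmem_cache_destroy(foo_cache);
 *	}
 *
 * The allocator fills in the kmem_cache fields above (object_size,
 * gfporder, the per-cpu array caches, etc.) when the cache is created.
 */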

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
			return NULL;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
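
/*
 * Constant-size fast path, illustrated (a sketch, not additional API):
 * when the requested size is a compile-time constant, kmalloc_index()
 * is folded at build time, so a call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * effectively reduces to
 *
 *	buf = kmem_cache_alloc_trace(kmalloc_caches[kmalloc_index(64)],
 *				     GFP_KERNEL, 64);
 *
 * whereas a size only known at run time goes through __kmalloc().
 */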

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
			return NULL;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
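
/*
 * Illustrative sketch (the device and dev_to_node() usage below are
 * assumptions for documentation, not part of this header): allocating
 * close to the NUMA node a device is attached to:
 *
 *	int nid = dev_to_node(&pdev->dev);
 *	void *buf = kmalloc_node(256, GFP_KERNEL, nid);
 *
 * With a constant size this resolves to kmem_cache_alloc_node_trace()
 * on the matching kmalloc cache; otherwise it falls back to
 * __kmalloc_node().
 */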

#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLAB_DEF_H */