fs/mbcache.c
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as a key and provide a value that may represent
 * a block or inode number. That's why keys need not be unique (hashes of
 * different data may be the same). However, the user-provided value always
 * uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
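
/*
 * Illustrative sketch of a typical caller (not part of this file; the names
 * demo_cache, key and block are hypothetical). It shows the intended
 * lifecycle: create a cache, insert an entry keyed by a hash, look it up by
 * key and value, drop the reference, delete it, and destroy the cache.
 *
 *	struct mb_cache *demo_cache;
 *	struct mb_cache_entry *ce;
 *	u32 key;	// typically a hash of the data
 *	u64 block;	// e.g. a block or inode number
 *
 *	demo_cache = mb_cache_create(10);	// 2^10 hash buckets
 *	if (!demo_cache)
 *		return -ENOMEM;
 *	mb_cache_entry_create(demo_cache, GFP_NOFS, key, block, true);
 *	ce = mb_cache_entry_get(demo_cache, key, block);
 *	if (ce) {
 *		mb_cache_entry_touch(demo_cache, ce);
 *		mb_cache_entry_put(demo_cache, ce);
 *	}
 *	mb_cache_entry_delete(demo_cache, key, block);
 *	mb_cache_destroy(demo_cache);
 */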

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head *c_hash;
	/* log2 of hash table size */
	int c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t c_list_lock;
	struct list_head c_list;
	/* Number of entries in cache */
	unsigned long c_entry_count;
	struct shrinker c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates entry in @cache with key @key and value @value. The function returns
 * -EBUSY if entry with the same key and value already exists in cache.
 * Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for hash, one ref returned */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
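
/*
 * Illustrative sketch (hypothetical caller): -EBUSY from
 * mb_cache_entry_create() means an entry with the same key and value is
 * already cached; a deduplicating caller can usually ignore it.
 *
 *	int err;
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, key, block, true);
 *	if (err && err != -EBUSY)
 *		return err;	// real failure, e.g. -ENOMEM
 *	// -EBUSY: the (key, block) pair was already present; nothing to do
 */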

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races with the
 * search), finds the first reusable entry in the hash chain. The function drops
 * reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
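
/*
 * Illustrative sketch (hypothetical caller) of walking all reusable entries
 * with a given key. mb_cache_entry_find_next() drops the reference to the
 * entry it was passed, so only the entry the caller keeps on early exit needs
 * an explicit mb_cache_entry_put(). demo_candidate_matches() is a hypothetical
 * predicate standing in for whatever comparison the caller performs.
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_find_first(cache, key);
 *	while (ce) {
 *		if (demo_candidate_matches(ce))
 *			break;			// keep the reference to ce
 *		ce = mb_cache_entry_find_next(cache, ce);
 *	}
 *	if (ce) {
 *		// use ce->e_value, e.g. read the candidate block
 *		mb_cache_entry_put(cache, ce);
 *	}
 */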

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete - remove a cache entry
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
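
/*
 * Sizing example (illustrative, derived from the code above):
 * mb_cache_create(10) allocates 2^10 = 1024 hash buckets and sets
 * c_max_entries to 1024 << 4 = 16384. Once the entry count reaches 16384,
 * each background shrink pass scans up to
 * c_max_entries / SHRINK_DIVISOR = 16384 / 16 = 1024 entries, and if the
 * count ever reaches 2 * 16384, callers of mb_cache_entry_create() also
 * scan SYNC_SHRINK_BATCH (64) entries synchronously.
 */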

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");