1 /*
2 * z3fold.c
3 *
4 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
5 * Copyright (C) 2016, Sony Mobile Communications Inc.
6 *
7 * This implementation is based on zbud written by Seth Jennings.
8 *
9 * z3fold is a special purpose allocator for storing compressed pages. It
10 * can store up to three compressed pages per page, which improves the
11 * compression ratio of zbud while retaining its main concepts (e.g. always
12 * storing an integral number of objects per page) and simplicity.
13 * It still has simple and deterministic reclaim properties that make it
14 * preferable to a higher density approach (with no requirement on an integral
15 * number of objects per page) when reclaim is used.
16 *
17 * As in zbud, pages are divided into "chunks". The size of the chunks is
18 * fixed at compile time and is determined by NCHUNKS_ORDER below.
19 *
20 * z3fold doesn't export any API and is meant to be used via zpool API.
21 */
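
z3fold has no exported API of its own; callers reach it through the generic zpool layer, via the driver glue registered at the bottom of this file. The sketch below shows that round trip as it would look against this kernel generation's zpool interface; demo_store(), its buffer handling and its error paths are invented for illustration and are not part of this file.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

/* Hypothetical caller, for illustration only. */
static int demo_store(const void *data, size_t len)
{
	struct zpool *pool;
	unsigned long handle;
	void *dst;
	int ret;

	pool = zpool_create_pool("z3fold", "demo", GFP_KERNEL, NULL);
	if (!pool)
		return -ENOMEM;

	/* z3fold_alloc() rejects __GFP_HIGHMEM and sizes above PAGE_SIZE. */
	ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memcpy(dst, data, len);
	zpool_unmap_handle(pool, handle);

	zpool_free(pool, handle);
out:
	zpool_destroy_pool(pool);
	return ret;
}
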
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/atomic.h>
26 #include <linux/sched.h>
27 #include <linux/list.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/preempt.h>
32 #include <linux/workqueue.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/zpool.h>
36
37 /*****************
38 * Structures
39 *****************/
40 struct z3fold_pool;
41 struct z3fold_ops {
42 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
43 };
44
45 enum buddy {
46 HEADLESS = 0,
47 FIRST,
48 MIDDLE,
49 LAST,
50 BUDDIES_MAX
51 };
52
53 /*
54 * struct z3fold_header - z3fold page metadata occupying first chunks of each
55 * z3fold page, except for HEADLESS pages
56 * @buddy: links the z3fold page into the relevant list in the
57 * pool
58 * @page_lock: per-page lock
59 * @refcount: reference count for the z3fold page
60 * @work: work_struct for page layout optimization
61 * @pool: pointer to the pool which this page belongs to
62 * @cpu: CPU which this page "belongs" to
63 * @first_chunks: the size of the first buddy in chunks, 0 if free
64 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
65 * @last_chunks: the size of the last buddy in chunks, 0 if free
66 * @first_num: the starting number (for the first handle)
67 */
68 struct z3fold_header {
69 struct list_head buddy;
70 spinlock_t page_lock;
71 struct kref refcount;
72 struct work_struct work;
73 struct z3fold_pool *pool;
74 short cpu;
75 unsigned short first_chunks;
76 unsigned short middle_chunks;
77 unsigned short last_chunks;
78 unsigned short start_middle;
79 unsigned short first_num:2;
80 };
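
To make the chunk-accounting fields concrete, the intended layout of a non-HEADLESS page is roughly the following (a hand-drawn sketch, not generated from the code):

/*
 * chunk 0 .. ZHDR_CHUNKS-1   : struct z3fold_header (the struct above)
 * next first_chunks chunks   : FIRST buddy
 * ... possible free space ...
 * start_middle onwards       : MIDDLE buddy, middle_chunks long (compaction
 *                              may slide it towards a neighbour)
 * ... possible free space ...
 * final last_chunks chunks   : LAST buddy, packed against the end of the page
 */
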
81
82 /*
83 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
84 * adjusting internal fragmentation. It also determines the number of
85 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
86 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
87 * at the beginning of an allocated page are occupied by the z3fold header, so
88 * NCHUNKS will be calculated as 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
89 * which is the maximum number of free chunks in a z3fold page; there will
90 * likewise be 63, or 62, freelists per pool.
91 */
92 #define NCHUNKS_ORDER 6
93
94 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
95 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
96 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
97 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
98 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
99 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
100
101 #define BUDDY_MASK (0x3)
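
A worked evaluation of these macros, assuming 4 KiB pages (PAGE_SHIFT = 12); the header size, and therefore ZHDR_CHUNKS, depends on the architecture and config:

/*
 * CHUNK_SHIFT  = 12 - 6 = 6          =>  CHUNK_SIZE = 64 bytes
 * TOTAL_CHUNKS = 4096 >> 6 = 64
 * ZHDR_CHUNKS  = ZHDR_SIZE_ALIGNED >> 6  (1 or 2, depending on how large
 *                struct z3fold_header ends up on a given build)
 * NCHUNKS      = 64 - ZHDR_CHUNKS = 63 or 62
 *
 * A size_to_chunks(700) request, for instance, needs (700 + 63) >> 6 = 11
 * of those chunks.
 */
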
102
103 /**
104 * struct z3fold_pool - stores metadata for each z3fold pool
105 * @name: pool name
106 * @lock: protects pool unbuddied/lru lists
107 * @stale_lock: protects pool stale page list
108 * @unbuddied: per-cpu array of lists tracking z3fold pages that contain at
109 *             most two buddies (i.e. still have free space); the list each
110 *             z3fold page is added to depends on the size of its free region.
111 * @lru: list tracking the z3fold pages in LRU order by most recently
112 * added buddy.
113 * @stale: list of pages marked for freeing
114 * @pages_nr: number of z3fold pages in the pool.
115 * @ops: pointer to a structure of user defined operations specified at
116 * pool creation time.
117 * @compact_wq: workqueue for page layout background optimization
118 * @release_wq: workqueue for safe page release
119 * @work: work_struct for safe page release
120 *
121 * This structure is allocated at pool creation time and maintains metadata
122 * pertaining to a particular z3fold pool.
123 */
124 struct z3fold_pool {
125 const char *name;
126 spinlock_t lock;
127 spinlock_t stale_lock;
128 struct list_head *unbuddied;
129 struct list_head lru;
130 struct list_head stale;
131 atomic64_t pages_nr;
132 const struct z3fold_ops *ops;
133 struct zpool *zpool;
134 const struct zpool_ops *zpool_ops;
135 struct workqueue_struct *compact_wq;
136 struct workqueue_struct *release_wq;
137 struct work_struct work;
138 };
139
140 /*
141 * Internal z3fold page flags
142 */
143 enum z3fold_page_flags {
144 PAGE_HEADLESS = 0,
145 MIDDLE_CHUNK_MAPPED,
146 NEEDS_COMPACTING,
147 PAGE_STALE,
148 UNDER_RECLAIM
149 };
150
151 /*****************
152 * Helpers
153 *****************/
154
155 /* Converts an allocation size in bytes to size in z3fold chunks */
156 static int size_to_chunks(size_t size)
157 {
158 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
159 }
160
161 #define for_each_unbuddied_list(_iter, _begin) \
162 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
163
164 static void compact_page_work(struct work_struct *w);
165
166 /* Initializes the z3fold header of a newly allocated z3fold page */
167 static struct z3fold_header *init_z3fold_page(struct page *page,
168 struct z3fold_pool *pool)
169 {
170 struct z3fold_header *zhdr = page_address(page);
171
172 INIT_LIST_HEAD(&page->lru);
173 clear_bit(PAGE_HEADLESS, &page->private);
174 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
175 clear_bit(NEEDS_COMPACTING, &page->private);
176 clear_bit(PAGE_STALE, &page->private);
177 clear_bit(UNDER_RECLAIM, &page->private);
178
179 spin_lock_init(&zhdr->page_lock);
180 kref_init(&zhdr->refcount);
181 zhdr->first_chunks = 0;
182 zhdr->middle_chunks = 0;
183 zhdr->last_chunks = 0;
184 zhdr->first_num = 0;
185 zhdr->start_middle = 0;
186 zhdr->cpu = -1;
187 zhdr->pool = pool;
188 INIT_LIST_HEAD(&zhdr->buddy);
189 INIT_WORK(&zhdr->work, compact_page_work);
190 return zhdr;
191 }
192
193 /* Resets the struct page fields and frees the page */
194 static void free_z3fold_page(struct page *page)
195 {
196 __free_page(page);
197 }
198
199 /* Lock a z3fold page */
200 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
201 {
202 spin_lock(&zhdr->page_lock);
203 }
204
205 /* Try to lock a z3fold page */
206 static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
207 {
208 return spin_trylock(&zhdr->page_lock);
209 }
210
211 /* Unlock a z3fold page */
212 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
213 {
214 spin_unlock(&zhdr->page_lock);
215 }
216
217 /*
218 * Encodes the handle of a particular buddy within a z3fold page
219 * Pool lock should be held as this function accesses first_num
220 */
221 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
222 {
223 unsigned long handle;
224
225 handle = (unsigned long)zhdr;
226 if (bud != HEADLESS)
227 handle += (bud + zhdr->first_num) & BUDDY_MASK;
228 return handle;
229 }
230
231 /* Returns the z3fold page where a given handle is stored */
232 static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
233 {
234 return (struct z3fold_header *)(handle & PAGE_MASK);
235 }
236
237 /*
238 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
239 * but that doesn't matter, because the masking will result in the
240 * correct buddy number.
241 */
242 static enum buddy handle_to_buddy(unsigned long handle)
243 {
244 struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
245 return (handle - zhdr->first_num) & BUDDY_MASK;
246 }
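
A worked round trip through these helpers (the page address is made up): suppose the z3fold page sits at 0xffff888012345000 and first_num is currently 1.

/*
 * encode_handle(zhdr, LAST)  : handle = 0x...345000 + ((3 + 1) & 0x3)
 *                                     = 0x...345000
 * handle_to_z3fold_header() : handle & PAGE_MASK = 0x...345000  (the page)
 * handle_to_buddy()         : (handle - first_num) & 0x3, on the low bits:
 *                             (0 - 1) & 0x3 = 3 = LAST
 *
 * The low two bits wrap modulo 4, which is why the "< first_num" case noted
 * above is harmless.
 */
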
247
248 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
249 {
250 struct page *page = virt_to_page(zhdr);
251 struct z3fold_pool *pool = zhdr->pool;
252
253 WARN_ON(!list_empty(&zhdr->buddy));
254 set_bit(PAGE_STALE, &page->private);
255 clear_bit(NEEDS_COMPACTING, &page->private);
256 spin_lock(&pool->lock);
257 if (!list_empty(&page->lru))
258 list_del(&page->lru);
259 spin_unlock(&pool->lock);
260 if (locked)
261 z3fold_page_unlock(zhdr);
262 spin_lock(&pool->stale_lock);
263 list_add(&zhdr->buddy, &pool->stale);
264 queue_work(pool->release_wq, &pool->work);
265 spin_unlock(&pool->stale_lock);
266 }
267
268 static void __attribute__((__unused__))
269 release_z3fold_page(struct kref *ref)
270 {
271 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
272 refcount);
273 __release_z3fold_page(zhdr, false);
274 }
275
276 static void release_z3fold_page_locked(struct kref *ref)
277 {
278 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
279 refcount);
280 WARN_ON(z3fold_page_trylock(zhdr));
281 __release_z3fold_page(zhdr, true);
282 }
283
284 static void release_z3fold_page_locked_list(struct kref *ref)
285 {
286 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
287 refcount);
288 spin_lock(&zhdr->pool->lock);
289 list_del_init(&zhdr->buddy);
290 spin_unlock(&zhdr->pool->lock);
291
292 WARN_ON(z3fold_page_trylock(zhdr));
293 __release_z3fold_page(zhdr, true);
294 }
295
296 static void free_pages_work(struct work_struct *w)
297 {
298 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
299
300 spin_lock(&pool->stale_lock);
301 while (!list_empty(&pool->stale)) {
302 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
303 struct z3fold_header, buddy);
304 struct page *page = virt_to_page(zhdr);
305
306 list_del(&zhdr->buddy);
307 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
308 continue;
309 spin_unlock(&pool->stale_lock);
310 cancel_work_sync(&zhdr->work);
311 free_z3fold_page(page);
312 cond_resched();
313 spin_lock(&pool->stale_lock);
314 }
315 spin_unlock(&pool->stale_lock);
316 }
317
318 /*
319 * Returns the number of free chunks in a z3fold page.
320 * NB: can't be used with HEADLESS pages.
321 */
322 static int num_free_chunks(struct z3fold_header *zhdr)
323 {
324 int nfree;
325 /*
326 * If there is a middle object, pick up the bigger free space
327 * either before or after it. Otherwise just subtract the number
328 * of chunks occupied by the first and the last objects.
329 */
330 if (zhdr->middle_chunks != 0) {
331 int nfree_before = zhdr->first_chunks ?
332 0 : zhdr->start_middle - ZHDR_CHUNKS;
333 int nfree_after = zhdr->last_chunks ?
334 0 : TOTAL_CHUNKS -
335 (zhdr->start_middle + zhdr->middle_chunks);
336 nfree = max(nfree_before, nfree_after);
337 } else
338 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
339 return nfree;
340 }
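
A worked example of the calculation above (the chunk counts are invented; TOTAL_CHUNKS = 64 and ZHDR_CHUNKS = 1 are assumed):

/*
 * first_chunks = 0, start_middle = 20, middle_chunks = 10, last_chunks = 5
 *
 * nfree_before = start_middle - ZHDR_CHUNKS = 20 - 1 = 19
 * nfree_after  = 0                           (the last buddy is in use)
 * nfree        = max(19, 0)                  = 19
 *
 * With no middle buddy the result is simply
 * NCHUNKS - first_chunks - last_chunks.
 */
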
341
342 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
343 unsigned short dst_chunk)
344 {
345 void *beg = zhdr;
346 return memmove(beg + (dst_chunk << CHUNK_SHIFT),
347 beg + (zhdr->start_middle << CHUNK_SHIFT),
348 zhdr->middle_chunks << CHUNK_SHIFT);
349 }
350
351 #define BIG_CHUNK_GAP 3
352 /* Has to be called with lock held */
353 static int z3fold_compact_page(struct z3fold_header *zhdr)
354 {
355 struct page *page = virt_to_page(zhdr);
356
357 if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
358 return 0; /* can't move middle chunk, it's used */
359
360 if (zhdr->middle_chunks == 0)
361 return 0; /* nothing to compact */
362
363 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
364 /* move to the beginning */
365 mchunk_memmove(zhdr, ZHDR_CHUNKS);
366 zhdr->first_chunks = zhdr->middle_chunks;
367 zhdr->middle_chunks = 0;
368 zhdr->start_middle = 0;
369 zhdr->first_num++;
370 return 1;
371 }
372
373 /*
374 * moving data is expensive, so let's only do that if
375 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
376 */
377 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
378 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
379 BIG_CHUNK_GAP) {
380 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
381 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
382 return 1;
383 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
384 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
385 + zhdr->middle_chunks) >=
386 BIG_CHUNK_GAP) {
387 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
388 zhdr->middle_chunks;
389 mchunk_memmove(zhdr, new_start);
390 zhdr->start_middle = new_start;
391 return 1;
392 }
393
394 return 0;
395 }
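
A concrete illustration of the first of the two BIG_CHUNK_GAP branches above (the chunk numbers are invented, ZHDR_CHUNKS = 1 is assumed):

/*
 * first_chunks = 10, start_middle = 20, middle_chunks = 8, last_chunks = 0
 *
 * gap = start_middle - (first_chunks + ZHDR_CHUNKS) = 20 - 11 = 9
 *
 * 9 >= BIG_CHUNK_GAP, so the middle buddy is memmove()'d down to chunk 11
 * and start_middle becomes 11, merging the two free regions into one larger
 * region at the end of the page.
 */
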
396
397 static void do_compact_page(struct z3fold_header *zhdr, bool locked)
398 {
399 struct z3fold_pool *pool = zhdr->pool;
400 struct page *page;
401 struct list_head *unbuddied;
402 int fchunks;
403
404 page = virt_to_page(zhdr);
405 if (locked)
406 WARN_ON(z3fold_page_trylock(zhdr));
407 else
408 z3fold_page_lock(zhdr);
409 if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
410 z3fold_page_unlock(zhdr);
411 return;
412 }
413 spin_lock(&pool->lock);
414 list_del_init(&zhdr->buddy);
415 spin_unlock(&pool->lock);
416
417 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
418 atomic64_dec(&pool->pages_nr);
419 return;
420 }
421
422 z3fold_compact_page(zhdr);
423 unbuddied = get_cpu_ptr(pool->unbuddied);
424 fchunks = num_free_chunks(zhdr);
425 if (fchunks < NCHUNKS &&
426 (!zhdr->first_chunks || !zhdr->middle_chunks ||
427 !zhdr->last_chunks)) {
428 /* the page's not completely free and it's unbuddied */
429 spin_lock(&pool->lock);
430 list_add(&zhdr->buddy, &unbuddied[fchunks]);
431 spin_unlock(&pool->lock);
432 zhdr->cpu = smp_processor_id();
433 }
434 put_cpu_ptr(pool->unbuddied);
435 z3fold_page_unlock(zhdr);
436 }
437
438 static void compact_page_work(struct work_struct *w)
439 {
440 struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
441 work);
442
443 do_compact_page(zhdr, false);
444 }
445
446
447 /*
448 * API Functions
449 */
450
451 /**
452 * z3fold_create_pool() - create a new z3fold pool
453 * @name: pool name
454 * @gfp: gfp flags when allocating the z3fold pool structure
455 * @ops: user-defined operations for the z3fold pool
456 *
457 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
458 * failed.
459 */
460 static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
461 const struct z3fold_ops *ops)
462 {
463 struct z3fold_pool *pool = NULL;
464 int i, cpu;
465
466 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
467 if (!pool)
468 goto out;
469 spin_lock_init(&pool->lock);
470 spin_lock_init(&pool->stale_lock);
471 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
472 if (!pool->unbuddied)
473 goto out_pool;
474 for_each_possible_cpu(cpu) {
475 struct list_head *unbuddied =
476 per_cpu_ptr(pool->unbuddied, cpu);
477 for_each_unbuddied_list(i, 0)
478 INIT_LIST_HEAD(&unbuddied[i]);
479 }
480 INIT_LIST_HEAD(&pool->lru);
481 INIT_LIST_HEAD(&pool->stale);
482 atomic64_set(&pool->pages_nr, 0);
483 pool->name = name;
484 pool->compact_wq = create_singlethread_workqueue(pool->name);
485 if (!pool->compact_wq)
486 goto out_unbuddied;
487 pool->release_wq = create_singlethread_workqueue(pool->name);
488 if (!pool->release_wq)
489 goto out_wq;
490 INIT_WORK(&pool->work, free_pages_work);
491 pool->ops = ops;
492 return pool;
493
494 out_wq:
495 destroy_workqueue(pool->compact_wq);
496 out_unbuddied:
497 free_percpu(pool->unbuddied);
498 out_pool:
499 kfree(pool);
500 out:
501 return NULL;
502 }
503
504 /**
505 * z3fold_destroy_pool() - destroys an existing z3fold pool
506 * @pool: the z3fold pool to be destroyed
507 *
508 * The pool should be emptied before this function is called.
509 */
510 static void z3fold_destroy_pool(struct z3fold_pool *pool)
511 {
512 destroy_workqueue(pool->release_wq);
513 destroy_workqueue(pool->compact_wq);
514 kfree(pool);
515 }
516
517 /**
518 * z3fold_alloc() - allocates a region of a given size
519 * @pool: z3fold pool from which to allocate
520 * @size: size in bytes of the desired allocation
521 * @gfp: gfp flags used if the pool needs to grow
522 * @handle: handle of the new allocation
523 *
524 * This function will attempt to find a free region in the pool large enough to
525 * satisfy the allocation request. A search of the unbuddied lists is
526 * performed first. If no suitable free region is found, then a new page is
527 * allocated and added to the pool to satisfy the request.
528 *
529 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
530 * as z3fold pool pages.
531 *
532 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
533 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
534 * a new page.
535 */
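
To put numbers on the size thresholds described above (the figures assume 4 KiB pages and a header that rounds up to a single 64-byte chunk, which may not hold on every configuration):

/*
 * - size_to_chunks(1000) = (1000 + 63) >> 6 = 16, so the unbuddied search
 *   below starts at unbuddied[16] and walks towards lists of pages with
 *   more free chunks.
 * - Anything bigger than PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE
 *   (4096 - 64 - 64 = 3968 bytes under the stated assumptions) cannot share
 *   a page and is stored HEADLESS, taking a whole page to itself.
 */
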
536 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
537 unsigned long *handle)
538 {
539 int chunks = 0, i, freechunks;
540 struct z3fold_header *zhdr = NULL;
541 struct page *page = NULL;
542 enum buddy bud;
543 bool can_sleep = (gfp & __GFP_RECLAIM) == __GFP_RECLAIM;
544
545 if (!size || (gfp & __GFP_HIGHMEM))
546 return -EINVAL;
547
548 if (size > PAGE_SIZE)
549 return -ENOSPC;
550
551 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
552 bud = HEADLESS;
553 else {
554 struct list_head *unbuddied;
555 chunks = size_to_chunks(size);
556
557 lookup:
558 /* First, try to find an unbuddied z3fold page. */
559 unbuddied = get_cpu_ptr(pool->unbuddied);
560 for_each_unbuddied_list(i, chunks) {
561 struct list_head *l = &unbuddied[i];
562
563 zhdr = list_first_entry_or_null(READ_ONCE(l),
564 struct z3fold_header, buddy);
565
566 if (!zhdr)
567 continue;
568
569 /* Re-check under lock. */
570 spin_lock(&pool->lock);
571 l = &unbuddied[i];
572 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
573 struct z3fold_header, buddy)) ||
574 !z3fold_page_trylock(zhdr)) {
575 spin_unlock(&pool->lock);
576 put_cpu_ptr(pool->unbuddied);
577 goto lookup;
578 }
579 list_del_init(&zhdr->buddy);
580 zhdr->cpu = -1;
581 spin_unlock(&pool->lock);
582
583 page = virt_to_page(zhdr);
584 if (test_bit(NEEDS_COMPACTING, &page->private)) {
585 z3fold_page_unlock(zhdr);
586 zhdr = NULL;
587 put_cpu_ptr(pool->unbuddied);
588 if (can_sleep)
589 cond_resched();
590 goto lookup;
591 }
592
593 /*
594 * this page could not be removed from its unbuddied
595 * list while pool lock was held, and then we've taken
596 * page lock so kref_put could not be called before
597 * we got here, so it's safe to just call kref_get()
598 */
599 kref_get(&zhdr->refcount);
600 break;
601 }
602 put_cpu_ptr(pool->unbuddied);
603
604 if (zhdr) {
605 if (zhdr->first_chunks == 0) {
606 if (zhdr->middle_chunks != 0 &&
607 chunks >= zhdr->start_middle)
608 bud = LAST;
609 else
610 bud = FIRST;
611 } else if (zhdr->last_chunks == 0)
612 bud = LAST;
613 else if (zhdr->middle_chunks == 0)
614 bud = MIDDLE;
615 else {
616 if (kref_put(&zhdr->refcount,
617 release_z3fold_page_locked))
618 atomic64_dec(&pool->pages_nr);
619 else
620 z3fold_page_unlock(zhdr);
621 pr_err("No free chunks in unbuddied\n");
622 WARN_ON(1);
623 goto lookup;
624 }
625 goto found;
626 }
627 bud = FIRST;
628 }
629
630 spin_lock(&pool->stale_lock);
631 zhdr = list_first_entry_or_null(&pool->stale,
632 struct z3fold_header, buddy);
633 /*
634 * Before allocating a page, let's see if we can take one from the
635 * stale pages list. cancel_work_sync() can sleep so we must make
636 * sure it won't be called in case we're in atomic context.
637 */
638 if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
639 list_del(&zhdr->buddy);
640 spin_unlock(&pool->stale_lock);
641 if (can_sleep)
642 cancel_work_sync(&zhdr->work);
643 page = virt_to_page(zhdr);
644 } else {
645 spin_unlock(&pool->stale_lock);
646 page = alloc_page(gfp);
647 }
648
649 if (!page)
650 return -ENOMEM;
651
652 atomic64_inc(&pool->pages_nr);
653 zhdr = init_z3fold_page(page, pool);
654
655 if (bud == HEADLESS) {
656 set_bit(PAGE_HEADLESS, &page->private);
657 goto headless;
658 }
659 z3fold_page_lock(zhdr);
660
661 found:
662 if (bud == FIRST)
663 zhdr->first_chunks = chunks;
664 else if (bud == LAST)
665 zhdr->last_chunks = chunks;
666 else {
667 zhdr->middle_chunks = chunks;
668 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
669 }
670
671 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
672 zhdr->middle_chunks == 0) {
673 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
674
675 /* Add to unbuddied list */
676 freechunks = num_free_chunks(zhdr);
677 spin_lock(&pool->lock);
678 list_add(&zhdr->buddy, &unbuddied[freechunks]);
679 spin_unlock(&pool->lock);
680 zhdr->cpu = smp_processor_id();
681 put_cpu_ptr(pool->unbuddied);
682 }
683
684 headless:
685 spin_lock(&pool->lock);
686 /* Add/move z3fold page to beginning of LRU */
687 if (!list_empty(&page->lru))
688 list_del(&page->lru);
689
690 list_add(&page->lru, &pool->lru);
691
692 *handle = encode_handle(zhdr, bud);
693 spin_unlock(&pool->lock);
694 if (bud != HEADLESS)
695 z3fold_page_unlock(zhdr);
696
697 return 0;
698 }
699
700 /**
701 * z3fold_free() - frees the allocation associated with the given handle
702 * @pool: pool in which the allocation resided
703 * @handle: handle associated with the allocation returned by z3fold_alloc()
704 *
705 * In the case that the z3fold page in which the allocation resides is under
706 * reclaim, as indicated by the UNDER_RECLAIM page flag being set, this function
707 * only sets the corresponding first|middle|last_chunks field to 0. The page is
708 * actually freed once all its buddies are evicted (see z3fold_reclaim_page()).
709 */
710 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
711 {
712 struct z3fold_header *zhdr;
713 struct page *page;
714 enum buddy bud;
715
716 zhdr = handle_to_z3fold_header(handle);
717 page = virt_to_page(zhdr);
718
719 if (test_bit(PAGE_HEADLESS, &page->private)) {
720 /* HEADLESS page stored */
721 bud = HEADLESS;
722 } else {
723 z3fold_page_lock(zhdr);
724 bud = handle_to_buddy(handle);
725
726 switch (bud) {
727 case FIRST:
728 zhdr->first_chunks = 0;
729 break;
730 case MIDDLE:
731 zhdr->middle_chunks = 0;
732 zhdr->start_middle = 0;
733 break;
734 case LAST:
735 zhdr->last_chunks = 0;
736 break;
737 default:
738 pr_err("%s: unknown bud %d\n", __func__, bud);
739 WARN_ON(1);
740 z3fold_page_unlock(zhdr);
741 return;
742 }
743 }
744
745 if (bud == HEADLESS) {
746 spin_lock(&pool->lock);
747 list_del(&page->lru);
748 spin_unlock(&pool->lock);
749 free_z3fold_page(page);
750 atomic64_dec(&pool->pages_nr);
751 return;
752 }
753
754 if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
755 atomic64_dec(&pool->pages_nr);
756 return;
757 }
758 if (test_bit(UNDER_RECLAIM, &page->private)) {
759 z3fold_page_unlock(zhdr);
760 return;
761 }
762 if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
763 z3fold_page_unlock(zhdr);
764 return;
765 }
766 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
767 spin_lock(&pool->lock);
768 list_del_init(&zhdr->buddy);
769 spin_unlock(&pool->lock);
770 zhdr->cpu = -1;
771 kref_get(&zhdr->refcount);
772 do_compact_page(zhdr, true);
773 return;
774 }
775 kref_get(&zhdr->refcount);
776 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
777 z3fold_page_unlock(zhdr);
778 }
779
780 /**
781 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
782 * @pool: pool from which a page will attempt to be evicted
783 * @retries: number of pages on the LRU list for which eviction will
784 * be attempted before failing
785 *
786 * z3fold reclaim is different from normal system reclaim in that it is done
787 * from the bottom, up. This is because only the bottom layer, z3fold, has
788 * information on how the allocations are organized within each z3fold page.
789 * This has the potential to create interesting locking situations between
790 * z3fold and the user, however.
791 *
792 * To avoid these, this is how z3fold_reclaim_page() should be called:
793 *
794 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
795 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
796 * call the user-defined eviction handler with the pool and handle as
797 * arguments.
798 *
799 * If the handle can not be evicted, the eviction handler should return
800 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
801 * appropriate list and try the next z3fold page on the LRU up to
802 * a user defined number of retries.
803 *
804 * If the handle is successfully evicted, the eviction handler should
805 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
806 * contains logic to delay freeing the page if the page is under reclaim,
807 * as indicated by the UNDER_RECLAIM flag being set on the underlying page.
808 *
809 * If all buddies in the z3fold page are successfully evicted, then the
810 * z3fold page can be freed.
811 *
812 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
813 * no pages to evict or an eviction handler is not registered, -EAGAIN if
814 * the retry limit was hit.
815 */
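
The eviction handler belongs to the z3fold user; when z3fold is driven through zpool it is the zpool_ops->evict callback, forwarded by z3fold_zpool_evict() further down. The sketch below exists purely to illustrate the contract described above; demo_evict(), demo_zpool_ops and my_backend_write() are invented names, and a real user (e.g. zswap) would do genuine writeback here.

/* Hypothetical zpool user; my_backend_write() does not exist in the kernel. */
static int demo_evict(struct zpool *zpool, unsigned long handle)
{
	void *src;
	int ret;

	src = zpool_map_handle(zpool, handle, ZPOOL_MM_RO);
	ret = my_backend_write(src, handle);	/* push the data somewhere else */
	zpool_unmap_handle(zpool, handle);

	if (ret)
		return ret;	/* not evicted: the page goes back on the LRU */

	zpool_free(zpool, handle);	/* lands in z3fold_free() */
	return 0;
}

static const struct zpool_ops demo_zpool_ops = {
	.evict = demo_evict,
};
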
816 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
817 {
818 int i, ret = 0;
819 struct z3fold_header *zhdr = NULL;
820 struct page *page = NULL;
821 struct list_head *pos;
822 unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
823
824 spin_lock(&pool->lock);
825 if (!pool->ops || !pool->ops->evict || retries == 0) {
826 spin_unlock(&pool->lock);
827 return -EINVAL;
828 }
829 for (i = 0; i < retries; i++) {
830 if (list_empty(&pool->lru)) {
831 spin_unlock(&pool->lock);
832 return -EINVAL;
833 }
834 list_for_each_prev(pos, &pool->lru) {
835 page = list_entry(pos, struct page, lru);
836 if (test_bit(PAGE_HEADLESS, &page->private))
837 /* candidate found */
838 break;
839
840 zhdr = page_address(page);
841 if (!z3fold_page_trylock(zhdr))
842 continue; /* can't evict at this point */
843 kref_get(&zhdr->refcount);
844 list_del_init(&zhdr->buddy);
845 zhdr->cpu = -1;
846 set_bit(UNDER_RECLAIM, &page->private);
847 break;
848 }
849
850 list_del_init(&page->lru);
851 spin_unlock(&pool->lock);
852
853 if (!test_bit(PAGE_HEADLESS, &page->private)) {
854 /*
855 * We need to encode the handles before unlocking, since
856 * we can race with free that will set
857 * (first|last)_chunks to 0
858 */
859 first_handle = 0;
860 last_handle = 0;
861 middle_handle = 0;
862 if (zhdr->first_chunks)
863 first_handle = encode_handle(zhdr, FIRST);
864 if (zhdr->middle_chunks)
865 middle_handle = encode_handle(zhdr, MIDDLE);
866 if (zhdr->last_chunks)
867 last_handle = encode_handle(zhdr, LAST);
868 /*
869 * it's safe to unlock here because we hold a
870 * reference to this page
871 */
872 z3fold_page_unlock(zhdr);
873 } else {
874 first_handle = encode_handle(zhdr, HEADLESS);
875 last_handle = middle_handle = 0;
876 }
877
878 /* Issue the eviction callback(s) */
879 if (middle_handle) {
880 ret = pool->ops->evict(pool, middle_handle);
881 if (ret)
882 goto next;
883 }
884 if (first_handle) {
885 ret = pool->ops->evict(pool, first_handle);
886 if (ret)
887 goto next;
888 }
889 if (last_handle) {
890 ret = pool->ops->evict(pool, last_handle);
891 if (ret)
892 goto next;
893 }
894 next:
895 if (test_bit(PAGE_HEADLESS, &page->private)) {
896 if (ret == 0) {
897 free_z3fold_page(page);
898 return 0;
899 }
900 spin_lock(&pool->lock);
901 list_add(&page->lru, &pool->lru);
902 spin_unlock(&pool->lock);
903 } else {
904 z3fold_page_lock(zhdr);
905 clear_bit(UNDER_RECLAIM, &page->private);
906 if (kref_put(&zhdr->refcount,
907 release_z3fold_page_locked)) {
908 atomic64_dec(&pool->pages_nr);
909 return 0;
910 }
911 /*
912 * if we are here, the page is still not completely
913 * free. Take the global pool lock then to be able
914 * to add it back to the lru list
915 */
916 spin_lock(&pool->lock);
917 list_add(&page->lru, &pool->lru);
918 spin_unlock(&pool->lock);
919 z3fold_page_unlock(zhdr);
920 }
921
922 /* We started off locked so we need to lock the pool back */
923 spin_lock(&pool->lock);
924 }
925 spin_unlock(&pool->lock);
926 return -EAGAIN;
927 }
928
929 /**
930 * z3fold_map() - maps the allocation associated with the given handle
931 * @pool: pool in which the allocation resides
932 * @handle: handle associated with the allocation to be mapped
933 *
934 * Extracts the buddy number from handle and constructs the pointer to the
935 * correct starting chunk within the page.
936 *
937 * Returns: a pointer to the mapped allocation
938 */
939 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
940 {
941 struct z3fold_header *zhdr;
942 struct page *page;
943 void *addr;
944 enum buddy buddy;
945
946 zhdr = handle_to_z3fold_header(handle);
947 addr = zhdr;
948 page = virt_to_page(zhdr);
949
950 if (test_bit(PAGE_HEADLESS, &page->private))
951 goto out;
952
953 z3fold_page_lock(zhdr);
954 buddy = handle_to_buddy(handle);
955 switch (buddy) {
956 case FIRST:
957 addr += ZHDR_SIZE_ALIGNED;
958 break;
959 case MIDDLE:
960 addr += zhdr->start_middle << CHUNK_SHIFT;
961 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
962 break;
963 case LAST:
964 addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
965 break;
966 default:
967 pr_err("unknown buddy id %d\n", buddy);
968 WARN_ON(1);
969 addr = NULL;
970 break;
971 }
972
973 z3fold_page_unlock(zhdr);
974 out:
975 return addr;
976 }
977
978 /**
979 * z3fold_unmap() - unmaps the allocation associated with the given handle
980 * @pool: pool in which the allocation resides
981 * @handle: handle associated with the allocation to be unmapped
982 */
983 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
984 {
985 struct z3fold_header *zhdr;
986 struct page *page;
987 enum buddy buddy;
988
989 zhdr = handle_to_z3fold_header(handle);
990 page = virt_to_page(zhdr);
991
992 if (test_bit(PAGE_HEADLESS, &page->private))
993 return;
994
995 z3fold_page_lock(zhdr);
996 buddy = handle_to_buddy(handle);
997 if (buddy == MIDDLE)
998 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
999 z3fold_page_unlock(zhdr);
1000 }
1001
1002 /**
1003 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1004 * @pool: pool whose size is being queried
1005 *
1006 * Returns: size in pages of the given pool.
1007 */
1008 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1009 {
1010 return atomic64_read(&pool->pages_nr);
1011 }
1012
1013 /*****************
1014 * zpool
1015 ****************/
1016
1017 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1018 {
1019 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1020 return pool->zpool_ops->evict(pool->zpool, handle);
1021 else
1022 return -ENOENT;
1023 }
1024
1025 static const struct z3fold_ops z3fold_zpool_ops = {
1026 .evict = z3fold_zpool_evict
1027 };
1028
1029 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1030 const struct zpool_ops *zpool_ops,
1031 struct zpool *zpool)
1032 {
1033 struct z3fold_pool *pool;
1034
1035 pool = z3fold_create_pool(name, gfp,
1036 zpool_ops ? &z3fold_zpool_ops : NULL);
1037 if (pool) {
1038 pool->zpool = zpool;
1039 pool->zpool_ops = zpool_ops;
1040 }
1041 return pool;
1042 }
1043
1044 static void z3fold_zpool_destroy(void *pool)
1045 {
1046 z3fold_destroy_pool(pool);
1047 }
1048
1049 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1050 unsigned long *handle)
1051 {
1052 return z3fold_alloc(pool, size, gfp, handle);
1053 }
1054 static void z3fold_zpool_free(void *pool, unsigned long handle)
1055 {
1056 z3fold_free(pool, handle);
1057 }
1058
1059 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1060 unsigned int *reclaimed)
1061 {
1062 unsigned int total = 0;
1063 int ret = -EINVAL;
1064
1065 while (total < pages) {
1066 ret = z3fold_reclaim_page(pool, 8);
1067 if (ret < 0)
1068 break;
1069 total++;
1070 }
1071
1072 if (reclaimed)
1073 *reclaimed = total;
1074
1075 return ret;
1076 }
1077
1078 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1079 enum zpool_mapmode mm)
1080 {
1081 return z3fold_map(pool, handle);
1082 }
1083 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1084 {
1085 z3fold_unmap(pool, handle);
1086 }
1087
1088 static u64 z3fold_zpool_total_size(void *pool)
1089 {
1090 return z3fold_get_pool_size(pool) * PAGE_SIZE;
1091 }
1092
1093 static struct zpool_driver z3fold_zpool_driver = {
1094 .type = "z3fold",
1095 .owner = THIS_MODULE,
1096 .create = z3fold_zpool_create,
1097 .destroy = z3fold_zpool_destroy,
1098 .malloc = z3fold_zpool_malloc,
1099 .free = z3fold_zpool_free,
1100 .shrink = z3fold_zpool_shrink,
1101 .map = z3fold_zpool_map,
1102 .unmap = z3fold_zpool_unmap,
1103 .total_size = z3fold_zpool_total_size,
1104 };
1105
1106 MODULE_ALIAS("zpool-z3fold");
1107
1108 static int __init init_z3fold(void)
1109 {
1110 /* Make sure the z3fold header is not larger than the page size */
1111 BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1112 zpool_register_driver(&z3fold_zpool_driver);
1113
1114 return 0;
1115 }
1116
1117 static void __exit exit_z3fold(void)
1118 {
1119 zpool_unregister_driver(&z3fold_zpool_driver);
1120 }
1121
1122 module_init(init_z3fold);
1123 module_exit(exit_z3fold);
1124
1125 MODULE_LICENSE("GPL");
1126 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1127 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");