/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zram. Thus, the allocator is
 * supposed to work well under low memory conditions. In particular, it
 * never attempts higher order page allocation which is very likely to
 * fail under memory pressure. On the other hand, if we just use single
 * (0-order) pages, it would suffer from very high fragmentation --
 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
 * This was one of the major issues with its predecessor (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
 * since this satisfies the requirements of all its current users (in the
 * worst case, page is incompressible and is thus stored "as-is" i.e. in
 * uncompressed form). For allocation requests larger than this size, failure
 * is returned (see zs_malloc).
 *
 * Additionally, zs_malloc() does not return a dereferenceable pointer.
 * Instead, it returns an opaque handle (unsigned long) which encodes the
 * actual location of the allocated object. The reason for this indirection
 * is that zsmalloc does not keep zspages permanently mapped since that would
 * cause issues on 32-bit systems where the VA region for kernel space
 * mappings is very small. So, before using the allocated memory, the object
 * has to be mapped using zs_map_object() to get a usable pointer and
 * subsequently unmapped using zs_unmap_object().
 *
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->zspage_order * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 */
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8
/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS	36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS	BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS		(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK		((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
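
/*
 * For illustration (the exact values depend on the configuration): on a
 * 64-bit build with 4K pages and MAX_PHYSMEM_BITS == BITS_PER_LONG == 64,
 * this yields _PFN_BITS = 52, OBJ_INDEX_BITS = 12 (i.e. PAGE_SHIFT) and
 * OBJ_INDEX_MASK = 0xfff.
 */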
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
#define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
					ZS_SIZE_CLASS_DELTA + 1)
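
/*
 * Worked example for a 4K page size (with OBJ_INDEX_BITS == 12, see above):
 * ZS_MIN_ALLOC_SIZE = MAX(32, (4 << 12) >> 12) = 32,
 * ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16, so
 * ZS_SIZE_CLASSES = (4096 - 32) / 16 + 1 = 255, matching the comment above.
 */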
/*
 * We do not maintain any list for completely empty zspages,
 * since a zspage is freed when it becomes empty.
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	ZS_FULL,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_RECLAIM
};
#define _ZS_NR_AVAILABLE_FULLNESS_GROUPS ZS_FULL

/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
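
/*
 * Worked example: with fullness_threshold_frac == 4 and a zspage that can
 * hold N = 8 objects, the groups are: n == 0 -> ZS_EMPTY,
 * 1 <= n <= 2 -> ZS_ALMOST_EMPTY, 3 <= n <= 7 -> ZS_ALMOST_FULL,
 * n == 8 -> ZS_FULL.
 */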
struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};
/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
	void *next;
};

struct zs_pool {
	struct size_class *size_class[ZS_SIZE_CLASSES];

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

	struct zs_ops *ops;
};
/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};
/* atomic counter indicating which class/fg to reclaim from */
static atomic_t lru_class_fg;
/* specific order of fg we want to reclaim from */
static enum fullness_group lru_fg[] = {
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL
};
#define _ZS_NR_LRU_CLASS_FG (ZS_SIZE_CLASSES * ARRAY_SIZE(lru_fg))
#ifdef CONFIG_ZPOOL

static int zs_zpool_evict(struct zs_pool *pool, unsigned long handle)
{
	return zpool_evict(pool, handle);
}

static struct zs_ops zs_zpool_ops = {
	.evict =	zs_zpool_evict
};
static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(gfp, &zs_zpool_ops);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}
static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	int total = 0, ret = 0;

	while (total < pages) {
		ret = zs_shrink(pool);
		if (ret <= 0)
			break;
		total += ret;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret < 0 ? ret : 0;
}
static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}
static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}
static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}
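
/*
 * Worked example of the encoding above: class_idx == 10 and a fullness
 * value of 1 pack to m = (10 << 4) | 1 = 0xa1, which is stored in
 * page->mapping; get_zspage_mapping() then recovers fullness = 0xa1 & 0xf
 * and class_idx = 0xa1 >> 4.
 */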
/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
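
/*
 * For example, with 4K pages (ZS_MIN_ALLOC_SIZE = 32, ZS_SIZE_CLASS_DELTA =
 * 16), a request of 100 bytes maps to idx = DIV_ROUND_UP(100 - 32, 16) = 5,
 * i.e. the class whose chunk size is 32 + 5 * 16 = 112 bytes.
 */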
/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool. This function returns fullness status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;

	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}
/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}
/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	class = pool->size_class[class_idx];
	newfg = get_fullness_group(page);
	/* Need to do this even if currfg == newfg, to update lru */
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);

	set_zspage_mapping(page, class_idx, newfg);

	return newfg;
}
/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
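
/*
 * Numeric trace of the loop above for class_size = 1536 (3/8 of a 4K page):
 * i = 1 -> waste 1024, usedpc 75; i = 2 -> waste 512, usedpc 93;
 * i = 3 -> waste 0, usedpc 100; i = 4 -> waste 1024, usedpc 93.
 * The maximum is at i = 3, so three pages are linked per zspage, matching
 * the example in the comment.
 */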
/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}
/*
 * Encode <page, obj_idx> as a single handle value.
 * On hardware platforms with physical memory starting at 0x0 the pfn
 * could be 0 so we ensure that the handle will never be 0 by adjusting the
 * encoded obj_idx value before encoding.
 */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);

	return (void *)handle;
}
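
/*
 * For illustration, assuming OBJ_INDEX_BITS == 12: the object with
 * obj_idx == 5 in the page with pfn == 0x1234 is encoded as
 * handle = (0x1234 << 12) | (5 + 1) = 0x1234006; decoding reverses the
 * shift and subtracts the +1 adjustment again.
 */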
/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * obj_location_to_handle().
 */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}
static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
static bool obj_handle_is_free(struct page *first_page,
			struct size_class *class, unsigned long handle)
{
	unsigned long obj, idx, offset;
	struct page *page;
	struct link_free *link;

	BUG_ON(!is_first_page(first_page));

	obj = (unsigned long)first_page->freelist;

	while (obj) {
		if (obj == handle)
			return true;

		obj_handle_to_location(obj, &page, &idx);
		offset = obj_idx_to_offset(page, idx, class->size);

		link = (struct link_free *)kmap_atomic(page) +
				offset / sizeof(*link);
		obj = (unsigned long)link->next;
		kunmap_atomic(link);
	}

	return false;
}
static void obj_free(unsigned long obj, struct page *page, unsigned long offset)
{
	struct page *first_page = get_first_page(page);
	struct link_free *link;

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(page)
							+ offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
}
static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
/*
 * This tries to reclaim all the provided zspage's objects by calling the
 * zs_pool's ops->evict function for each object in use. This requires
 * the zspage's class lock to be held when calling this function. Since
 * the evict function may sleep, this drops the class lock before evicting
 * any objects. No other locks should be held when calling this function.
 * This will return with the class lock unlocked.
 *
 * If there is no zs_pool->ops or ops->evict function, this returns error.
 *
 * This returns 0 on success, -err on failure. On failure, some of the
 * objects may have been freed, but not all. On success, the entire zspage
 * has been freed and should not be used anymore.
 */
static int reclaim_zspage(struct zs_pool *pool, struct page *first_page)
{
	struct size_class *class;
	enum fullness_group fullness;
	struct page *page = first_page;
	unsigned long handle;
	int class_idx, ret = 0;

	BUG_ON(!is_first_page(first_page));

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	assert_spin_locked(&class->lock);

	if (!pool->ops || !pool->ops->evict) {
		spin_unlock(&class->lock);
		return -EINVAL;
	}

	/* move the zspage into the reclaim fullness group,
	 * so it's not available for use by zs_malloc,
	 * and won't be freed by zs_free
	 */
	remove_zspage(first_page, class, fullness);
	set_zspage_mapping(first_page, class_idx, ZS_RECLAIM);

	spin_unlock(&class->lock);

	while (page) {
		unsigned long offset, idx = 0;

		while ((offset = obj_idx_to_offset(page, idx, class->size))
							< PAGE_SIZE) {
			handle = (unsigned long)obj_location_to_handle(page,
							idx++);
			if (obj_handle_is_free(first_page, class, handle))
				continue;

			ret = pool->ops->evict(pool, handle);
			if (ret) {
				spin_lock(&class->lock);
				fix_fullness_group(pool, first_page);
				spin_unlock(&class->lock);
				return ret;
			}

			obj_free(handle, page, offset);
		}

		page = get_next_page(page);
	}

	free_zspage(first_page);

	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);

	return 0;
}
static struct page *find_available_zspage(struct size_class *class)
{
	int i;
	struct page *page = NULL;

	for (i = 0; i < _ZS_NR_AVAILABLE_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}
/* this simply iterates atomically through all classes,
 * using a specific fullness group. At the end, it starts
 * over using the next fullness group, and so on. The
 * fullness groups are used in a specific order, from
 * least to most full.
 */
static void find_next_lru_class_fg(struct zs_pool *pool,
			struct size_class **class, enum fullness_group *fg)
{
	int i = atomic_inc_return(&lru_class_fg);

	if (i >= _ZS_NR_LRU_CLASS_FG) {
		int orig = i;

		i %= _ZS_NR_LRU_CLASS_FG;
		/* only need to try once, since if we don't
		 * succeed whoever changed it will also try
		 * and eventually someone will reset it
		 */
		atomic_cmpxchg(&lru_class_fg, orig, i);
	}

	*class = pool->size_class[i % ZS_SIZE_CLASSES];
	*fg = lru_fg[i / ZS_SIZE_CLASSES];
}
/*
 * This attempts to find the LRU zspage, but that's not really possible
 * because zspages are not contained in a single LRU list, they're
 * contained inside fullness groups which are themselves contained
 * inside classes. So this simply iterates through the classes and
 * fullness groups to find the next non-empty fullness group, and
 * uses the LRU zspage there.
 *
 * On success, the zspage is returned with its class locked.
 * On failure, NULL is returned.
 */
static struct page *find_lru_zspage(struct zs_pool *pool)
{
	struct size_class *class;
	struct page *page;
	enum fullness_group fg;
	int tries = 0;

	while (tries++ < _ZS_NR_LRU_CLASS_FG) {
		find_next_lru_class_fg(pool, &class, &fg);

		spin_lock(&class->lock);

		page = class->fullness_list[fg];
		if (page)
			return list_prev_entry(page, lru);

		spin_unlock(&class->lock);
	}

	return NULL;
}
#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm_buf)
		free_page((unsigned long)area->vm_buf);
	area->vm_buf = NULL;
}
static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}
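
/*
 * For example, an object of class size 200 that starts at off == 4000 in a
 * 4K page is split as sizes[0] = 4096 - 4000 = 96 bytes from pages[0] and
 * sizes[1] = 200 - 96 = 104 bytes from pages[1]; both pieces are staged in
 * the per-cpu vm_buf so the caller sees one contiguous buffer.
 */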
static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};
static void zs_exit(void)
{
	int cpu;

#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}
static int zs_init(void)
{
	int cpu, ret;

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret)) {
			cpu_notifier_register_done();
			goto fail;
		}
	}

	cpu_notifier_register_done();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	return 0;

fail:
	zs_exit();
	return notifier_to_errno(ret);
}
static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}
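
/*
 * Illustration with 4K pages: the classes for sizes 4080 and 4096 both use
 * pages_per_zspage == 1 and both store get_maxobj_per_zspage() == 1 object
 * per zspage, so can_merge() lets the smaller class reuse the larger one's
 * size_class.
 */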
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(gfp_t flags, struct zs_ops *ops)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	/*
	 * Iterate in reverse, because the size of the size_class that we
	 * want to use for merging should be larger than or equal to the
	 * current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;
		struct size_class *prev_class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (i < ZS_SIZE_CLASSES - 1) {
			prev_class = pool->size_class[i + 1];
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
	}

	pool->flags = flags;
	pool->ops = ops;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_available_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);
		spin_lock(&class->lock);
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);
/**
 * zs_free - Free the handle from this pool.
 * @pool: pool containing the handle
 * @obj: the handle to free
 *
 * The caller must provide a valid handle that is contained
 * in the provided pool. The caller must ensure this is
 * not called after evict() has returned successfully for the
 * handle.
 */
void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* must re-check fullness after taking class lock */
	get_zspage_mapping(first_page, &class_idx, &fullness);
	if (fullness == ZS_RECLAIM) {
		spin_unlock(&class->lock);
		return; /* will be freed during reclaim */
	}

	obj_free(obj, f_page, f_offset);

	fullness = fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY) {
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
}
EXPORT_SYMBOL_GPL(zs_free);
/**
 * zs_shrink - Shrink the pool
 * @pool: pool to shrink
 *
 * The pool will be shrunk by one zspage, which is some
 * number of pages in size. On success, the number of freed
 * pages is returned. On failure, the error is returned.
 */
int zs_shrink(struct zs_pool *pool)
{
	struct size_class *class;
	enum fullness_group fullness;
	struct page *page;
	int class_idx, ret;

	if (!pool->ops || !pool->ops->evict)
		return -EINVAL;

	/* if a page is found, the class is locked */
	page = find_lru_zspage(pool);
	if (!page)
		return -ENOENT;

	get_zspage_mapping(page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	/* reclaim_zspage unlocks the class lock */
	ret = reclaim_zspage(pool, page);
	if (ret)
		return ret;

	return class->pages_per_zspage;
}
EXPORT_SYMBOL_GPL(zs_shrink);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
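
/*
 * Typical call sequence for the API above (an illustrative sketch only;
 * error handling is omitted and the "my_evict" callback name is
 * hypothetical):
 *
 *	static struct zs_ops my_ops = { .evict = my_evict };
 *
 *	struct zs_pool *pool = zs_create_pool(GFP_KERNEL, &my_ops);
 *	unsigned long handle = zs_malloc(pool, len);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */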
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");