/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
/*
 * This allocator is designed for use with zcache and zram. Thus, the
 * allocator is supposed to work well under low memory conditions. In
 * particular, it never attempts higher order page allocation, which is
 * very likely to fail under memory pressure. On the other hand, if we
 * just use single (0-order) pages, it would suffer from very high
 * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
 * an entire page. This was one of the major issues with its predecessor
 * (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page, i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->zspage_order * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 */
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "zsmalloc.h"
/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN		8
/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (void *) handle value.
 *
 * Note that object index <obj_idx> is relative to the system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS	36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS	BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
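
/*
 * Worked example of the bit split (configurations assumed for
 * illustration only): on 64-bit with 4K pages and the default
 * MAX_PHYSMEM_BITS == BITS_PER_LONG, _PFN_BITS = 64 - 12 = 52, so
 * OBJ_INDEX_BITS = 64 - 52 = 12 == PAGE_SHIFT and OBJ_INDEX_MASK = 0xfff.
 * On 32-bit with CONFIG_HIGHMEM64G, _PFN_BITS = 36 - 12 = 24, so
 * OBJ_INDEX_BITS = 32 - 24 = 8 and OBJ_INDEX_MASK = 0xff.
 */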
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
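
/*
 * Worked example (illustrative, assuming 4K pages): the second MAX()
 * operand appears to keep the object count of the largest zspage within
 * what obj_idx can encode. With ZS_MAX_PAGES_PER_ZSPAGE = 4, the largest
 * zspage holds 4 * 4096 = 16384 bytes. If OBJ_INDEX_BITS = 12, then
 * 16384 >> 12 = 4, so ZS_MIN_ALLOC_SIZE = MAX(32, 4) = 32; if
 * OBJ_INDEX_BITS = 8 (the HIGHMEM64G case above), 16384 >> 8 = 64, and
 * the minimum allocation size grows to 64 bytes.
 */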
/*
 * On systems with 4K page size, this gives 254 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 *  (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
#define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
					ZS_SIZE_CLASS_DELTA + 1)
/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};
/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
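
/*
 * Illustrative example (numbers assumed): for a zspage that can store
 * N = 100 objects, with fullness_threshold_frac = 4 the groups are:
 * n == 0 -> ZS_EMPTY, 1 <= n <= 25 -> ZS_ALMOST_EMPTY,
 * 26 <= n <= 99 -> ZS_ALMOST_FULL, n == 100 -> ZS_FULL.
 */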
struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	spinlock_t lock;

	/* stats */
	u64 pages_allocated;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};
/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
	void *next;
};
struct zs_pool {
	struct size_class size_class[ZS_SIZE_CLASSES];

	gfp_t flags;	/* allocation flags used when growing pool */
};
/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
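
/*
 * Worked example (values assumed for illustration): for class_idx = 67
 * and fullness = ZS_ALMOST_EMPTY (== 1 with the enum above), the encoded
 * value is m = (67 << 4) | 1 = 0x431, stored as (struct address_space *)m.
 * Decoding reverses this: fullness = 0x431 & 0xf = 1 and
 * class_idx = 0x431 >> 4 = 67 (see {get,set}_zspage_mapping() below).
 */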
/*
 * By default, zsmalloc uses a copy-based object mapping method to access
 * allocations that span two pages. However, if a particular architecture
 * performs VM mapping faster than copying, then it should be added here
 * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
 * page table mapping rather than copying for object mapping.
 */
#if defined(CONFIG_ARM)
#define USE_PGTABLE_MAPPING
#endif
struct mapping_area {
#ifdef USE_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}
static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;

	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
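
/*
 * Worked example (assuming 4K pages, so ZS_MIN_ALLOC_SIZE = 32 and
 * ZS_SIZE_CLASS_DELTA = 16): a request for size = 100 gives
 * idx = DIV_ROUND_UP(100 - 32, 16) = DIV_ROUND_UP(68, 16) = 5,
 * i.e. the class whose object size is 32 + 5 * 16 = 112 bytes, the
 * smallest class size that still fits the request.
 */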
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}
/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE, where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
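
/*
 * Worked example (assuming 4K pages): for class_size = 3/8 * PAGE_SIZE
 * = 1536 bytes, the loop above computes:
 *	i = 1: waste =  4096 % 1536 = 1024, usedpc = 75
 *	i = 2: waste =  8192 % 1536 =  512, usedpc = 93
 *	i = 3: waste = 12288 % 1536 =    0, usedpc = 100
 *	i = 4: waste = 16384 % 1536 = 1024, usedpc = 93
 * so a 3-page zspage is chosen, packing exactly 8 objects with no waste.
 */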
/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}
static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page->private;
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}
/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= (obj_idx & OBJ_INDEX_MASK);

	return (void *)handle;
}
/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = handle & OBJ_INDEX_MASK;
}
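
/*
 * Worked round trip (numbers assumed, with OBJ_INDEX_BITS = 12):
 * encoding PFN 0x1b3 and obj_idx 5 gives
 *	handle = (0x1b3 << 12) | 5 = 0x1b3005
 * and decoding recovers PFN = 0x1b3005 >> 12 = 0x1b3 and
 * obj_idx = 0x1b3005 & 0xfff = 5.
 */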
static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
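
/*
 * Example (values assumed): for a non-first component page whose
 * page->index is 2048 and a class_size of 1536, object 1 on that page
 * starts at offset 2048 + 1 * 1536 = 3584. For the first page the base
 * offset is always 0.
 */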
static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}
/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
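
/*
 * Illustrative walk-through (numbers assumed): with 4K pages and
 * class->size == 3072, get_pages_per_zspage() picks a 3-page zspage
 * holding exactly 4 objects, and init_zspage() lays it out as:
 *
 *	page0: obj (page0,0) at off 0, obj (page0,1) at off 3072
 *	       (spans into page1, so page1->index = 2048)
 *	page1: obj (page1,0) at off 2048 (spans into page2,
 *	       so page2->index = 1024)
 *	page2: obj (page2,0) at off 1024, ending exactly at 4096
 *
 * freelist: (page0,0) -> (page0,1) -> (page1,0) -> (page2,0) -> NULL
 */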
/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			first_page->private = (unsigned long)page;
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}
#ifdef USE_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;
	unsigned long end = addr + (PAGE_SIZE * 2);

	flush_cache_vunmap(addr, end);
	unmap_kernel_range_noflush(addr, PAGE_SIZE * 2);
	flush_tlb_kernel_range(addr, end);
}

#else /* USE_PGTABLE_MAPPING */
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm_buf)
		free_page((unsigned long)area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* USE_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};
static void zs_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
}
static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			goto fail;
	}
	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(gfp_t flags)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;

		class = &pool->size_class[i];
		class->size = size;
		class->index = i;
		spin_lock_init(&class->lock);
		class->pages_per_zspage = get_pages_per_zspage(size);
	}

	pool->flags = flags;

	return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = &pool->size_class[i];

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->pages_per_zspage;
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);
void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->pages_per_zspage;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
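
/*
 * Minimal usage sketch of the exported API (illustrative only; the gfp
 * flags and error handling are assumptions of this example, not
 * requirements of the allocator):
 *
 *	struct zs_pool *pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
 *	unsigned long handle;
 *	void *dst;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	handle = zs_malloc(pool, len);      (len must be <= ZS_MAX_ALLOC_SIZE)
 *	if (!handle)
 *		return -ENOMEM;
 *	dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);              (valid only while mapped)
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 *
 * The pointer returned by zs_map_object() is only usable until the
 * matching zs_unmap_object(); callers store the handle, not the pointer.
 */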
u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	int i;
	u64 npages = 0;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");