/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zram. Thus, the allocator is
 * supposed to work well under low memory conditions. In particular, it
 * never attempts higher order page allocation which is very likely to
 * fail under memory pressure. On the other hand, if we just use single
 * (0-order) pages, it would suffer from very high fragmentation --
 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
 * This was one of the major issues with its predecessor (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
 * since this satisfies the requirements of all its current users (in the
 * worst case, a page is incompressible and is thus stored "as-is" i.e. in
 * uncompressed form). For allocation requests larger than this size, failure
 * is returned (see zs_malloc).
 *
 * Additionally, zs_malloc() does not return a dereferenceable pointer.
 * Instead, it returns an opaque handle (unsigned long) which encodes the
 * actual location of the allocated object. The reason for this indirection
 * is that zsmalloc does not keep zspages permanently mapped since that would
 * cause issues on 32-bit systems where the VA region for kernel space
 * mappings is very small. So, before using the allocated memory, the object
 * has to be mapped using zs_map_object() to get a usable pointer and
 * subsequently unmapped using zs_unmap_object().
 *
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 * For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->pages_per_zspage * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */
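
/*
 * Example usage (an illustrative sketch only: error handling is trimmed
 * and "my_evict" is a hypothetical eviction callback, not defined in this
 * file):
 *
 *	static struct zs_ops my_ops = { .evict = my_evict };
 *
 *	struct zs_pool *pool = zs_create_pool(GFP_KERNEL, &my_ops);
 *	unsigned long handle = zs_malloc(pool, len);
 *	void *ptr = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(ptr, buf, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */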

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to the system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts at 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
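
/*
 * Illustrative example (assuming a 64-bit system with 4K pages and the
 * BITS_PER_LONG fallback above): _PFN_BITS = 64 - 12 = 52 and
 * OBJ_INDEX_BITS = 12, so a handle is laid out as
 *
 *	[63..............12][11........0]
 *	        PFN          obj_idx + 1
 *
 * and OBJ_INDEX_MASK == 0xfff.
 */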

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
#define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
					ZS_SIZE_CLASS_DELTA + 1)
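
/*
 * Worked example (PAGE_SIZE == 4096, 64-bit): ZS_SIZE_CLASS_DELTA = 16 and
 * ZS_MIN_ALLOC_SIZE = MAX(32, (4 << 12) >> 12) = 32, so
 * ZS_SIZE_CLASSES = (4096 - 32) / 16 + 1 = 255, with class sizes
 * 32, 48, 64, ..., 4096 -- the 255 classes mentioned in the comment above.
 */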

/*
 * We do not maintain any list for completely empty zspages,
 * since a zspage is freed when it becomes empty.
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	ZS_FULL,

	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_RECLAIM
};
#define _ZS_NR_AVAILABLE_FULLNESS_GROUPS ZS_FULL

/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign a zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
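
/*
 * Worked example (assuming PAGE_SIZE == 4096): a zspage of the 32-byte
 * class holds N = 128 objects. With f = 4 it is grouped as:
 *	n == 0			-> ZS_EMPTY
 *	1 <= n <= 32		-> ZS_ALMOST_EMPTY
 *	33 <= n <= 127		-> ZS_ALMOST_FULL
 *	n == 128		-> ZS_FULL
 */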

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
	void *next;
};

struct zs_pool {
	struct size_class *size_class[ZS_SIZE_CLASSES];

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

	struct zs_ops *ops;
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
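
/*
 * Illustrative example: for class_idx 42 and fullness ZS_ALMOST_EMPTY
 * (value 1), set_zspage_mapping() below stores
 *	m = (42 << FULLNESS_BITS) | 1 = 0x2a1
 * in first_page->mapping; get_zspage_mapping() recovers both fields by
 * masking and shifting.
 */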

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

/* atomic counter indicating which class/fg to reclaim from */
static atomic_t lru_class_fg;
/* specific order of fg we want to reclaim from */
static enum fullness_group lru_fg[] = {
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL
};
#define _ZS_NR_LRU_CLASS_FG (ZS_SIZE_CLASSES * ARRAY_SIZE(lru_fg))

/* zpool driver */

#ifdef CONFIG_ZPOOL

static int zs_zpool_evict(struct zs_pool *pool, unsigned long handle)
{
	return zpool_evict(pool, handle);
}

static struct zs_ops zs_zpool_ops = {
	.evict =	zs_zpool_evict
};

static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(gfp, &zs_zpool_ops);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	int total = 0, ret = 0;

	while (total < pages) {
		ret = zs_shrink(pool);
		WARN_ON(!ret);
		if (ret <= 0)
			break;
		total += ret;
		ret = 0;
	}

	if (reclaimed)
		*reclaimed = total;
	return ret;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages and each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns the index of the
 * size class whose chunk size is big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
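
/*
 * For example (PAGE_SIZE == 4096): size = 100 gives
 * idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the 112-byte class, which is
 * the smallest class whose chunk size is >= 100.
 */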

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (see zs_shrink). This function returns the fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	class = pool->size_class[class_idx];
	newfg = get_fullness_group(page);
	/* Need to do this even if currfg == newfg, to update lru */
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	if (currfg != newfg)
		set_zspage_mapping(page, class_idx, newfg);

	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
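
/*
 * Worked example (PAGE_SIZE == 4096): for class_size = 1536, i.e. the
 * 3/8 * PAGE_SIZE case from the comment above:
 *	i = 1: waste =  4096 % 1536 = 1024, usedpc =  75
 *	i = 2: waste =  8192 % 1536 =  512, usedpc =  93
 *	i = 3: waste = 12288 % 1536 =    0, usedpc = 100
 * so 3 pages are linked per zspage, holding exactly 8 objects.
 */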

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * On hardware platforms with physical memory starting at 0x0, the pfn
 * could be 0, so we ensure that the handle will never be 0 by adjusting
 * the obj_idx value before encoding.
 */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);

	return (void *)handle;
}

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * obj_location_to_handle().
 */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}
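
/*
 * Round-trip example (illustrative, assuming OBJ_INDEX_BITS == 12): for a
 * page with PFN 0x1234 and obj_idx 5,
 *	handle = (0x1234 << 12) | (5 + 1) = 0x1234006
 * and obj_handle_to_location() recovers PFN = handle >> 12 = 0x1234 and
 * obj_idx = (handle & 0xfff) - 1 = 5. The +1 bias keeps the handle
 * non-zero even when both PFN and obj_idx are 0.
 */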

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}

static bool obj_handle_is_free(struct page *first_page,
			struct size_class *class, unsigned long handle)
{
	unsigned long obj, idx, offset;
	struct page *page;
	struct link_free *link;

	BUG_ON(!is_first_page(first_page));

	obj = (unsigned long)first_page->freelist;

	while (obj) {
		if (obj == handle)
			return true;

		obj_handle_to_location(obj, &page, &idx);
		offset = obj_idx_to_offset(page, idx, class->size);

		link = (struct link_free *)kmap_atomic(page) +
					offset / sizeof(*link);
		obj = (unsigned long)link->next;
		kunmap_atomic(link);
	}

	return false;
}

static void obj_free(unsigned long obj, struct page *page, unsigned long offset)
{
	struct page *first_page = get_first_page(page);
	struct link_free *link;

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(page)
							+ offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
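
/*
 * Layout example (illustrative, PAGE_SIZE == 4096, class->size == 2720,
 * giving a 2-page zspage that holds 3 objects): object 0 occupies bytes
 * 0..2719 of page 0, object 1 spans bytes 2720..4095 of page 0 plus bytes
 * 0..1343 of page 1, and object 2 occupies bytes 1344..4063 of page 1, so
 * page 1's ->index is set to 1344. The loop above threads the freelist
 * through these objects in order; the partial tail slot at page 1 offset
 * 4064 only terminates the list (its next is NULL) and is never handed
 * out, because the zspage goes ZS_FULL once first_page->objects (here 3)
 * allocations have been made.
 */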

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

/*
 * This tries to reclaim all the provided zspage's objects by calling the
 * zs_pool's ops->evict function for each object in use. This requires
 * the zspage's class lock to be held when calling this function. Since
 * the evict function may sleep, this drops the class lock before evicting
 * any objects. No other locks should be held when calling this function.
 * This will return with the class lock unlocked.
 *
 * If there is no zs_pool->ops or ops->evict function, this returns an error.
 *
 * This returns 0 on success, -err on failure. On failure, some of the
 * objects may have been freed, but not all. On success, the entire zspage
 * has been freed and should not be used anymore.
 */
static int reclaim_zspage(struct zs_pool *pool, struct page *first_page)
{
	struct size_class *class;
	enum fullness_group fullness;
	struct page *page = first_page;
	unsigned long handle;
	int class_idx, ret = 0;

	BUG_ON(!is_first_page(first_page));

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	assert_spin_locked(&class->lock);

	if (!pool->ops || !pool->ops->evict) {
		spin_unlock(&class->lock);
		return -EINVAL;
	}

	/* move the zspage into the reclaim fullness group,
	 * so it's not available for use by zs_malloc,
	 * and won't be freed by zs_free
	 */
	remove_zspage(first_page, class, fullness);
	set_zspage_mapping(first_page, class_idx, ZS_RECLAIM);

	spin_unlock(&class->lock);

	might_sleep();

	while (page) {
		unsigned long offset, idx = 0;

		while ((offset = obj_idx_to_offset(page, idx, class->size))
					< PAGE_SIZE) {
			handle = (unsigned long)obj_location_to_handle(page,
								idx++);
			if (obj_handle_is_free(first_page, class, handle))
				continue;
			ret = pool->ops->evict(pool, handle);
			if (ret) {
				spin_lock(&class->lock);
				fix_fullness_group(pool, first_page);
				spin_unlock(&class->lock);
				return ret;
			}
			obj_free(handle, page, offset);
		}

		page = get_next_page(page);
	}

	free_zspage(first_page);

	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);

	return 0;
}

static struct page *find_available_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_AVAILABLE_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

/* this simply iterates atomically through all classes,
 * using a specific fullness group. At the end, it starts
 * over using the next fullness group, and so on. The
 * fullness groups are used in a specific order, from
 * least to most full.
 */
static void find_next_lru_class_fg(struct zs_pool *pool,
			struct size_class **class, enum fullness_group *fg)
{
	int i = atomic_inc_return(&lru_class_fg);

	if (i >= _ZS_NR_LRU_CLASS_FG) {
		int orig = i;

		i %= _ZS_NR_LRU_CLASS_FG;
		/* only need to try once, since if we don't
		 * succeed whoever changed it will also try
		 * and eventually someone will reset it
		 */
		atomic_cmpxchg(&lru_class_fg, orig, i);
	}
	*class = pool->size_class[i % ZS_SIZE_CLASSES];
	*fg = lru_fg[i / ZS_SIZE_CLASSES];
}

/*
 * This attempts to find the LRU zspage, but that's not really possible
 * because zspages are not contained in a single LRU list, they're
 * contained inside fullness groups which are themselves contained
 * inside classes. So this simply iterates through the classes and
 * fullness groups to find the next non-empty fullness group, and
 * uses the LRU zspage there.
 *
 * On success, the zspage is returned with its class locked.
 * On failure, NULL is returned.
 */
static struct page *find_lru_zspage(struct zs_pool *pool)
{
	struct size_class *class;
	struct page *page;
	enum fullness_group fg;
	int tries = 0;

	while (tries++ < _ZS_NR_LRU_CLASS_FG) {
		find_next_lru_class_fg(pool, &class, &fg);

		spin_lock(&class->lock);

		page = class->fullness_list[fg];
		if (page)
			return list_prev_entry(page, lru);

		spin_unlock(&class->lock);
	}

	return NULL;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm_buf)
		free_page((unsigned long)area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
	int cpu;

#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static int zs_init(void)
{
	int cpu, ret;

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret)) {
			cpu_notifier_register_done();
			goto fail;
		}
	}

	cpu_notifier_register_done();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}
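
/*
 * For example (illustrative, PAGE_SIZE == 4096): the 4080- and 4096-byte
 * classes both use one page per zspage and store one object per zspage,
 * so can_merge() lets them share a single size_class; the 32- and 48-byte
 * classes differ in both pages_per_zspage and objects per zspage, so they
 * stay separate.
 */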

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata
 * @ops: callbacks (currently only ->evict) used when shrinking the pool
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(gfp_t flags, struct zs_ops *ops)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	/*
	 * Iterate in reverse order, because the size of a size_class we want
	 * to merge into must be greater than or equal to the current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;
		struct size_class *prev_class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * A size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get better memory utilization if we use one size_class
		 * for many different sizes whose size_classes have the same
		 * characteristics. So, we make each size_class point to its
		 * previous size_class if possible.
		 */
		if (i < ZS_SIZE_CLASSES - 1) {
			prev_class = pool->size_class[i + 1];
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
	}

	pool->flags = flags;
	pool->ops = ops;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_available_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);
		spin_lock(&class->lock);
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
					m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

/**
 * zs_free - Free the handle from this pool.
 * @pool: pool containing the handle
 * @obj: the handle to free
 *
 * The caller must provide a valid handle that is contained
 * in the provided pool. The caller must ensure this is
 * not called after evict() has returned successfully for the
 * handle.
 */
void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* must re-check fullness after taking class lock */
	get_zspage_mapping(first_page, &class_idx, &fullness);
	if (fullness == ZS_RECLAIM) {
		spin_unlock(&class->lock);
		return; /* will be freed during reclaim */
	}

	obj_free(obj, f_page, f_offset);

	fullness = fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY) {
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_shrink - Shrink the pool
 * @pool: pool to shrink
 *
 * The pool will be shrunk by one zspage, which is some
 * number of pages in size. On success, the number of freed
 * pages is returned. On failure, the error is returned.
 */
int zs_shrink(struct zs_pool *pool)
{
	struct size_class *class;
	enum fullness_group fullness;
	struct page *page;
	int class_idx, ret;

	if (!pool->ops || !pool->ops->evict)
		return -EINVAL;

	/* if a page is found, the class is locked */
	page = find_lru_zspage(pool);
	if (!page)
		return -ENOENT;

	get_zspage_mapping(page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	/* reclaim_zspage unlocks the class lock */
	ret = reclaim_zspage(pool, page);
	if (ret)
		return ret;

	return class->pages_per_zspage;
}
EXPORT_SYMBOL_GPL(zs_shrink);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");