mm: do not alloc cma page for shmem
author Cho KyongHo <pullip.cho@samsung.com>
Wed, 21 Dec 2016 02:17:20 +0000 (11:17 +0900)
committer hskang <hs1218.kang@samsung.com>
Mon, 27 Aug 2018 07:23:05 +0000 (16:23 +0900)
Some shmem page cache pages tend to stay pinned for a long time.
Therefore shmem should not allocate its page cache from CMA, because
CMA pages must always remain ready to be reclaimed promptly.
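Illustrative sketch only (the helper name shmem_inode_forbid_cma is
hypothetical, and this assumes the shmem page cache path picks up the
inode's mapping gfp mask via mapping_gfp_mask(), as mainline
shmem_getpage() does): clearing __GFP_HIGHMEM from the mapping's gfp
mask is what keeps later shmem page cache allocations out of the
CMA-backed pools on this kernel.

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/gfp.h>

    /*
     * Hypothetical helper, not part of this patch: drop __GFP_HIGHMEM
     * from a shmem inode's mapping so that subsequent page cache
     * allocations for it no longer qualify for CMA on this kernel.
     */
    static void shmem_inode_forbid_cma(struct inode *inode)
    {
            gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

            mapping_set_gfp_mask(inode->i_mapping, gfp & ~__GFP_HIGHMEM);
    }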

Change-Id: I024dad765feec07b0028851b0b98b7db7f3c85a5
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
mm/shmem.c

index 6a5973d727f895d86a359ee6b8b769fcd68aafaf..024e5c217f96f625e2c3c85a5d1fea6d35d955d5 100644
@@ -2170,6 +2170,13 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                inode->i_blocks = 0;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_generation = get_seconds();
+               /*
+                * Drop __GFP_HIGHMEM so the mask is no longer
+                * GFP_HIGHUSER_MOVABLE; this keeps the page cache of
+                * shmem from being allocated out of CMA.
+                */
+               mapping_set_gfp_mask(inode->i_mapping,
+                       mapping_gfp_mask(inode->i_mapping) & ~__GFP_HIGHMEM);
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);