mm: do not alloc cma page for shmem
author	Cho KyongHo <pullip.cho@samsung.com>
Wed, 21 Dec 2016 02:17:20 +0000 (11:17 +0900)
committer	Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:23:00 +0000 (20:23 +0300)
Some shmem page cache pages tend to be pinned for a long time.
Therefore shmem should not allocate its page cache from CMA, because
CMA pages must always remain promptly reclaimable.

Change-Id: I024dad765feec07b0028851b0b98b7db7f3c85a5
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
mm/shmem.c

index ec4a851e58c39c28bff91f1fff18aca2c50b199f..d36818107dc89446f5dfd308c0e9655e1be761dd 100644 (file)
@@ -2204,6 +2204,13 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                inode->i_blocks = 0;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_generation = get_seconds();
+               /*
+                * Removing __GFP_HIGHMEM breaks GFP_HIGHUSER_MOVABLE, but it
+                * is required to keep CMA pages out of the shmem page cache.
+                */
+               mapping_set_gfp_mask(inode->i_mapping,
+                       mapping_gfp_mask(inode->i_mapping) & ~__GFP_HIGHMEM);
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
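
Not part of the patch: the added lines only clear __GFP_HIGHMEM from the
mapping's existing gfp mask (GFP_HIGHUSER_MOVABLE by default, as the comment
implies) and leave the remaining flags untouched. Below is a minimal,
stand-alone C sketch of that bitmask operation; the flag values are
placeholders for illustration and are not the real kernel constants.

#include <stdio.h>

/* Placeholder flag values for illustration only; not the kernel's values. */
#define __GFP_HIGHMEM         0x02u
#define __GFP_MOVABLE         0x08u
#define GFP_USER              0x01u
#define GFP_HIGHUSER          (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE  (GFP_HIGHUSER | __GFP_MOVABLE)

int main(void)
{
	unsigned int mask = GFP_HIGHUSER_MOVABLE;	/* default mapping mask */

	/* Same operation the patch applies via mapping_set_gfp_mask(). */
	mask &= ~__GFP_HIGHMEM;

	printf("__GFP_HIGHMEM set: %d, __GFP_MOVABLE set: %d\n",
	       !!(mask & __GFP_HIGHMEM), !!(mask & __GFP_MOVABLE));
	return 0;
}

The sketch prints that __GFP_HIGHMEM is cleared while __GFP_MOVABLE stays
set, mirroring how the patch narrows only the highmem part of the mask for
shmem's page cache allocations.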