/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);
/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}
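
/*
 * Illustrative sketch (not part of this header; the function name is
 * hypothetical): a subsystem holding a long-lived mm pointer pins the
 * mm_struct itself with mmgrab()/mmdrop(), and separately upgrades to
 * a mm_users reference whenever it actually needs the address space:
 *
 *	static void inspect_mm_later(struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		...
 *		if (mmget_not_zero(mm)) {
 *			... the address space is safe to use here ...
 *			mmput(mm);
 *		}
 *		mmdrop(mm);
 *	}
 */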
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);

	__mmdrop(mm);
}
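
/*
 * mmdrop_async() below is for callers that may drop the last mm_count
 * reference from a context where running __mmdrop() directly is unsafe
 * (an assumption here: e.g. from atomic context); the actual teardown
 * is deferred to a workqueue via mm->async_put_work.
 */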
static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_sem for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
 * for reading and outside the context of the process, so it is also
 * the only case that holds the mmap_sem for reading that must call
 * this function. Generally, if the mmap_sem is held for reading
 * there's no need for this check after get_task_mm()/mmget_not_zero().
 *
 * This function can be obsoleted and the check removed once the
 * coredump code holds the mmap_sem for writing before invoking the
 * ->core_dump methods.
 */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}
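
/*
 * A sketch of the protocol described above (illustrative only, not a
 * definitive caller):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm && !down_write_killable(&mm->mmap_sem)) {
 *		if (mmget_still_valid(mm)) {
 *			... safe to modify vmas here ...
 *		}
 *		up_write(&mm->mmap_sem);
 *	}
 *	if (mm)
 *		mmput(mm);
 */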
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
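
/*
 * Typical use (sketch): a caller that cannot know whether the last
 * user reference has already gone away tries to take one and backs
 * off if the address space is being torn down:
 *
 *	if (mmget_not_zero(mm)) {
 *		... walk or modify the address space ...
 *		mmput(mm);
 *	}
 */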
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but performs the slow path from async context.
 * It can also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and the ptrace_may_access() check with the mode parameter passed to
 * it succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
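
/*
 * For example (sketch, modeled on procfs-style callers): request the
 * target's mm only if the caller would also be allowed to ptrace it.
 * mm_access() returns NULL when the task has no mm, or an ERR_PTR()
 * when the permission check fails:
 *
 *	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *
 *	if (IS_ERR_OR_NULL(mm))
 *		return mm ? PTR_ERR(mm) : -ESRCH;
 */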
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care: if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
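
/*
 * E.g. the OOM killer's badness computation uses this to skip tasks
 * that are in the middle of vfork (simplified sketch):
 *
 *	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN || in_vfork(p))
 *		return 0;
 */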
/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS, and it is a weaker context,
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
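
/*
 * Allocation paths are expected to narrow a caller-supplied mask
 * through this helper before acting on it, e.g. (sketch):
 *
 *	gfp_t effective = current_gfp_context(gfp_mask);
 *
 *	if (effective & __GFP_FS)
 *		... reclaim may recurse into filesystems ...
 */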
#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;

	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;

	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
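
/*
 * The three save/restore pairs above share one scoped pattern. E.g. a
 * filesystem that holds locks which reclaim-initiated writeback could
 * also take brackets the critical section with the NOFS pair, so that
 * GFP_KERNEL allocations inside it are implicitly treated as GFP_NOFS
 * (sketch):
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	page = alloc_page(GFP_KERNEL);
 *	...
 *	memalloc_nofs_restore(nofs_flags);
 *
 * Saving and restoring the old flag value, rather than clearing it,
 * keeps nesting safe: an outer NOFS scope remains in effect after an
 * inner one ends.
 */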
#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY	= (1U << 0),
	MEMBARRIER_STATE_SWITCH_MM			= (1U << 1),
};

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
static inline void membarrier_execve(struct task_struct *t)
{
}
#endif /* CONFIG_MEMBARRIER */

#endif /* _LINUX_SCHED_MM_H */