/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
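
/*
 * Illustrative sketch of the intended mmgrab()/mmdrop() pairing (where the
 * mm pointer comes from is left out):
 *
 *	mmgrab(mm);
 *	<stash mm away for an unbounded amount of time; the mm_struct itself
 *	 cannot be freed, but the address space may already be gone>
 *	mmdrop(mm);
 */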

/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_sem for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
 * for reading and outside the context of the process, so it is also
 * the only case that holds the mmap_sem for reading that must call
 * this function. Generally if the mmap_sem is held for reading
 * there's no need for this check after get_task_mm()/mmget_not_zero().
 *
 * This function can be obsoleted and the check removed once the
 * coredump code holds the mmap_sem for writing before invoking the
 * ->core_dump methods.
 */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}
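
/*
 * Sketch of the check described above (the elided body is hypothetical
 * caller code that modifies vmas):
 *
 *	mm = get_task_mm(task);
 *	if (!mm)
 *		return;
 *	down_write(&mm->mmap_sem);
 *	if (mmget_still_valid(mm)) {
 *		... modify the vmas ...
 *	}
 *	up_write(&mm->mmap_sem);
 *	mmput(mm);
 */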

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
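
/*
 * Sketch of taking a temporary reference on an address space that may
 * already be exiting (the mm pointer is assumed to be otherwise pinned,
 * e.g. via mmgrab()):
 *
 *	if (mmget_not_zero(mm)) {
 *		... use the address space ...
 *		mmput(mm);
 *	}
 */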

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
 * be called from the atomic context as well
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	if (unlikely(current->flags &
		     (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence
		 */
		if (current->flags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (current->flags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
#ifdef CONFIG_CMA
		if (current->flags & PF_MEMALLOC_NOCMA)
			flags &= ~__GFP_MOVABLE;
#endif
	}
	return flags;
}
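
/*
 * Illustrative sketch: an allocation path applies the per-task context to
 * the caller's gfp mask before acting on it (do_allocation() stands in for
 * the real allocator internals):
 *
 *	gfp_t gfp = current_gfp_context(gfp_mask);
 *
 *	do_allocation(gfp);
 *
 * After the call, __GFP_IO, __GFP_FS and __GFP_MOVABLE have been cleared
 * from the mask as dictated by current->flags.
 */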

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
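
/*
 * With lockdep enabled these hooks record that the current context may
 * enter memory reclaim. A purely illustrative sketch of annotating a
 * section that lockdep should treat as reclaim:
 *
 *	fs_reclaim_acquire(GFP_KERNEL);
 *	... work that may be reached from direct reclaim ...
 *	fs_reclaim_release(GFP_KERNEL);
 */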

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
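
/*
 * Sketch of the intended scoping (any allocation inside the section is
 * implicitly degraded to GFP_NOIO):
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	page = alloc_page(GFP_KERNEL);
 *	...
 *	memalloc_noio_restore(noio_flags);
 */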

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
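
/*
 * Sketch of the intended scoping, e.g. around a filesystem transaction
 * where recursing into the filesystem from reclaim could deadlock:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	... open the transaction and allocate with GFP_KERNEL as usual ...
 *	memalloc_nofs_restore(nofs_flags);
 */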

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
/**
 * memalloc_use_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function is not nesting safe.
 */
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
	WARN_ON_ONCE(current->active_memcg);
	current->active_memcg = memcg;
}

/**
 * memalloc_unuse_memcg - Ends the remote memcg charging scope.
 *
 * This function marks the end of the remote memcg charging scope started by
 * memalloc_use_memcg().
 */
static inline void memalloc_unuse_memcg(void)
{
	current->active_memcg = NULL;
}
#else
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
}

static inline void memalloc_unuse_memcg(void)
{
}
#endif
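
/*
 * Sketch of the remote charging scope (the caller is assumed to hold a
 * reference on @memcg for the duration of the scope; kmalloc() is just an
 * example of a __GFP_ACCOUNT user):
 *
 *	memalloc_use_memcg(memcg);
 *	ptr = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	memalloc_unuse_memcg();
 */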

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */