/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
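
/*
 * Minimal usage sketch for the mmgrab()/mmdrop() pair (illustrative
 * only; my_pin_mm() is a hypothetical caller, not part of this
 * header): pin the mm_struct across a window where the owning task
 * may exit. The mm_struct itself stays allocated, but the address
 * space may be torn down, so mmget_not_zero() must still be used
 * before the mappings themselves are accessed.
 *
 *	static void my_pin_mm(struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		... task may exit here; mm stays allocated ...
 *		mmdrop(mm);
 *	}
 */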

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);

	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_sem for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
 * It also has to be called when mmgrab() is used in the context of
 * the process, but then the mm_count refcount is transferred outside
 * the context of the process to run down_write() on that pinned mm.
 *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
 * for reading and outside the context of the process, so it is also
 * the only case that holds the mmap_sem for reading that must call
 * this function. Generally if the mmap_sem is held for reading
 * there's no need for this check after get_task_mm()/mmget_not_zero().
 *
 * This function can be obsoleted and the check can be removed once
 * the coredump code holds the mmap_sem for writing before invoking
 * the ->core_dump methods.
 */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}
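
/*
 * The pattern described above, as a sketch (my_update_vmas() and the
 * "out" label are hypothetical, not part of this header): after
 * pinning the mm and taking mmap_sem for writing, back off if a
 * coredump is in flight before touching any vmas.
 *
 *	mm = get_task_mm(task);
 *	if (!mm)
 *		goto out;
 *	down_write(&mm->mmap_sem);
 *	if (mmget_still_valid(mm))
 *		my_update_vmas(mm);	(safe: no coredump in flight)
 *	up_write(&mm->mmap_sem);
 *	mmput(mm);
 */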

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
 * be called from atomic context as well
 */
void mmput_async(struct mm_struct *);
#endif
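
/*
 * Typical mmget_not_zero()/mmput() pairing, as an illustrative sketch
 * (the access in the middle is hypothetical): take a temporary
 * mm_users reference only if the address space has not already begun
 * tearing down, and drop it when done.
 *
 *	if (mmget_not_zero(mm)) {
 *		... access the address space ...
 *		mmput(mm);
 *	}
 */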

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
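
/*
 * Illustrative caller of mm_access() (sketch; the mode shown is one of
 * the ptrace access modes, and the work in the middle is
 * hypothetical). mm_access() can return NULL or an ERR_PTR, so both
 * must be checked before use.
 *
 *	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *	if (!IS_ERR_OR_NULL(mm)) {
 *		... read from the address space ...
 *		mmput(mm);
 *	}
 */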
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
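
/*
 * Illustrative in_vfork() check (hypothetical caller, in the spirit of
 * the OOM killer's use): a task in the middle of vfork() shares its
 * parent's mm, so killing it would not release memory the parent still
 * needs.
 *
 *	if (in_vfork(tsk))
 *		return;		(do not select this task)
 */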

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
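
/*
 * Illustrative use of current_gfp_context() (sketch; the allocation is
 * hypothetical): callers that must honour a task's NOIO/NOFS scope
 * filter their gfp mask before allocating.
 *
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *	page = alloc_pages(gfp, 0);
 */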

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
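
/*
 * Typical scoped use of the save/restore pair (illustrative sketch;
 * the allocations in the middle are hypothetical). Saving the previous
 * flag value keeps nesting safe: restore only clears the bit if it was
 * clear on entry.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	... allocations here implicitly behave as GFP_NOIO ...
 *	memalloc_noio_restore(noio_flags);
 */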

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
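
/*
 * The NOFS and noreclaim pairs above follow the same save/restore
 * discipline as the NOIO pair, and the scopes nest cleanly
 * (illustrative sketch):
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	unsigned int noio_flags = memalloc_noio_save();
 *	... a GFP_NOIO region inside a GFP_NOFS one ...
 *	memalloc_noio_restore(noio_flags);
 *	memalloc_nofs_restore(nofs_flags);
 */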

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY	= (1U << 0),
	MEMBARRIER_STATE_SWITCH_MM			= (1U << 1),
};

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
static inline void membarrier_execve(struct task_struct *t)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */