exit/exec: Separate mm_release()
include/linux/sched/mm.h (mirror_ubuntu-bionic-kernel.git)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

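/*
 * Usage sketch (illustrative only, not part of this header; the my_*
 * names are hypothetical): a driver that must remember which mm
 * created a context can pin the mm_struct itself with mmgrab(),
 * without keeping the whole address space alive:
 *
 *	struct my_ctx {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void my_ctx_init(struct my_ctx *ctx)
 *	{
 *		ctx->mm = current->mm;
 *		mmgrab(ctx->mm);
 *	}
 *
 *	static void my_ctx_destroy(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);
 *	}
 *
 * The pinned mm_struct stays allocated even after the owning task
 * exits, but its address space may already be torn down; take a
 * temporary mmget_not_zero() reference before touching user memory.
 */
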
static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_sem for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
 * It also has to be called when mmgrab() is used in the context of
 * the process, but then the mm_count refcount is transferred outside
 * the context of the process to run down_write() on that pinned mm.
 *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
 * for reading and outside the context of the process, so it is also
 * the only case that holds the mmap_sem for reading that must call
 * this function. Generally if the mmap_sem is held for reading
 * there's no need for this check after get_task_mm()/mmget_not_zero().
 *
 * This function can be obsoleted and the check removed once the
 * coredump code holds the mmap_sem for writing before invoking the
 * ->core_dump methods.
 */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}

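/*
 * Usage sketch (illustrative only, error handling trimmed): a writer
 * that obtained the mm via get_task_mm() must recheck under mmap_sem
 * held for writing before modifying vmas, so it cannot race with a
 * concurrent coredump:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (!mm)
 *		return -ESRCH;
 *	down_write(&mm->mmap_sem);
 *	if (mmget_still_valid(mm)) {
 *		... safe to modify the vmas here ...
 *	}
 *	up_write(&mm->mmap_sem);
 *	mmput(mm);
 */
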
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context. Can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

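/*
 * Usage sketch (illustrative only): given an mm that is merely pinned
 * with mmgrab(), the address space can be accessed safely by
 * temporarily elevating mm_users with mmget_not_zero():
 *
 *	if (mmget_not_zero(mm)) {
 *		... the mappings are guaranteed to stay around here,
 *		... so e.g. access_remote_vm() may be used ...
 *		mmput(mm);
 *	} else {
 *		... the owner already exited; the mappings are gone ...
 *	}
 */
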
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * Need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can yield a false negative. But we do not care; if init
	 * or another oom-unkillable task does this, it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}

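/*
 * Usage sketch (illustrative only): an allocation path can honour a
 * caller's PF_MEMALLOC_NOIO/PF_MEMALLOC_NOFS scope by filtering its
 * flags through current_gfp_context() before allocating:
 *
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *	struct page *page = alloc_pages(gfp, 0);
 *
 * Inside a memalloc_noio_save() section (see below) this strips
 * __GFP_IO and __GFP_FS, so the allocation cannot recurse into I/O or
 * filesystem reclaim.
 */
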
#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

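/*
 * Usage sketch (illustrative only; buf and size are hypothetical): the
 * memalloc_*_save()/restore() helpers mark a scope rather than a
 * single allocation, and they nest safely because the previous flag
 * state is returned and later restored:
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	buf = kmalloc(size, GFP_KERNEL);
 *	memalloc_noio_restore(noio_flags);
 *
 * Between save and restore, reclaim triggered by any allocation in
 * this task behaves as if GFP_NOIO had been passed, including for
 * allocations made in called functions. memalloc_nofs_save() and
 * memalloc_noreclaim_save() follow the same pattern for GFP_NOFS and
 * PF_MEMALLOC scopes respectively.
 */
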
#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
};

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
static inline void membarrier_execve(struct task_struct *t)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */