/* include/linux/sched/mm.h */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/sched.h>
#include <linux/mm_types.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

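/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypothetical background worker pins @mm with mmgrab() so the
 * mm_struct itself outlives the owning task, and later releases it
 * with mmdrop(). Note that mm_count only keeps the mm_struct
 * allocated; the address space may already be torn down, so the
 * consumer still needs mmget_not_zero() before touching any mappings.
 *
 *	static void my_start_watching(struct mm_struct *mm)	// hypothetical helper
 *	{
 *		mmgrab(mm);		// mm_count++: mm_struct stays allocated
 *		my_queue_for_later(mm);	// hypothetical deferred consumer
 *	}
 *
 *	static void my_stop_watching(struct mm_struct *mm)	// hypothetical helper
 *	{
 *		mmdrop(mm);		// mm_count--: frees mm_struct on last reference
 *	}
 */
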
static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

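/*
 * Usage sketch (illustrative, not part of the original header):
 * mmdrop_async() behaves like mmdrop(), but when it drops the last
 * reference it defers __mmdrop() to a workqueue via
 * @mm->async_put_work instead of tearing the mm down synchronously.
 * A caller for whom the synchronous teardown is undesirable at that
 * point would release its pin with:
 *
 *	mmdrop_async(mm);	// if last ref, __mmdrop() runs later
 *				// from workqueue context, not here
 */
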
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

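/*
 * Usage sketch (illustrative, not part of the original header): the
 * typical way to go from a bare mm_count pin to a usable address
 * space. mmget_not_zero() fails once the last user has gone, so the
 * address space is only guaranteed alive between a successful call
 * and the matching mmput().
 *
 *	if (mmget_not_zero(mm)) {	// mm_users was > 0: space is alive
 *		// ... walk or map parts of the address space ...
 *		mmput(mm);		// drop the mm_users reference
 *	} else {
 *		// address space already gone; only the mm_struct remains
 *	}
 */
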
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from async context. Can
 * be called from atomic context as well.
 */
extern void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);

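/*
 * Usage sketch (illustrative, not part of the original header):
 * get_task_mm() returns NULL for kernel threads and for tasks whose
 * address space is already going away, so the result must be checked
 * and balanced with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		// ... inspect the task's address space ...
 *		mmput(mm);
 *	}
 */
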
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

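/*
 * Usage sketch (illustrative, not part of the original header):
 * mm_access() returns NULL when @task has no address space, an
 * ERR_PTR() when the ptrace check fails, and a pinned mm otherwise,
 * so callers typically handle all three cases:
 *
 *	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *
 *	if (IS_ERR_OR_NULL(mm))
 *		return mm ? PTR_ERR(mm) : -ESRCH;	// denied, or no mm
 *	// ... use the address space ...
 *	mmput(mm);
 */
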
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#endif /* _LINUX_SCHED_MM_H */