#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
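
/*
 * Worked example (illustrative only; values assume a 64-bit arch with
 * MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2):
 *
 *	SWP_TYPE_SHIFT(e)  == 64 - (5 + 2) == 57
 *	SWP_OFFSET_MASK(e) == (1UL << 57) - 1
 *
 * so type 1, offset 42 packs as (1UL << 57) | 42, keeping the offset
 * dense in the low-order bits as the comment above requires.
 */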

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}
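
/*
 * Round-trip example (illustrative only): packing and unpacking recover
 * the original fields as long as the offset fits within
 * SWP_OFFSET_MASK():
 *
 *	swp_entry_t e = swp_entry(1, 42);
 *
 *	swp_type(e);	// == 1
 *	swp_offset(e);	// == 42
 */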

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif
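
/*
 * Typical usage (sketch only, not a complete fault path): page-table
 * walkers test is_swap_pte() before decoding, since a pte that is
 * neither none nor present is the only case pte_to_swp_entry() below
 * is defined for:
 *
 *	if (is_swap_pte(pte))
 *		entry = pte_to_swp_entry(pte);
 */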

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
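
/*
 * Note that the conversion is deliberately asymmetric: pte_to_swp_entry()
 * strips the soft-dirty bit before decoding, since the arch-independent
 * format has no room for it. A caller that must preserve soft-dirty
 * across the round trip re-applies it afterwards, e.g. (sketch only,
 * assuming the caller tracked the bit itself):
 *
 *	pte_t new = swp_entry_to_pte(entry);
 *	if (was_soft_dirty)
 *		new = pte_swp_mksoft_dirty(new);
 */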

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
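
/*
 * Encoding sketch (illustrative; assumes RADIX_TREE_EXCEPTIONAL_SHIFT == 2
 * and RADIX_TREE_EXCEPTIONAL_ENTRY == 2, per this kernel's radix-tree.h):
 *
 *	swp_entry_t e = swp_entry(1, 42);
 *	void *slot = swp_to_radix_entry(e);	// (e.val << 2) | 2
 *	swp_entry_t back = radix_to_swp_entry(slot);
 *
 * back.val == e.val; the low tag bits only mark the slot as an
 * exceptional entry rather than a page pointer, and are shifted back
 * out on the way in.
 */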

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}
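
/*
 * Lifecycle sketch (illustrative only): while a locked page is being
 * migrated, its ptes are replaced by migration entries that carry the
 * pfn in the offset field, and the page is recovered from the entry
 * when the mapping is restored:
 *
 *	swp_entry_t entry = make_migration_entry(page, 1);
 *	is_write_migration_entry(entry);	// == 1
 *	make_migration_entry_read(&entry);	// demote to read access
 *	migration_entry_to_page(entry);		// == page (still locked)
 */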

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
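
/*
 * Fault-path sketch (illustrative only): code that decodes a swap pte
 * and finds a migration entry waits for the migration to finish rather
 * than swapping in, along the lines of:
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		return;
 *	}
 */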
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
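
/*
 * The pmd variants mirror the pte-level pattern above, including the
 * soft-dirty stripping in pmd_to_swp_entry(). A huge-pmd fault handler
 * would check for a migration entry before touching the pmd (sketch
 * only; variable names are illustrative):
 *
 *	if (is_pmd_migration_entry(*pmdp))
 *		pmd_migration_entry_wait(mm, pmdp);
 */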
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}
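
/*
 * Usage sketch (illustrative only): when memory failure unmaps a poisoned
 * page, its ptes can be replaced with hwpoison entries so that a later
 * access faults and is recognized as touching poisoned memory:
 *
 *	swp_entry_t entry = make_hwpoison_entry(page);
 *	is_hwpoison_entry(entry);	// == 1
 *	non_swap_entry(entry);		// == 1: SWP_HWPOISON >= MAX_SWAPFILES
 */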

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
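
/*
 * Summary sketch (illustrative only): the special types used above all
 * live at or above MAX_SWAPFILES, so non_swap_entry() cheaply separates
 * genuine swap entries from migration and hwpoison markers:
 *
 *	non_swap_entry(swp_entry(0, 42));		// == 0
 *	non_swap_entry(make_migration_entry(page, 0));	// == 1
 *	non_swap_entry(make_hwpoison_entry(page));	// == 1
 */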

#endif /* _LINUX_SWAPOPS_H */