/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
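
/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit build, BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5, so
 * SWP_TYPE_SHIFT is 58 and SWP_OFFSET_MASK covers the low 58 bits.  Then:
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);
 *	// e.val == (2UL << 58) | 0x1234
 *	// swp_type(e) == 2, swp_offset(e) == 0x1234
 *
 * The exact shift is architecture-dependent; the numbers above are only a
 * sketch for the common 64-bit case.
 */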
/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}
/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}
/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
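
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * hypothetical caller that finds a non-present pte typically decodes it as
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *		// ... inspect swp_type(entry) / swp_offset(entry) ...
 *	}
 *
 * and uses swp_entry_to_pte() for the reverse direction when installing a
 * swap entry into a page table.
 */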
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}
static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
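
/*
 * Illustrative round trip (assumption about typical use, e.g. by shmem):
 * swap entries are stored in an address_space's xarray as value entries,
 *
 *	void *val = swp_to_radix_entry(entry);
 *	// ... stored in and later loaded back from the xarray ...
 *	swp_entry_t back = radix_to_swp_entry(val);
 *
 * where back.val == entry.val, since xa_mk_value()/xa_to_value() are
 * inverses for values that fit in BITS_PER_XA_VALUE bits.
 */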
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}
static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}
static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}
static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
		swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}
static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}
static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}
static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */
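
/*
 * Illustrative sketch (not from the original header): a driver migrating a
 * page to unaddressable device memory replaces the CPU mapping with a
 * device private entry along these lines:
 *
 *	swp_entry_t entry = make_writable_device_private_entry(page_to_pfn(page));
 *	pte_t swp_pte = swp_entry_to_pte(entry);
 *	// install swp_pte in place of the old mapping
 *
 * A later CPU fault on that address sees the entry and can call back into
 * the driver to migrate the data back.
 */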
#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif /* CONFIG_MIGRATION */
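
/*
 * Illustrative sketch (not from the original header): page migration
 * temporarily replaces each mapping pte with a migration entry, roughly
 *
 *	swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *
 * and a thread faulting on such a pte calls migration_entry_wait() to sleep
 * until the migration finishes and the pte is restored.
 */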
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}
/*
 * A pfn swap entry is a special type of swap entry that always has a pfn stored
 * in the swap offset. They are used to represent unaddressable device memory
 * and to restrict access to a page undergoing migration.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry);
}
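
/*
 * Example (illustrative): given an entry decoded from a non-present pte,
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *	struct page *page;
 *
 *	if (is_pfn_swap_entry(entry))
 *		page = pfn_swap_entry_to_page(entry);
 *
 * yields the struct page whose pfn is encoded in the offset field.
 */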
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
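
/*
 * Illustrative check (assumption about typical callers): huge page table
 * walkers test for a PMD under migration before touching it, e.g.
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 *
 * which parallels the pte-level migration_entry_wait() above.
 */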
#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;
/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}
static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}
static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}
static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}
static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}
#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}
static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
static inline void num_poisoned_pages_inc(void)
{
}
#endif /* CONFIG_MEMORY_FAILURE */
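
/*
 * Illustrative sketch (assumption, not from the original header): the memory
 * failure path can leave a hwpoison entry behind when unmapping a poisoned
 * page, roughly
 *
 *	swp_entry_t entry = make_hwpoison_entry(page);
 *	pte_t pte = swp_entry_to_pte(entry);
 *	// install pte so later faults report the poison
 *
 * instead of letting the bad address be re-accessed.
 */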
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
    defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
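
/*
 * Example (illustrative): non_swap_entry() separates real swap entries,
 * whose type indexes a swap file (< MAX_SWAPFILES), from the special entry
 * types above, which are carved out at and beyond MAX_SWAPFILES.  A
 * hypothetical fault handler might branch on it like
 *
 *	if (non_swap_entry(entry) && is_migration_entry(entry))
 *		migration_entry_wait(mm, pmd, address);
 */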
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */