#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)

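/*
 * Worked example of the encoding (illustrative sketch, assuming a 64-bit
 * unsigned long, MAX_SWAPFILES_SHIFT == 5 and
 * RADIX_TREE_EXCEPTIONAL_SHIFT == 2): SWP_TYPE_SHIFT(e) is 64 - (5 + 2) = 57,
 * so `type' occupies the top seven bits and `offset' the low 57 bits:
 *
 *	swp_entry_t e = swp_entry(3, 0x1234);
 *	e.val == (3UL << 57) | 0x1234
 *	swp_type(e) == 3, swp_offset(e) == 0x1234
 */
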
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

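/*
 * Illustrative round trip (sketch, assuming the two spare high-order bits
 * reserved by SWP_TYPE_SHIFT above): shifting the value up by
 * RADIX_TREE_EXCEPTIONAL_SHIFT and OR-ing in RADIX_TREE_EXCEPTIONAL_ENTRY
 * tags the radix-tree slot so it cannot be mistaken for a page pointer, and
 * radix_to_swp_entry() recovers the value by shifting the tag bits back out:
 *
 *	swp_entry_t e = swp_entry(1, 0x10);
 *	void *slot = swp_to_radix_entry(e);
 *	radix_to_swp_entry(slot).val == e.val
 */
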
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);

#else

#define make_migration_entry(page, write) swp_entry(0, 0)

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

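/*
 * Explanatory note (sketch of why the check above works): migration and
 * hwpoison entries are encoded with `type' values at or above MAX_SWAPFILES
 * (see SWP_MIGRATION_READ/WRITE and SWP_HWPOISON in <linux/swap.h>), so no
 * real swap device can ever use those types and the comparison cleanly
 * separates special entries from genuine swap entries.
 */
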
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */