/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
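/*
 * Illustrative sketch (not part of this header): how user space might
 * combine these flags for the swapon(2) system call; the priority value
 * 10 is an arbitrary example.
 *
 *	int swap_flags = SWAP_FLAG_PREFER | SWAP_FLAG_DISCARD |
 *			 ((10 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
 *	swapon("/dev/sdb2", swap_flags);
 */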
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache. Using five bits
 * for the type limits the offset to 27 bits on 32-bit-pgoff_t
 * architectures, i.e. a maximum of 2^27 swapcache pages, and assumes that
 * the architecture packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
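/*
 * Worked example (illustrative): with MAX_SWAPFILES_SHIFT == 5 and a
 * 32-bit pgoff_t, 5 type bits leave 27 offset bits, so one swap type can
 * address at most 2^27 pages, i.e. 2^27 * 4 KiB = 512 GiB with 4 KiB pages.
 */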

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.txt. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
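/*
 * Worked example (illustrative): with CONFIG_DEVICE_PRIVATE,
 * CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all enabled,
 * MAX_SWAPFILES = (1 << 5) - 2 - 2 - 1 = 27; the top five type values
 * are reserved for the special swap entries defined above.
 */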

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
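/*
 * Minimal sketch (hypothetical helper, not part of this header): a swap
 * detector only has to compare the last ten bytes of the first page
 * against the two known signatures.
 *
 *	static bool page_is_swap_header(const union swap_header *hdr)
 *	{
 *		return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
 *		       !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
 *	}
 */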

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
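/*
 * Illustrative example (hypothetical values): an extent with
 * start_page = 100, nr_pages = 50 and start_block = 2000 says that
 * swapfile pages 100..149 live on disk blocks 2000..2049, so a swap page
 * P inside the extent maps to block start_block + (P - start_page).
 */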

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
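/*
 * Worked example (illustrative, PAGE_SIZE == 4096): __swapoffset(magic.magic)
 * is 4096 - 10 = 4086, __swapoffset(info.badpages) is
 * 1024 + 3 * 4 + 16 + 16 + 117 * 4 = 1536, so MAX_SWAP_BADPAGES is
 * (4086 - 1536) / sizeof(int) = 637 entries.
 */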

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* must not overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 11),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 12),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
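/*
 * Illustrative example: a swap_map byte packs the usage count into the
 * low bits alongside the flags above, so a hypothetical value of 0x43
 * (SWAP_HAS_CACHE | 3) means the slot is in the swap cache and has three
 * references. Counts that would exceed SWAP_MAP_MAX continue in
 * COUNT_CONTINUED continuation pages.
 */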

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields and the
				 * swap_info_struct->swap_map elements that
				 * correspond to this swap cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
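/*
 * Illustrative example (hypothetical values): a free cluster with
 * { .flags = CLUSTER_FLAG_FREE, .data = 42 } links to cluster 42 as the
 * next free cluster, while an allocated cluster with { .flags = 0,
 * .data = 7 } has 7 of its SWAPFILE_CLUSTER entries in use.
 */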

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc and the free/discard
					 * cluster lists. Other fields are only
					 * changed at swapon/swapoff, so are
					 * protected by swap_lock. Changing
					 * flags requires holding both this
					 * lock and swap_lock; when both locks
					 * are needed, take swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[0]; /*
					 * entries in swap_avail_heads, one
					 * entry per node.
					 * Must be last: the array length is
					 * nr_node_ids, which is not a fixed
					 * value, so it has to be allocated
					 * dynamically.
					 * And it has to be an array so that
					 * plist_for_each_* can work.
					 */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);

/* Do not use directly, use workingset_lookup_update */
void workingset_update_node(struct radix_tree_node *node);

/* Returns workingset_update_node() if the mapping has shadow entries. */
#define workingset_lookup_update(mapping)				\
({									\
	radix_tree_update_node_t __helper = workingset_update_node;	\
	if (dax_mapping(mapping) || shmem_mapping(mapping))		\
		__helper = NULL;					\
	__helper;							\
})

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
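/*
 * Worked example (illustrative, 4 KiB pages): 2^SWAP_ADDRESS_SPACE_SHIFT
 * = 16384 pages = 64 MiB of swap per address space, so an entry with
 * swp_offset(entry) == 20000 lands in swapper_spaces[type][20000 >> 14],
 * the second address space of its swap type.
 */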
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

extern struct page *swap_readahead_detect(struct vm_fault *vmf,
					  struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
					   struct vm_fault *vmf,
					   struct vma_swap_readahead *swap_ra);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
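/*
 * Illustrative example: nr_swap_pages counts pages that are still free,
 * so with total_swap_pages == 1000 and 400 pages free, 400 * 2 < 1000
 * and vm_swap_full() returns true: over half of swap is in use.
 */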

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline bool swap_use_vma_readahead(void)
{
	return false;
}

static inline struct page *swap_readahead_detect(
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline struct page *do_swap_page_readahead(
	swp_entry_t fentry, gfp_t gfp_mask,
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */