#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
struct notifier_block;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
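/*
 * For illustration (not part of the original header): with five type bits
 * and a 5/27 type/offset split, a swap entry can address 1 << 27 pages per
 * swap type, i.e. roughly 512 GiB of swap per area with 4 KiB pages
 * (2^27 * 4096 bytes), and up to 32 raw type values before some are
 * reserved for the special entries below.
 */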
/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on certain exceptional entries in the pte.
 *
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.txt. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif
/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif
#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
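/*
 * Worked example, assuming CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE are all enabled:
 *
 *	MAX_SWAPFILES = (1 << 5) - 2 - 2 - 1 = 27
 *
 * so 27 types remain for real swap areas while the top five type values
 * encode device-private, migration and hwpoison entries.
 */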
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct writeback_control;
/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
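/*
 * Worked example, assuming the union swap_header layout above and 4 KiB
 * pages: __swapoffset(magic.magic) = PAGE_SIZE - 10 = 4086 and
 * __swapoffset(info.badpages) = 1024 + 3 * 4 + 16 + 16 + 117 * 4 = 1536,
 * giving MAX_SWAP_BADPAGES = (4086 - 1536) / sizeof(int) = 637.
 */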
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
					/* add others here before... */
	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
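/*
 * Illustrative readings of a single swap_map byte (a sketch, not an
 * exhaustive list):
 *
 *	0x01		one pte references this swap slot
 *	0x41		SWAP_HAS_CACHE | 1: one reference plus a swapcache page
 *	0x3f		SWAP_MAP_BAD: the slot covers a bad block
 *	0x80 | n	COUNT_CONTINUED: the count overflowed and continues in
 *			a continuation page (see add_swap_count_continuation())
 */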
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores next cluster if the cluster is free or cluster usage
 * counter otherwise. The flags field determines if a cluster is free. This is
 * protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
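/*
 * Sketch of how a cluster's fields are read (illustrative only; si and idx
 * are hypothetical locals, and real accessors in mm/swapfile.c take the
 * cluster lock first):
 *
 *	struct swap_cluster_info *ci = &si->cluster_info[idx];
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next_free = ci->data;	(index of the next free cluster)
 *	else
 *		in_use = ci->data;	(pages allocated from this cluster)
 */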
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};
struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags need
					 * hold this lock and swap_lock. If
					 * both locks need hold, hold swap_lock
					 * first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
};
#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif
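/*
 * For illustration, assuming the CONFIG_64BIT split above: the ceiling
 * bounds the VMA readahead window at 1 << 5 = 32 pages on 64-bit kernels
 * and 1 << 3 = 8 pages on 32-bit kernels, where the smaller bound keeps the
 * on-stack pte cache (SWAP_RA_PTE_CACHE_SIZE entries) small.
 */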
struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};
/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
void workingset_update_node(struct radix_tree_node *node, void *private);
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);
/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
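/*
 * Worked example: with SWAP_ADDRESS_SPACE_SHIFT = 14 each address_space
 * covers 1 << 14 = 16384 slots, i.e. 64 MiB of swap with 4 KiB pages. A
 * swap entry of type 1 with offset 100000 therefore maps to
 * &swapper_spaces[1][100000 >> 14], i.e. &swapper_spaces[1][6].
 */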
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

extern struct page *swap_readahead_detect(struct vm_fault *vmf,
					  struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
					   struct vm_fault *vmf,
					   struct vma_swap_readahead *swap_ra);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);
static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline bool swap_use_vma_readahead(void)
{
	return false;
}

static inline struct page *swap_readahead_detect(
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline struct page *do_swap_page_readahead(
	swp_entry_t fentry, gfp_t gfp_mask,
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */
#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */