/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on 32-bit-pgoff_t architectures. This assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

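/*
 * Illustrative sketch (not part of this header): how a 5-bit type and a
 * 27-bit offset could be packed into a single value, mirroring the 5/27
 * split described above.  The kernel's real accessors are swp_entry(),
 * swp_type() and swp_offset() in <linux/swapops.h>; the helpers below use
 * hypothetical names and exist only to show the arithmetic.
 */
#if 0 /* example only, never compiled */
#define EXAMPLE_TYPE_SHIFT	27	/* offset occupies the low 27 bits */

static inline unsigned long example_pack(unsigned int type, unsigned long offset)
{
	/* type in the top 5 bits, offset in the bottom 27 bits */
	return ((unsigned long)type << EXAMPLE_TYPE_SHIFT) |
	       (offset & ((1UL << EXAMPLE_TYPE_SHIFT) - 1));
}

static inline unsigned int example_type(unsigned long val)
{
	return val >> EXAMPLE_TYPE_SHIFT;
}

static inline unsigned long example_offset(unsigned long val)
{
	return val & ((1UL << EXAMPLE_TYPE_SHIFT) - 1);
}
#endif
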
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.txt. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

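/*
 * Worked example (illustrative, assuming all three options are enabled):
 * with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE
 * all set, the 2^5 = 32 encodable types split into
 *
 *	MAX_SWAPFILES = 32 - 2 (device) - 2 (migration) - 1 (hwpoison) = 27
 *
 * real swap files, with types 27..31 reserved for the special entries
 * defined above.  With none of the options set, all 32 types are usable
 * for swap files.
 */
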
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

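/*
 * Illustrative sketch (not part of this header): because the magic string
 * sits in the last 10 bytes of the page, code that has read the first page
 * of a candidate swap area into a swap_header could validate it roughly as
 * below.  The real check is done by mkswap and by the swapon path in
 * mm/swapfile.c; the helper name here is hypothetical.
 */
#if 0 /* example only, never compiled */
#include <linux/string.h>

static int example_is_swap_signature(const union swap_header *hdr)
{
	/* "SWAPSPACE2" is the current format, "SWAP-SPACE" the old one */
	return memcmp(hdr->magic.magic, "SWAPSPACE2", 10) == 0 ||
	       memcmp(hdr->magic.magic, "SWAP-SPACE", 10) == 0;
}
#endif
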
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))

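/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096):
 * offsetof(union swap_header, magic.magic) is PAGE_SIZE - 10 = 4086 and
 * offsetof(union swap_header, info.badpages) is
 * 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so
 *
 *	MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637
 *
 * bad-page slots fit between the fixed header fields and the magic string.
 */
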
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite of PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 11),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 12),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */

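/*
 * Illustrative sketch (not part of this header): how a single swap_map
 * byte built from the constants above could be decoded.  The low bits
 * carry the duplication count (capped at SWAP_MAP_MAX before a
 * continuation page takes over), SWAP_HAS_CACHE marks the slot as also
 * held by the swap cache, and COUNT_CONTINUED says the full count
 * continues on a continuation page.  The helper names are hypothetical;
 * the real logic lives in mm/swapfile.c.
 */
#if 0 /* example only, never compiled */
static inline bool example_has_cache(unsigned char ent)
{
	return ent & SWAP_HAS_CACHE;
}

static inline bool example_count_continued(unsigned char ent)
{
	return ent & COUNT_CONTINUED;
}

static inline unsigned char example_count(unsigned char ent)
{
	/* strip the flag bits, leaving the in-place duplication count */
	return ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
}
#endif
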
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields and the
				 * swap_info_struct->swap_map elements that
				 * correspond to this swap cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

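/*
 * Illustrative sketch (not part of this header): how the data/flags
 * bitfields above could be read while holding swap_cluster_info.lock.
 * The helper names are hypothetical; the kernel's own accessors
 * (cluster_is_free(), cluster_count() and friends) are local to
 * mm/swapfile.c.
 */
#if 0 /* example only, never compiled */
static inline bool example_cluster_is_free(struct swap_cluster_info *ci)
{
	return ci->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int example_cluster_data(struct swap_cluster_info *ci)
{
	/* index of the next free cluster when free, usage count otherwise */
	return ci->data;
}
#endif
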
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * Protects map-scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc and the free/discard
					 * cluster lists. Other fields are only
					 * changed at swapon/swapoff, so they
					 * are protected by swap_lock. Changing
					 * flags needs both this lock and
					 * swap_lock; if both locks are needed,
					 * take swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[0]; /*
					   * entries in swap_avail_heads, one
					   * entry per node.
					   * Must be last, as the length of the
					   * array is nr_node_ids, which is not
					   * a fixed value, so it has to be
					   * allocated dynamically.
					   * And it has to be an array so that
					   * plist_for_each_* can work.
					   */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);

/* Do not use directly, use workingset_lookup_update */
void workingset_update_node(struct radix_tree_node *node);

/* Returns workingset_update_node() if the mapping has shadow entries. */
#define workingset_lookup_update(mapping)				\
({									\
	radix_tree_update_node_t __helper = workingset_update_node;	\
	if (dax_mapping(mapping) || shmem_mapping(mapping))		\
		__helper = NULL;					\
	__helper;							\
})

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
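
/*
 * Worked example (illustrative): SWAP_ADDRESS_SPACE_SHIFT is 14, so with
 * 4KiB pages one address_space covers 2^14 pages = 64MiB of swap.  For a
 * swap entry with type 1 and offset 0x12345, swap_address_space() picks
 * swapper_spaces[1][0x12345 >> 14], i.e. the fifth address_space (index 4)
 * of swap device 1.
 */
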
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

extern struct page *swap_readahead_detect(struct vm_fault *vmf,
					  struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
					   struct vm_fault *vmf,
					   struct vma_swap_readahead *swap_ra);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/* Swap 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

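/*
 * Worked example (illustrative): nr_swap_pages counts free swap slots, so
 * with total_swap_pages = 1000 the check above fires once fewer than 500
 * slots remain free, e.g. 499 * 2 = 998 < 1000.  From that point on the
 * swap cache is reclaimed more aggressively.
 */
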
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline bool swap_use_vma_readahead(void)
{
	return false;
}

static inline struct page *swap_readahead_detect(
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline struct page *do_swap_page_readahead(
	swp_entry_t fentry, gfp_t gfp_mask,
	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}

#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */