#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>
struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES	0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
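
/*
 * Worked example (illustration only, assuming 4 KiB pages): with a 5/27
 * type/offset split, a single swap area can index at most 1 << 27 pages,
 * i.e. 128M entries * 4 KiB = 512 GiB of swap space per swap type.
 */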

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
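
/*
 * Worked example (illustration only): with both CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE enabled, MAX_SWAPFILES = 32 - 2 - 1 = 29 real
 * swap areas; type 29 encodes hwpoison and types 30/31 encode the
 * migration read/write pseudo-entries.
 */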

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
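
/*
 * Illustration (not part of the original header): a device formatted by
 * mkswap is recognized by the 10-byte magic string at the very end of the
 * first page, e.g. for the current format:
 *
 *	memcmp(header->magic.magic, "SWAPSPACE2", 10) == 0
 */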

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
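
/*
 * Illustration (the helpers live in <linux/swapops.h>, not here): the type
 * and offset are packed into and out of a swp_entry_t with
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned type     = swp_type(entry);
 *	pgoff_t offset    = swp_offset(entry);
 */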

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range
 * of disk blocks.  A list of swap extents maps the entire swapfile.  (Where
 * the term `swapfile' refers to either a blockdevice or an IS_REG file.
 * Apart from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
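
/*
 * Worked example (illustration only, assuming 4 KiB pages): __swapoffset()
 * is an open-coded offsetof(), so
 *
 *	__swapoffset(magic.magic)   = 4096 - 10 = 4086
 *	__swapoffset(info.badpages) = 1024 + 3*4 + 2*16 + 117*4 = 1536
 *	MAX_SWAP_BADPAGES           = (4086 - 1536) / 4 = 637
 */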

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE	= (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between zone->managed_pages and the "gap" above the per-zone
 * "high_wmark".  While balancing nodes, we allow kswapd to shrink zones
 * that do not meet the (high_wmark + gap) watermark, even if they already
 * meet the high_wmark, in order to provide better per-zone lru behavior.
 * We are ok to spend no more than 1% of the memory on this zone balancing
 * "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
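
/*
 * Worked example (illustration only; the reclaim code does the exact
 * rounding): for a zone with 1,048,576 managed pages (4 GiB at 4 KiB per
 * page), the balance gap is about 1%, i.e. ~10,486 pages (~41 MiB) on top
 * of the zone's high watermark.
 */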

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
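
/*
 * Worked example (illustration only): a swap entry shared by 100 mappings
 * stores SWAP_MAP_MAX (0x3e = 62) | COUNT_CONTINUED in its first swap_map
 * byte, with the remaining count of 100 - 62 = 38 kept in a continuation
 * page (see add_swap_count_continuation() below).
 */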

/*
 * We use this to track usage of a cluster. A cluster is a block of swap
 * disk space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list
 * to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
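
/*
 * Illustration (not part of the original header): interpreting an entry
 * of the cluster_info array:
 *
 *	if (info->flags & CLUSTER_FLAG_FREE)
 *		next_free = info->data;	// index of the next free cluster
 *	else
 *		in_use = info->data;	// pages allocated in this cluster
 */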

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next;		/* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_info free_cluster_head; /* free cluster list head */
	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * to hold this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
extern struct list_lru workingset_shadow_nodes;

static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
	return node->count & RADIX_TREE_COUNT_MASK;
}

static inline void workingset_node_pages_inc(struct radix_tree_node *node)
{
	node->count++;
}

static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
	node->count--;
}

static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
{
	return node->count >> RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
{
	node->count += 1U << RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
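
/*
 * Illustration (not part of the original header): node->count packs two
 * counters, with the low RADIX_TREE_COUNT_SHIFT bits counting present
 * pages and the high bits counting shadow (eviction) entries:
 *
 *	pages   = node->count & RADIX_TREE_COUNT_MASK;
 *	shadows = node->count >> RADIX_TREE_COUNT_SHIFT;
 */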

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			      struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void init_page_accessed(struct page *page);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						 gfp_t gfp_mask, bool noswap,
						 struct zone *zone,
						 unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
				    void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
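
/*
 * Worked example (illustration only): nr_swap_pages counts *free* swap
 * pages, so with 262,144 pages (1 GiB at 4 KiB/page) of total swap,
 * vm_swap_full() becomes true once fewer than 131,072 pages remain free,
 * i.e. once the swap area is more than half used.
 */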

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file,
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false)

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)
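
/*
 * With CONFIG_SWAP disabled, swp_entry_t values still occur as migration
 * entries (see <linux/swapops.h>), so the stubs above reduce to
 * is_migration_entry() rather than to 0.
 */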

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */