#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>

#include <linux/atomic.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000	/* discard swap cluster after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
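
/*
 * Worked example of the bit budget above: with a 32-bit pgoff_t, 5 type
 * bits leave 32 - 5 = 27 offset bits, i.e. at most 2^27 distinct page
 * offsets per swap type -- 512 GiB of swap with 4 KiB pages.
 */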

/*
 * Use some of the swap file numbers for other purposes.  This is a
 * convenient way to hook into the VM to trigger special actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM	2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM	0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM	1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM	0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
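
/*
 * Worked example: with CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE both
 * enabled, the 2^5 = 32 encodable types split up as:
 *
 *	MAX_SWAPFILES       = 32 - 2 - 1 = 29	(real swap areas, types 0..28)
 *	SWP_HWPOISON        = 29
 *	SWP_MIGRATION_READ  = 30
 *	SWP_MIGRATION_WRITE = 31
 */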

/*
 * Magic header for a swap area.  The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format; the second part of the union adds - in the
 * old reserved area - some extra information.  Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
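
/*
 * Illustrative sketch (not part of this header): detecting the swap
 * signature from userspace, as described above -- the magic string sits
 * in the last 10 bytes of the first page.  The "is_swap_page" name is
 * hypothetical.
 */
#if 0	/* example only */
#include <string.h>

static int is_swap_page(const char *page, size_t page_size)
{
	const char *magic = page + page_size - 10;

	/* "SWAP-SPACE" is the old v0 format, "SWAPSPACE2" the v1 format. */
	return memcmp(magic, "SWAP-SPACE", 10) == 0 ||
	       memcmp(magic, "SWAPSPACE2", 10) == 0;
}
#endif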

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
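
/*
 * For reference, the generic encode/decode helpers live in
 * <linux/swapops.h>; a condensed sketch (not compiled here) of how the
 * type lands in the top MAX_SWAPFILES_SHIFT bits and the offset in the
 * rest:
 */
#if 0	/* example only */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)

static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
		  (offset & SWP_OFFSET_MASK(ret));
	return ret;
}

static inline unsigned swp_type(swp_entry_t entry)
{
	return entry.val >> SWP_TYPE_SHIFT(entry);
}

static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}
#endif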

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Here the
 * term "swapfile" refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
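
/*
 * Illustrative sketch (hypothetical helper, not the kernel's
 * map_swap_page()): translating a swapfile page offset to its on-disk
 * block by walking the extent list.  Blocks are PAGE_SIZE units, per the
 * comment above.
 */
#if 0	/* example only */
static sector_t example_offset_to_block(struct list_head *extents,
					pgoff_t offset)
{
	struct swap_extent *se;

	list_for_each_entry(se, extents, list) {
		if (offset >= se->start_page &&
		    offset < se->start_page + se->nr_pages)
			return se->start_block + (offset - se->start_page);
	}
	return 0;	/* offset not mapped by any extent */
}
#endif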

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
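
/*
 * Worked example, assuming a 4 KiB page: magic.magic starts at
 * 4096 - 10 = 4086, info.badpages at 1024 + 3*4 + 2*16 + 117*4 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */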

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE	= (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that we're
 * allowing kswapd to shrink in addition to the per-zone high wmark, even
 * for zones that already have the high wmark satisfied, in order to
 * provide better per-zone lru behavior.  We are OK to spend no more than
 * 1% of the memory on this zone balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
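
/*
 * Worked example: a zone with 1 GiB present (262144 pages of 4 KiB) gets
 * a balance gap of at most 262144 / 100 ~= 2621 pages (~10 MiB) on top of
 * its high watermark.
 */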

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
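
/*
 * Sketch of how a swap_map byte decomposes, modeled on the static
 * swap_count() helper in mm/swapfile.c (the example_ names are
 * hypothetical): SWAP_HAS_CACHE flags a swapcache page, the remaining
 * bits carry the duplication count, and counts above SWAP_MAP_MAX spill
 * into continuation pages via COUNT_CONTINUED.
 */
#if 0	/* example only */
static inline unsigned char example_swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* count; may be SWAP_MAP_SHMEM */
}

static inline bool example_page_in_swapcache(unsigned char ent)
{
	return ent & SWAP_HAS_CACHE;
}
#endif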

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char  *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int	lowest_bit;	/* index of first free in swap_map */
	unsigned int	highest_bit;	/* index of last free in swap_map */
	unsigned int	pages;		/* total of usable pages of swap */
	unsigned int	inuse_pages;	/* number of those currently in use */
	unsigned int	cluster_next;	/* likely index for next allocation */
	unsigned int	cluster_nr;	/* countdown to next cluster search */
	unsigned int	lowest_alloc;	/* while preparing discard cluster */
	unsigned int	highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
};
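
/*
 * Illustrative sketch (hypothetical helpers) of how the bookkeeping
 * fields above relate: free slots are pages - inuse_pages, and a usable
 * area must carry both SWP_USED and SWP_WRITEOK.
 */
#if 0	/* example only */
static inline unsigned int example_swap_free_slots(struct swap_info_struct *si)
{
	return si->pages - si->inuse_pages;
}

static inline bool example_swap_writable(struct swap_info_struct *si)
{
	return (si->flags & (SWP_USED | SWP_WRITEOK)) ==
	       (SWP_USED | SWP_WRITEOK);
}
#endif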

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};

/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages * 2 < total_swap_pages)

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			      struct lruvec *lruvec);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

/**
 * lru_cache_add_anon: add a page to the inactive anon LRU list
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				       gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						 gfp_t gfp_mask, bool noswap,
						 struct zone *zone,
						 unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
				    void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_read(struct bio *bio, int err);

/* linux/mm/swap_state.c */
extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
					  struct vm_area_struct *vma,
					  unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
				     struct vm_area_struct *vma,
				     unsigned long addr);

/* linux/mm/swapfile.c */
extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif

#else /* CONFIG_SWAP */

#define nr_swap_pages		0L
#define total_swap_pages	0L
#define total_swapcache_pages	0UL

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file,
 * so leave page_cache_release and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0)

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
					    struct vm_area_struct *vma,
					    unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
				    gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */