/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device **);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static PLIST_HEAD(swap_avail_head);
static DEFINE_SPINLOCK(swap_avail_lock);

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
}
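
/*
 * Each swap_map[] byte packs a reference count in its low bits together
 * with the SWAP_HAS_CACHE flag: for example, an entry that is mapped
 * once and also sits in the swap cache holds (1 | SWAP_HAS_CACHE), and
 * swap_count() above strips the flag to recover the bare count of 1.
 */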

/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map(), which is reached
	 * from vmscan.c while reclaiming pages, so a page lock may already
	 * be held: use trylock to avoid deadlock. This is a special case;
	 * in usual operations, use try_to_free_swap() with an explicit
	 * lock_page().
	 */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
	se = &si->first_swap_extent;
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	list_for_each_entry(se, &si->first_swap_extent.list, list) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}
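
/*
 * The (PAGE_SHIFT - 9) shifts above convert page numbers to the 512-byte
 * sectors that blkdev_issue_discard() expects: with a common 4KiB page
 * size (PAGE_SHIFT == 12), each page covers 1 << 3 == 8 sectors, so
 * e.g. swap page 10 starts at sector 80.
 */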

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = si->curr_swap_extent;
	int found_extent = 0;

	while (nr_pages) {
		if (se->start_page <= start_page &&
		    start_page < se->start_page + se->nr_pages) {
			pgoff_t offset = start_page - se->start_page;
			sector_t start_block = se->start_block + offset;
			sector_t nr_blocks = se->nr_pages - offset;

			if (nr_blocks > nr_pages)
				nr_blocks = nr_pages;
			start_page += nr_blocks;
			nr_pages -= nr_blocks;

			if (!found_extent++)
				si->curr_swap_extent = se;

			start_block <<= PAGE_SHIFT - 9;
			nr_blocks <<= PAGE_SHIFT - 9;
			if (blkdev_issue_discard(si->bdev, start_block,
				    nr_blocks, GFP_NOIO, 0))
				break;
		}

		se = list_next_entry(se, list);
	}
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256

static inline void cluster_set_flag(struct swap_cluster_info *info,
				    unsigned int flag)
{
	info->flags = flag;
}

static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_count(struct swap_cluster_info *info,
				     unsigned int c)
{
	info->data = c;
}

static inline void cluster_set_count_flag(struct swap_cluster_info *info,
					  unsigned int c, unsigned int f)
{
	info->flags = f;
	info->data = c;
}

static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_next(struct swap_cluster_info *info,
				    unsigned int n)
{
	info->data = n;
}

static inline void cluster_set_next_flag(struct swap_cluster_info *info,
					 unsigned int n, unsigned int f)
{
	info->flags = f;
	info->data = n;
}

static inline bool cluster_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}

static inline bool cluster_is_null(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_NEXT_NULL;
}

static inline void cluster_set_null(struct swap_cluster_info *info)
{
	info->flags = CLUSTER_FLAG_NEXT_NULL;
	info->data = 0;
}

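/*
 * The accessors above overload swap_cluster_info->data: while a cluster
 * is in use, data holds its usage count (cluster_count()); while it sits
 * on the free or discard list, data holds the index of the next cluster
 * in that list (cluster_next()), with info->flags telling the two cases
 * apart. That is why each setter also has a *_flag variant.
 */
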
static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
						     unsigned long offset)
{
	struct swap_cluster_info *ci;

	ci = si->cluster_info;
	if (ci) {
		ci += offset / SWAPFILE_CLUSTER;
		spin_lock(&ci->lock);
	}
	return ci;
}

static inline void unlock_cluster(struct swap_cluster_info *ci)
{
	if (ci)
		spin_unlock(&ci->lock);
}

static inline struct swap_cluster_info *lock_cluster_or_swap_info(
	struct swap_info_struct *si,
	unsigned long offset)
{
	struct swap_cluster_info *ci;

	ci = lock_cluster(si, offset);
	if (!ci)
		spin_lock(&si->lock);

	return ci;
}

static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
					       struct swap_cluster_info *ci)
{
	if (ci)
		unlock_cluster(ci);
	else
		spin_unlock(&si->lock);
}

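/*
 * lock_cluster_or_swap_info() gives callers a single idiom for both
 * kinds of device: with a cluster_info array (the SSD case) it takes
 * only the per-cluster spinlock covering SWAPFILE_CLUSTER entries, and
 * without one it falls back to the coarse si->lock. A NULL return from
 * lock_cluster() thus means "no cluster_info", not an error.
 */
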
static inline bool cluster_list_empty(struct swap_cluster_list *list)
{
	return cluster_is_null(&list->head);
}

static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
{
	return cluster_next(&list->head);
}

static void cluster_list_init(struct swap_cluster_list *list)
{
	cluster_set_null(&list->head);
	cluster_set_null(&list->tail);
}

static void cluster_list_add_tail(struct swap_cluster_list *list,
				  struct swap_cluster_info *ci,
				  unsigned int idx)
{
	if (cluster_list_empty(list)) {
		cluster_set_next_flag(&list->head, idx, 0);
		cluster_set_next_flag(&list->tail, idx, 0);
	} else {
		struct swap_cluster_info *ci_tail;
		unsigned int tail = cluster_next(&list->tail);

		/*
		 * Nested cluster lock, but both cluster locks are
		 * only acquired when we held swap_info_struct->lock
		 */
		ci_tail = ci + tail;
		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
		cluster_set_next(ci_tail, idx);
		unlock_cluster(ci_tail);
		cluster_set_next_flag(&list->tail, idx, 0);
	}
}

static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
					   struct swap_cluster_info *ci)
{
	unsigned int idx;

	idx = cluster_next(&list->head);
	if (cluster_next(&list->tail) == idx) {
		cluster_set_null(&list->head);
		cluster_set_null(&list->tail);
	} else
		cluster_set_next_flag(&list->head,
				      cluster_next(&ci[idx]), 0);

	return idx;
}

/* Add a cluster to the discard list and schedule the discard work */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
		unsigned int idx)
{
	/*
	 * If scan_swap_map() can't find a free cluster, it will check
	 * si->swap_map directly. To make sure the discarding cluster isn't
	 * taken by scan_swap_map(), mark the swap entries bad (occupied);
	 * they will be cleared after the discard.
	 */
	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
			SWAP_MAP_BAD, SWAPFILE_CLUSTER);

	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

	schedule_work(&si->discard_work);
}

/*
 * Actually do the discards. After a cluster discard is finished, the
 * cluster will be added to the free cluster list. Caller should hold
 * si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
	struct swap_cluster_info *info, *ci;
	unsigned int idx;

	info = si->cluster_info;

	while (!cluster_list_empty(&si->discard_clusters)) {
		idx = cluster_list_del_first(&si->discard_clusters, info);
		spin_unlock(&si->lock);

		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
				SWAPFILE_CLUSTER);

		spin_lock(&si->lock);
		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
		cluster_set_flag(ci, CLUSTER_FLAG_FREE);
		unlock_cluster(ci);
		cluster_list_add_tail(&si->free_clusters, info, idx);
		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
				0, SWAPFILE_CLUSTER);
		unlock_cluster(ci);
	}
}

static void swap_discard_work(struct work_struct *work)
{
	struct swap_info_struct *si;

	si = container_of(work, struct swap_info_struct, discard_work);

	spin_lock(&si->lock);
	swap_do_scheduled_discard(si);
	spin_unlock(&si->lock);
}

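/*
 * Discard is thereby split in two: swap_cluster_schedule_discard() runs
 * in allocation context and only queues the cluster, poisoning its
 * swap_map range with SWAP_MAP_BAD, while the potentially slow
 * blkdev_issue_discard() happens later in the workqueue, which drops
 * si->lock around the I/O so allocation from other clusters can proceed
 * meanwhile.
 */
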
/*
 * The cluster corresponding to page_nr will be used. The cluster will be
 * removed from the free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;
	if (cluster_is_free(&cluster_info[idx])) {
		VM_BUG_ON(cluster_list_first(&p->free_clusters) != idx);
		cluster_list_del_first(&p->free_clusters, cluster_info);
		cluster_set_count_flag(&cluster_info[idx], 0, 0);
	}

	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) + 1);
}

/*
 * The cluster corresponding to page_nr decreases one usage. If the usage
 * counter becomes 0, meaning no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;

	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) - 1);

	if (cluster_count(&cluster_info[idx]) == 0) {
		/*
		 * If the swap is discardable, prepare to discard the
		 * cluster instead of freeing it immediately. The cluster
		 * will be freed after the discard.
		 */
		if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
		    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
			swap_cluster_schedule_discard(p, idx);
			return;
		}

		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
		cluster_list_add_tail(&p->free_clusters, cluster_info, idx);
	}
}

/*
 * It's possible for scan_swap_map() to use a free cluster from the
 * middle of the free cluster list. Avoid such abuse to prevent list
 * corruption.
 */
static bool
scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
	unsigned long offset)
{
	struct percpu_cluster *percpu_cluster;
	bool conflict;

	offset /= SWAPFILE_CLUSTER;
	conflict = !cluster_list_empty(&si->free_clusters) &&
		offset != cluster_list_first(&si->free_clusters) &&
		cluster_is_free(&si->cluster_info[offset]);

	if (!conflict)
		return false;

	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
	cluster_set_null(&percpu_cluster->index);
	return true;
}

/*
 * Try to get a swap entry from the current cpu's swap entry pool (a
 * cluster). This might involve allocating a new cluster for the current
 * CPU too.
 */
static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
	unsigned long *offset, unsigned long *scan_base)
{
	struct percpu_cluster *cluster;
	struct swap_cluster_info *ci;
	bool found_free;
	unsigned long tmp, max;

new_cluster:
	cluster = this_cpu_ptr(si->percpu_cluster);
	if (cluster_is_null(&cluster->index)) {
		if (!cluster_list_empty(&si->free_clusters)) {
			cluster->index = si->free_clusters.head;
			cluster->next = cluster_next(&cluster->index) *
					SWAPFILE_CLUSTER;
		} else if (!cluster_list_empty(&si->discard_clusters)) {
			/*
			 * we don't have a free cluster but have some
			 * clusters being discarded; do the discard now
			 * and reclaim them
			 */
			swap_do_scheduled_discard(si);
			*scan_base = *offset = si->cluster_next;
			goto new_cluster;
		} else
			return false;
	}

	found_free = false;

	/*
	 * Other CPUs can use our cluster if they can't find a free cluster;
	 * check if there are still free entries in the cluster.
	 */
	tmp = cluster->next;
	max = min_t(unsigned long, si->max,
		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
	if (tmp >= max) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	ci = lock_cluster(si, tmp);
	while (tmp < max) {
		if (!si->swap_map[tmp]) {
			found_free = true;
			break;
		}
		tmp++;
	}
	unlock_cluster(ci);
	if (!found_free) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	cluster->next = tmp + 1;
	*offset = tmp;
	*scan_base = tmp;
	return found_free;
}

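/*
 * Each CPU thus drains its own percpu_cluster sequentially before taking
 * another whole cluster off free_clusters. This keeps one task's swap
 * slots physically contiguous without making all CPUs contend on a
 * shared next-offset, at the price of the
 * scan_swap_map_ssd_cluster_conflict() check when two CPUs end up
 * scanning into the same region.
 */
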
static int scan_swap_map_slots(struct swap_info_struct *si,
			       unsigned char usage, int nr,
			       swp_entry_t slots[])
{
	struct swap_cluster_info *ci;
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int n_ret = 0;

	if (nr > SWAP_BATCH)
		nr = SWAP_BATCH;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	scan_base = offset = si->cluster_next;

	/* SSD algorithm */
	if (si->cluster_info) {
		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto checks;
		else
			goto scan;
	}

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}

		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
		 * case, it is just handled by scan_swap_map_try_ssd_cluster()
		 * above.
		 */
		scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
	}

checks:
	if (si->cluster_info) {
		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
			/* take a break if we already got some slots */
			if (n_ret)
				goto done;
			if (!scan_swap_map_try_ssd_cluster(si, &offset,
							   &scan_base))
				goto scan;
		}
	}
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	ci = lock_cluster(si, offset);
	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		unlock_cluster(ci);
		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset]) {
		unlock_cluster(ci);
		if (!n_ret)
			goto scan;
		else
			goto done;
	}

	if (offset == si->lowest_bit)
		si->lowest_bit++;
	if (offset == si->highest_bit)
		si->highest_bit--;
	si->inuse_pages++;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
		spin_lock(&swap_avail_lock);
		plist_del(&si->avail_list, &swap_avail_head);
		spin_unlock(&swap_avail_lock);
	}
	si->swap_map[offset] = usage;
	inc_cluster_info_page(si, si->cluster_info, offset);
	unlock_cluster(ci);
	si->cluster_next = offset + 1;
	slots[n_ret++] = swp_entry(si->type, offset);

	/* got enough slots, or reached the highest offset? */
	if ((n_ret == nr) || (offset >= si->highest_bit))
		goto done;

	/* search for next available slot */

	/* time to take a break? */
	if (unlikely(--latency_ration < 0)) {
		if (n_ret)
			goto done;
		spin_unlock(&si->lock);
		cond_resched();
		spin_lock(&si->lock);
		latency_ration = LATENCY_LIMIT;
	}

	/* try to get more slots in cluster */
	if (si->cluster_info) {
		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto checks;
		else
			goto done;
	}
	/* non-ssd case */
	++offset;

	/* non-ssd case, still more slots in cluster? */
	if (si->cluster_nr && !si->swap_map[offset]) {
		--si->cluster_nr;
		goto checks;
	}

done:
	si->flags -= SWP_SCANNING;
	return n_ret;

scan:
	spin_unlock(&si->lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	offset = si->lowest_bit;
	while (offset < scan_base) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
		offset++;
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return n_ret;
}

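/*
 * Note that SWP_SCANNING is added to and subtracted from si->flags
 * rather than set and cleared: the flag lives above the ordinary flag
 * bits and acts as a reference count of concurrent scanners, letting
 * swapoff wait until the last scanner has left the device.
 */
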
static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	swp_entry_t entry;
	int n_ret;

	n_ret = scan_swap_map_slots(si, usage, 1, &entry);

	if (n_ret)
		return swp_offset(entry);
	else
		return 0;
}

int get_swap_pages(int n_goal, swp_entry_t swp_entries[])
{
	struct swap_info_struct *si, *next;
	long avail_pgs;
	int n_ret = 0;

	avail_pgs = atomic_long_read(&nr_swap_pages);
	if (avail_pgs <= 0)
		goto noswap;

	if (n_goal > SWAP_BATCH)
		n_goal = SWAP_BATCH;

	if (n_goal > avail_pgs)
		n_goal = avail_pgs;

	atomic_long_sub(n_goal, &nr_swap_pages);

	spin_lock(&swap_avail_lock);

start_over:
	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
		/* requeue si to after same-priority siblings */
		plist_requeue(&si->avail_list, &swap_avail_head);
		spin_unlock(&swap_avail_lock);
		spin_lock(&si->lock);
		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			if (plist_node_empty(&si->avail_list)) {
				spin_unlock(&si->lock);
				goto nextsi;
			}
			WARN(!si->highest_bit,
			     "swap_info %d in list but !highest_bit\n",
			     si->type);
			WARN(!(si->flags & SWP_WRITEOK),
			     "swap_info %d in list but !SWP_WRITEOK\n",
			     si->type);
			plist_del(&si->avail_list, &swap_avail_head);
			spin_unlock(&si->lock);
			goto nextsi;
		}
		n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
					    n_goal, swp_entries);
		spin_unlock(&si->lock);
		if (n_ret)
			goto check_out;
		pr_debug("scan_swap_map of si %d failed to find offset\n",
			 si->type);

		spin_lock(&swap_avail_lock);
nextsi:
		/*
		 * If we got here, it's likely that si was almost full before,
		 * and since scan_swap_map() can drop the si->lock, multiple
		 * callers probably all tried to get a page from the same si
		 * and it filled up before we could get one; or, the si filled
		 * up between us dropping swap_avail_lock and taking si->lock.
		 * Since we dropped the swap_avail_lock, the swap_avail_head
		 * list may have been modified; so if next is still in the
		 * swap_avail_head list then try it, otherwise start over
		 * if we have not gotten any slots.
		 */
		if (plist_node_empty(&next->avail_list))
			goto start_over;
	}

	spin_unlock(&swap_avail_lock);

check_out:
	if (n_ret < n_goal)
		atomic_long_add((long)(n_goal - n_ret), &nr_swap_pages);
noswap:
	return n_ret;
}

swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;

	get_swap_pages(1, &entry);
	return entry;
}

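/*
 * get_swap_pages() is the batched front end: a caller needing many
 * entries passes an array and receives up to min(n_goal, SWAP_BATCH) of
 * them in one pass over the available-device list, e.g. (hypothetical
 * caller):
 *
 *	swp_entry_t entries[SWAP_BATCH];
 *	int got = get_swap_pages(SWAP_BATCH, entries);
 *
 * nr_swap_pages is debited up front and the shortfall re-credited at
 * check_out, keeping the global counter consistent without holding any
 * lock across the whole allocation.
 */
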
/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	si = swap_info[type];
	if (!si)
		goto fail;

	spin_lock(&si->lock);
	if (si->flags & SWP_WRITEOK) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
fail:
	return (swp_entry_t) {0};
}

static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	return p;

bad_offset:
	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = __swap_info_get(entry);
	if (!p)
		goto out;
	if (!p->swap_map[swp_offset(entry)])
		goto bad_free;
	return p;

bad_free:
	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
	goto out;
out:
	return NULL;
}

static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p)
		spin_lock(&p->lock);
	return p;
}

static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
					struct swap_info_struct *q)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);

	if (p != q) {
		if (q != NULL)
			spin_unlock(&q->lock);
		if (p != NULL)
			spin_lock(&p->lock);
	}
	return p;
}

static unsigned char __swap_entry_free(struct swap_info_struct *p,
				       swp_entry_t entry, unsigned char usage)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char count;
	unsigned char has_cache;

	ci = lock_cluster_or_swap_info(p, offset);

	count = p->swap_map[offset];

	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(p, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	usage = count | has_cache;
	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;

	unlock_cluster_or_swap_info(p, ci);

	return usage;
}

static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char count;

	ci = lock_cluster(p, offset);
	count = p->swap_map[offset];
	VM_BUG_ON(count != SWAP_HAS_CACHE);
	p->swap_map[offset] = 0;
	dec_cluster_info_page(p, p->cluster_info, offset);
	unlock_cluster(ci);

	mem_cgroup_uncharge_swap(entry);
	if (offset < p->lowest_bit)
		p->lowest_bit = offset;
	if (offset > p->highest_bit) {
		bool was_full = !p->highest_bit;

		p->highest_bit = offset;
		if (was_full && (p->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			WARN_ON(!plist_node_empty(&p->avail_list));
			if (plist_node_empty(&p->avail_list))
				plist_add(&p->avail_list,
					  &swap_avail_head);
			spin_unlock(&swap_avail_lock);
		}
	}
	atomic_long_inc(&nr_swap_pages);
	p->inuse_pages--;
	frontswap_invalidate_page(p->type, offset);
	if (p->flags & SWP_BLKDEV) {
		struct gendisk *disk = p->bdev->bd_disk;

		if (disk->fops->swap_slot_free_notify)
			disk->fops->swap_slot_free_notify(p->bdev,
							  offset);
	}
}

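/*
 * __swap_entry_free() only drops one reference and reports what remains;
 * the "usage ? : SWAP_HAS_CACHE" assignment parks a fully dropped entry
 * in SWAP_HAS_CACHE state, so that the final teardown in
 * swap_entry_free() can be deferred and batched by the callers below
 * rather than done under this lock.
 */
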
/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p) {
		if (!__swap_entry_free(p, entry, 1))
			swapcache_free_entries(&entry, 1);
	}
}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
void swapcache_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p) {
		if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
			swapcache_free_entries(&entry, 1);
	}
}

void swapcache_free_entries(swp_entry_t *entries, int n)
{
	struct swap_info_struct *p, *prev;
	int i;

	if (n <= 0)
		return;

	prev = NULL;
	p = NULL;
	for (i = 0; i < n; ++i) {
		p = swap_info_get_cont(entries[i], prev);
		if (p)
			swap_entry_free(p, entries[i]);
		else
			break;
		prev = p;
	}
	if (p)
		spin_unlock(&p->lock);
}

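/*
 * swapcache_free_entries() leans on swap_info_get_cont(): consecutive
 * entries that belong to the same device reuse the si->lock already
 * held, so freeing a batch costs one lock round-trip per device change
 * rather than one per entry - the point of freeing swap slots in batch.
 */
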
/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	swp_entry_t entry;
	unsigned long offset;

	entry.val = page_private(page);
	p = _swap_info_get(entry);
	if (p) {
		offset = swp_offset(entry);
		ci = lock_cluster_or_swap_info(p, offset);
		count = swap_count(p->swap_map[offset]);
		unlock_cluster_or_swap_info(p, ci);
	}
	return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int __swp_swapcount(swp_entry_t entry)
{
	int count = 0;
	pgoff_t offset;
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;

	si = __swap_info_get(entry);
	if (si) {
		offset = swp_offset(entry);
		ci = lock_cluster_or_swap_info(si, offset);
		count = swap_count(si->swap_map[offset]);
		unlock_cluster_or_swap_info(si, ci);
	}
	return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns an exact answer.
 */
int swp_swapcount(swp_entry_t entry)
{
	int count, tmp_count, n;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	struct page *page;
	pgoff_t offset;
	unsigned char *map;

	p = _swap_info_get(entry);
	if (!p)
		return 0;

	offset = swp_offset(entry);

	ci = lock_cluster_or_swap_info(p, offset);

	count = swap_count(p->swap_map[offset]);
	if (!(count & COUNT_CONTINUED))
		goto out;

	count &= ~COUNT_CONTINUED;
	n = SWAP_MAP_MAX + 1;

	page = vmalloc_to_page(p->swap_map + offset);
	offset &= ~PAGE_MASK;
	VM_BUG_ON(page_private(page) != SWP_CONTINUED);

	do {
		page = list_next_entry(page, lru);
		map = kmap_atomic(page);
		tmp_count = map[offset];
		kunmap_atomic(map);

		count += (tmp_count & ~COUNT_CONTINUED) * n;
		n *= (SWAP_CONT_MAX + 1);
	} while (tmp_count & COUNT_CONTINUED);
out:
	unlock_cluster_or_swap_info(p, ci);
	return count;
}

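/*
 * The continuation pages act as higher-order digits: with the in-place
 * count capped at SWAP_MAP_MAX, the exact total is
 *
 *	count = base + sum_k d_k * (SWAP_MAP_MAX + 1) * (SWAP_CONT_MAX + 1)^k
 *
 * where d_k is the masked byte at the same offset in the k-th
 * continuation page; that is precisely what the loop above accumulates
 * through n.
 */
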
/*
 * We can write to an anon page without COW if there are no other references
 * to it.  And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 *
 * NOTE: total_mapcount should not be relied upon by the caller if
 * reuse_swap_page() returns false, but it may always be overwritten
 * (see the other implementation for CONFIG_SWAP=n).
 */
bool reuse_swap_page(struct page *page, int *total_mapcount)
{
	int count;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (unlikely(PageKsm(page)))
		return false;
	count = page_trans_huge_mapcount(page, total_mapcount);
	if (count <= 1 && PageSwapCache(page)) {
		count += page_swapcount(page);
		if (count != 1)
			goto out;
		if (!PageWriteback(page)) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		} else {
			swp_entry_t entry;
			struct swap_info_struct *p;

			entry.val = page_private(page);
			p = swap_info_get(entry);
			if (p->flags & SWP_STABLE_WRITES) {
				spin_unlock(&p->lock);
				return false;
			}
			spin_unlock(&p->lock);
		}
	}
out:
	return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapcount(page))
		return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to try_to_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a page which has already been recorded in the
	 * image as a clean swapcache page, and then reuse its swap for
	 * another page of the image.  On waking from hibernation, the
	 * original page might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
	if (pm_suspended_storage())
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
int free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct *p;
	struct page *page = NULL;
	unsigned char count;

	if (non_swap_entry(entry))
		return 1;

	p = _swap_info_get(entry);
	if (p) {
		count = __swap_entry_free(p, entry, 1);
		if (count == SWAP_HAS_CACHE) {
			page = find_get_page(swap_address_space(entry),
					     swp_offset(entry));
			if (page && !trylock_page(page)) {
				put_page(page);
				page = NULL;
			}
		} else if (!count)
			swapcache_free_entries(&entry, 1);
	}
	if (page) {
		/*
		 * Not mapped elsewhere, or swap space full? Free it!
		 * Also recheck PageSwapCache now page is locked (above).
		 */
		if (PageSwapCache(page) && !PageWriteback(page) &&
		    (!page_mapped(page) || mem_cgroup_swap_full(page))) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		put_page(page);
	}
	return p != NULL;
}

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
	struct block_device *bdev = NULL;
	int type;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			if (bdev_p)
				*bdev_p = bdgrab(sis->bdev);

			spin_unlock(&swap_lock);
			return type;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se = &sis->first_swap_extent;

			if (se->start_block == offset) {
				if (bdev_p)
					*bdev_p = bdgrab(sis->bdev);

				spin_unlock(&swap_lock);
				bdput(bdev);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct block_device *bdev;

	if ((unsigned int)type >= nr_swapfiles)
		return 0;
	if (!(swap_info[type]->flags & SWP_WRITEOK))
		return 0;
	return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
}

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	struct page *swapcache;
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))
		return -ENOMEM;

	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
				  &memcg, false)) {
		ret = -ENOMEM;
		goto out_nolock;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
		mem_cgroup_cancel_charge(page, memcg, false);
		ret = 0;
		goto out;
	}

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache) {
		page_add_anon_rmap(page, vma, addr, false);
		mem_cgroup_commit_charge(page, memcg, true, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, addr, false);
		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
	}
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
out_nolock:
	if (page != swapcache) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	int ret = 0;

	/*
	 * We don't actually need pte lock while scanning for swp_pte: since
	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
	 * page table while we're scanning; though it could get zapped, and on
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock.  Scanning without pte lock lets it be
	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
			pte_unmap(pte);
			ret = unuse_pte(vma, pmd, addr, entry, page);
			if (ret)
				goto out;
			pte = pte_offset_map(pmd, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
out:
	return ret;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;
	int ret;

	pmd = pmd_offset(pud, addr);
	do {
		cond_resched();
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;
	int ret;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma,
				swp_entry_t entry, struct page *page)
{
	pgd_t *pgd;
	unsigned long addr, end, next;
	int ret;

	if (page_anon_vma(page)) {
		addr = page_address_in_vma(page, vma);
		if (addr == -EFAULT)
			return 0;
		else
			end = addr + PAGE_SIZE;
	} else {
		addr = vma->vm_start;
		end = vma->vm_end;
	}

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

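/*
 * unuse_vma() and its helpers form a standard multi-level page-table
 * walk (pgd -> pud -> pmd -> pte): each level steps with *_addr_end()
 * to clamp its range and skips absent entries, so the per-PTE
 * comparison in unuse_pte_range() only ever runs over populated ranges.
 */
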
static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_inactive_list is unlikely to unmap
		 * its ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
			break;
		cond_resched();
	}
	up_read(&mm->mmap_sem);
	return (ret < 0) ? ret : 0;
}

/*
 * Scan swap_map (or frontswap_map if frontswap parameter is true)
 * from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
					unsigned int prev, bool frontswap)
{
	unsigned int max = si->max;
	unsigned int i = prev;
	unsigned char count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		count = READ_ONCE(si->swap_map[i]);
		if (count && swap_count(count) != SWAP_MAP_BAD)
			if (!frontswap || frontswap_test(si, i))
				break;
		if ((i % LATENCY_LIMIT) == 0)
			cond_resched();
	}
	return i;
}

1642/*
1643 * We completely avoid races by reading each swap page in advance,
1644 * and then search for the process using it. All the necessary
1645 * page table adjustments can then be made atomically.
38b5faf4
DM
1646 *
1647 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1648 * pages_to_unuse==0 means all pages; ignored if frontswap is false
1da177e4 1649 */
38b5faf4
DM
1650int try_to_unuse(unsigned int type, bool frontswap,
1651 unsigned long pages_to_unuse)
1da177e4 1652{
efa90a98 1653 struct swap_info_struct *si = swap_info[type];
1da177e4 1654 struct mm_struct *start_mm;
edfe23da
SL
1655 volatile unsigned char *swap_map; /* swap_map is accessed without
1656 * locking. Mark it as volatile
1657 * to prevent compiler doing
1658 * something odd.
1659 */
8d69aaee 1660 unsigned char swcount;
1da177e4
LT
1661 struct page *page;
1662 swp_entry_t entry;
6eb396dc 1663 unsigned int i = 0;
1da177e4 1664 int retval = 0;
1da177e4
LT
1665
1666 /*
1667 * When searching mms for an entry, a good strategy is to
1668 * start at the first mm we freed the previous entry from
1669 * (though actually we don't notice whether we or coincidence
1670 * freed the entry). Initialize this start_mm with a hold.
1671 *
1672 * A simpler strategy would be to start at the last mm we
1673 * freed the previous entry from; but that would take less
1674 * advantage of mmlist ordering, which clusters forked mms
1675 * together, child after parent. If we race with dup_mmap(), we
1676 * prefer to resolve parent before child, lest we miss entries
1677 * duplicated after we scanned child: using last mm would invert
570a335b 1678 * that.
1da177e4
LT
1679 */
1680 start_mm = &init_mm;
1681 atomic_inc(&init_mm.mm_users);
1682
1683 /*
1684 * Keep on scanning until all entries have gone. Usually,
1685 * one pass through swap_map is enough, but not necessarily:
1686 * there are races when an instance of an entry might be missed.
1687 */
38b5faf4 1688 while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1da177e4
LT
1689 if (signal_pending(current)) {
1690 retval = -EINTR;
1691 break;
1692 }
1693
886bb7e9 1694 /*
1da177e4
LT
1695 * Get a page for the entry, using the existing swap
1696 * cache page if there is one. Otherwise, get a clean
886bb7e9 1697 * page and read the swap into it.
1da177e4
LT
1698 */
1699 swap_map = &si->swap_map[i];
1700 entry = swp_entry(type, i);
02098fea
HD
1701 page = read_swap_cache_async(entry,
1702 GFP_HIGHUSER_MOVABLE, NULL, 0);
1da177e4
LT
1703 if (!page) {
1704 /*
1705 * Either swap_duplicate() failed because entry
1706 * has been freed independently, and will not be
1707 * reused since sys_swapoff() already disabled
1708 * allocation from here, or alloc_page() failed.
1709 */
edfe23da
SL
1710 swcount = *swap_map;
1711 /*
1712 * We don't hold lock here, so the swap entry could be
1713 * SWAP_MAP_BAD (when the cluster is discarding).
1714 * Instead of fail out, We can just skip the swap
1715 * entry because swapoff will wait for discarding
1716 * finish anyway.
1717 */
1718 if (!swcount || swcount == SWAP_MAP_BAD)
1da177e4
LT
1719 continue;
1720 retval = -ENOMEM;
1721 break;
1722 }
1723
1724 /*
1725 * Don't hold on to start_mm if it looks like exiting.
1726 */
1727 if (atomic_read(&start_mm->mm_users) == 1) {
1728 mmput(start_mm);
1729 start_mm = &init_mm;
1730 atomic_inc(&init_mm.mm_users);
1731 }
1732
1733 /*
1734 * Wait for and lock page. When do_swap_page races with
1735 * try_to_unuse, do_swap_page can handle the fault much
1736 * faster than try_to_unuse can locate the entry. This
1737 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1738 * defer to do_swap_page in such a case - in some tests,
1739 * do_swap_page and try_to_unuse repeatedly compete.
1740 */
1741 wait_on_page_locked(page);
1742 wait_on_page_writeback(page);
1743 lock_page(page);
1744 wait_on_page_writeback(page);
1745
1746 /*
1747 * Remove all references to entry.
1da177e4 1748 */
1da177e4 1749 swcount = *swap_map;
aaa46865
HD
1750 if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1751 retval = shmem_unuse(entry, page);
1752 /* page has already been unlocked and released */
1753 if (retval < 0)
1754 break;
1755 continue;
1da177e4 1756 }
aaa46865
HD
1757 if (swap_count(swcount) && start_mm != &init_mm)
1758 retval = unuse_mm(start_mm, entry, page);
1759
355cfa73 1760 if (swap_count(*swap_map)) {
1da177e4
LT
1761 int set_start_mm = (*swap_map >= swcount);
1762 struct list_head *p = &start_mm->mmlist;
1763 struct mm_struct *new_start_mm = start_mm;
1764 struct mm_struct *prev_mm = start_mm;
1765 struct mm_struct *mm;
1766
1767 atomic_inc(&new_start_mm->mm_users);
1768 atomic_inc(&prev_mm->mm_users);
1769 spin_lock(&mmlist_lock);
aaa46865 1770 while (swap_count(*swap_map) && !retval &&
1da177e4
LT
1771 (p = p->next) != &start_mm->mmlist) {
1772 mm = list_entry(p, struct mm_struct, mmlist);
70af7c5c 1773 if (!atomic_inc_not_zero(&mm->mm_users))
1da177e4 1774 continue;
1da177e4
LT
1775 spin_unlock(&mmlist_lock);
1776 mmput(prev_mm);
1777 prev_mm = mm;
1778
1779 cond_resched();
1780
1781 swcount = *swap_map;
355cfa73 1782 if (!swap_count(swcount)) /* any usage ? */
1da177e4 1783 ;
aaa46865 1784 else if (mm == &init_mm)
1da177e4 1785 set_start_mm = 1;
aaa46865 1786 else
1da177e4 1787 retval = unuse_mm(mm, entry, page);
355cfa73 1788
32c5fc10 1789 if (set_start_mm && *swap_map < swcount) {
1da177e4
LT
1790 mmput(new_start_mm);
1791 atomic_inc(&mm->mm_users);
1792 new_start_mm = mm;
1793 set_start_mm = 0;
1794 }
1795 spin_lock(&mmlist_lock);
1796 }
1797 spin_unlock(&mmlist_lock);
1798 mmput(prev_mm);
1799 mmput(start_mm);
1800 start_mm = new_start_mm;
1801 }
1802 if (retval) {
1803 unlock_page(page);
09cbfeaf 1804 put_page(page);
1da177e4
LT
1805 break;
1806 }
1807
1da177e4
LT
1808 /*
1809 * If a reference remains (rare), we would like to leave
1810 * the page in the swap cache; but try_to_unmap could
1811 * then re-duplicate the entry once we drop page lock,
1812 * so we might loop indefinitely; also, that page could
1813 * not be swapped out to other storage meanwhile. So:
1814 * delete from cache even if there's another reference,
1815 * after ensuring that the data has been saved to disk -
1816 * since if the reference remains (rarer), it will be
1817 * read from disk into another page. Splitting into two
1818 * pages would be incorrect if swap supported "shared
1819 * private" pages, but they are handled by tmpfs files.
5ad64688
HD
1820 *
1821 * Given how unuse_vma() targets one particular offset
1822 * in an anon_vma, once the anon_vma has been determined,
1823 * this splitting happens to be just what is needed to
1824 * handle where KSM pages have been swapped out: re-reading
1825 * is unnecessarily slow, but we can fix that later on.
1da177e4 1826 */
355cfa73
KH
1827 if (swap_count(*swap_map) &&
1828 PageDirty(page) && PageSwapCache(page)) {
1da177e4
LT
1829 struct writeback_control wbc = {
1830 .sync_mode = WB_SYNC_NONE,
1831 };
1832
1833 swap_writepage(page, &wbc);
1834 lock_page(page);
1835 wait_on_page_writeback(page);
1836 }
68bdc8d6
HD
1837
1838 /*
1839 * It is conceivable that a racing task removed this page from
1840 * swap cache just before we acquired the page lock at the top,
1841 * or while we dropped it in unuse_mm(). The page might even
1842 * be back in swap cache on another swap area: that we must not
1843 * delete, since it may not have been written out to swap yet.
1844 */
1845 if (PageSwapCache(page) &&
1846 likely(page_private(page) == entry.val))
2e0e26c7 1847 delete_from_swap_cache(page);
1da177e4
LT
1848
1849 /*
1850 * So that we could skip searching mms once the swap count went
1851 * to 1, we did not mark any present ptes as dirty: we must
2706a1b8 1852 * mark the page dirty so shrink_page_list will preserve it.
1da177e4
LT
1853 */
1854 SetPageDirty(page);
1855 unlock_page(page);
09cbfeaf 1856 put_page(page);
1da177e4
LT
1857
1858 /*
1859 * Make sure that we aren't completely killing
1860 * interactive performance.
1861 */
1862 cond_resched();
38b5faf4
DM
1863 if (frontswap && pages_to_unuse > 0) {
1864 if (!--pages_to_unuse)
1865 break;
1866 }
1da177e4
LT
1867 }
1868
1869 mmput(start_mm);
1da177e4
LT
1870 return retval;
1871}
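/*
 * A minimal userspace sketch of the scan pattern used above: walk a
 * circular doubly-linked list starting just after a chosen "start" node
 * and stop once we come back around to it, the same shape as the
 * "(p = p->next) != &start_mm->mmlist" loop. The names here are
 * illustrative, not kernel API.
 */
#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

static void scan_from(struct node *start)
{
	struct node *p = start;

	while ((p = p->next) != start)	/* full circle, start itself excluded */
		printf("visiting mm %d\n", p->id);
}

int main(void)
{
	struct node n[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	n[0].next = &n[1];
	n[1].next = &n[2];
	n[2].next = &n[0];

	scan_from(&n[1]);	/* visits 2 then 0, never 1 itself */
	return 0;
}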
1872
1873/*
5d337b91
HD
1874 * After a successful try_to_unuse, if no swap is now in use, we know
1875 * we can empty the mmlist. swap_lock must be held on entry and exit.
1876 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1da177e4
LT
1877 * added to the mmlist just after swap_duplicate - before would be racy.
1878 */
1879static void drain_mmlist(void)
1880{
1881 struct list_head *p, *next;
efa90a98 1882 unsigned int type;
1da177e4 1883
efa90a98
HD
1884 for (type = 0; type < nr_swapfiles; type++)
1885 if (swap_info[type]->inuse_pages)
1da177e4
LT
1886 return;
1887 spin_lock(&mmlist_lock);
1888 list_for_each_safe(p, next, &init_mm.mmlist)
1889 list_del_init(p);
1890 spin_unlock(&mmlist_lock);
1891}
1892
1893/*
1894 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
d4906e1a
LS
1895 * corresponds to page offset for the specified swap entry.
1896 * Note that the return type of this function is sector_t, but it returns the
1897 * page offset into the bdev, not the sector offset.
1da177e4 1898 */
d4906e1a 1899static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1da177e4 1900{
f29ad6a9
HD
1901 struct swap_info_struct *sis;
1902 struct swap_extent *start_se;
1903 struct swap_extent *se;
1904 pgoff_t offset;
1905
efa90a98 1906 sis = swap_info[swp_type(entry)];
f29ad6a9
HD
1907 *bdev = sis->bdev;
1908
1909 offset = swp_offset(entry);
1910 start_se = sis->curr_swap_extent;
1911 se = start_se;
1da177e4
LT
1912
1913 for ( ; ; ) {
1da177e4
LT
1914 if (se->start_page <= offset &&
1915 offset < (se->start_page + se->nr_pages)) {
1916 return se->start_block + (offset - se->start_page);
1917 }
a8ae4991 1918 se = list_next_entry(se, list);
1da177e4
LT
1919 sis->curr_swap_extent = se;
1920 BUG_ON(se == start_se); /* It *must* be present */
1921 }
1922}
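/*
 * A minimal userspace sketch of the lookup above: given a sorted array
 * of (start_page, nr_pages, start_block) extents and a cached cursor,
 * resolve a page offset to a block, resuming the search from wherever
 * the previous lookup left off. Illustrative only, assuming a flat
 * array instead of the kernel's circular extent list.
 */
#include <stdio.h>
#include <stddef.h>

struct extent {
	unsigned long start_page;
	unsigned long nr_pages;
	unsigned long start_block;
};

static size_t cursor;	/* plays the role of curr_swap_extent */

static unsigned long map_offset(const struct extent *ext, size_t n,
				unsigned long offset)
{
	size_t i = cursor, start = cursor;

	for (;;) {
		const struct extent *se = &ext[i];

		if (se->start_page <= offset &&
		    offset < se->start_page + se->nr_pages) {
			cursor = i;	/* remember where we found it */
			return se->start_block + (offset - se->start_page);
		}
		i = (i + 1) % n;	/* wrap, like the circular list */
		if (i == start)		/* mirrors the kernel's BUG_ON */
			return (unsigned long)-1;
	}
}

int main(void)
{
	struct extent ext[] = {
		{ 0,   100, 5000 },
		{ 100,  50, 9000 },
		{ 150, 200, 1000 },
	};

	printf("%lu\n", map_offset(ext, 3, 120));	/* 9020 */
	printf("%lu\n", map_offset(ext, 3, 130));	/* 9030, cursor hit */
	return 0;
}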
1923
d4906e1a
LS
1924/*
1925 * Returns the page offset into bdev for the specified page's swap entry.
1926 */
1927sector_t map_swap_page(struct page *page, struct block_device **bdev)
1928{
1929 swp_entry_t entry;
1930 entry.val = page_private(page);
1931 return map_swap_entry(entry, bdev);
1932}
1933
1da177e4
LT
1934/*
1935 * Free all of a swapdev's extent information
1936 */
1937static void destroy_swap_extents(struct swap_info_struct *sis)
1938{
9625a5f2 1939 while (!list_empty(&sis->first_swap_extent.list)) {
1da177e4
LT
1940 struct swap_extent *se;
1941
a8ae4991 1942 se = list_first_entry(&sis->first_swap_extent.list,
1da177e4
LT
1943 struct swap_extent, list);
1944 list_del(&se->list);
1945 kfree(se);
1946 }
62c230bc
MG
1947
1948 if (sis->flags & SWP_FILE) {
1949 struct file *swap_file = sis->swap_file;
1950 struct address_space *mapping = swap_file->f_mapping;
1951
1952 sis->flags &= ~SWP_FILE;
1953 mapping->a_ops->swap_deactivate(swap_file);
1954 }
1da177e4
LT
1955}
1956
1957/*
1958 * Add a block range (and the corresponding page range) into this swapdev's
11d31886 1959 * extent list. The extent list is kept sorted in page order.
1da177e4 1960 *
11d31886 1961 * This function rather assumes that it is called in ascending page order.
1da177e4 1962 */
a509bc1a 1963int
1da177e4
LT
1964add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1965 unsigned long nr_pages, sector_t start_block)
1966{
1967 struct swap_extent *se;
1968 struct swap_extent *new_se;
1969 struct list_head *lh;
1970
9625a5f2
HD
1971 if (start_page == 0) {
1972 se = &sis->first_swap_extent;
1973 sis->curr_swap_extent = se;
1974 se->start_page = 0;
1975 se->nr_pages = nr_pages;
1976 se->start_block = start_block;
1977 return 1;
1978 } else {
1979 lh = sis->first_swap_extent.list.prev; /* Highest extent */
1da177e4 1980 se = list_entry(lh, struct swap_extent, list);
11d31886
HD
1981 BUG_ON(se->start_page + se->nr_pages != start_page);
1982 if (se->start_block + se->nr_pages == start_block) {
1da177e4
LT
1983 /* Merge it */
1984 se->nr_pages += nr_pages;
1985 return 0;
1986 }
1da177e4
LT
1987 }
1988
1989 /*
1990 * No merge. Insert a new extent, preserving ordering.
1991 */
1992 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1993 if (new_se == NULL)
1994 return -ENOMEM;
1995 new_se->start_page = start_page;
1996 new_se->nr_pages = nr_pages;
1997 new_se->start_block = start_block;
1998
9625a5f2 1999 list_add_tail(&new_se->list, &sis->first_swap_extent.list);
53092a74 2000 return 1;
1da177e4
LT
2001}
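/*
 * A minimal sketch of the merge rule above: a new page range only folds
 * into the highest existing extent when both the page range and the
 * block range continue exactly where that extent ends. Illustrative
 * names; this is not the kernel's add_swap_extent().
 */
#include <stdbool.h>
#include <stdio.h>

struct extent {
	unsigned long start_page;
	unsigned long nr_pages;
	unsigned long start_block;
};

static bool try_merge(struct extent *last, unsigned long start_page,
		      unsigned long nr_pages, unsigned long start_block)
{
	/* pages arrive in ascending order; check the blocks line up too */
	if (last->start_page + last->nr_pages == start_page &&
	    last->start_block + last->nr_pages == start_block) {
		last->nr_pages += nr_pages;
		return true;
	}
	return false;
}

int main(void)
{
	struct extent last = { 0, 100, 5000 };

	/* blocks continue at 5100: merges into one 200-page extent */
	printf("merged=%d nr=%lu\n",
	       try_merge(&last, 100, 100, 5100), last.nr_pages);

	/* blocks jump to 9000: a new extent would be needed instead */
	printf("merged=%d nr=%lu\n",
	       try_merge(&last, 200, 50, 9000), last.nr_pages);
	return 0;
}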
2002
2003/*
2004 * A `swap extent' is a simple thing which maps a contiguous range of pages
2005 * onto a contiguous range of disk blocks. An ordered list of swap extents
2006 * is built at swapon time and is then used at swap_writepage/swap_readpage
2007 * time for locating where on disk a page belongs.
2008 *
2009 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2010 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2011 * swap files identically.
2012 *
2013 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2014 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
2015 * swapfiles are handled *identically* after swapon time.
2016 *
2017 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2018 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
2019 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2020 * requirements, they are simply tossed out - we will never use those blocks
2021 * for swapping.
2022 *
b0d9bcd4 2023 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
1da177e4
LT
2024 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
2025 * which will scribble on the fs.
2026 *
2027 * The amount of disk space which a single swap extent represents varies.
2028 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
2029 * extents in the list. To avoid much list walking, we cache the previous
2030 * search location in `curr_swap_extent', and start new searches from there.
2031 * This is extremely effective. The average number of iterations in
2032 * map_swap_page() has been measured at about 0.3 per page. - akpm.
2033 */
53092a74 2034static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1da177e4 2035{
62c230bc
MG
2036 struct file *swap_file = sis->swap_file;
2037 struct address_space *mapping = swap_file->f_mapping;
2038 struct inode *inode = mapping->host;
1da177e4
LT
2039 int ret;
2040
1da177e4
LT
2041 if (S_ISBLK(inode->i_mode)) {
2042 ret = add_swap_extent(sis, 0, sis->max, 0);
53092a74 2043 *span = sis->pages;
a509bc1a 2044 return ret;
1da177e4
LT
2045 }
2046
62c230bc 2047 if (mapping->a_ops->swap_activate) {
a509bc1a 2048 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
62c230bc
MG
2049 if (!ret) {
2050 sis->flags |= SWP_FILE;
2051 ret = add_swap_extent(sis, 0, sis->max, 0);
2052 *span = sis->pages;
2053 }
a509bc1a 2054 return ret;
62c230bc
MG
2055 }
2056
a509bc1a 2057 return generic_swapfile_activate(sis, swap_file, span);
1da177e4
LT
2058}
2059
cf0cac0a 2060static void _enable_swap_info(struct swap_info_struct *p, int prio,
2a8f9449
SL
2061 unsigned char *swap_map,
2062 struct swap_cluster_info *cluster_info)
40531542 2063{
40531542
CEB
2064 if (prio >= 0)
2065 p->prio = prio;
2066 else
2067 p->prio = --least_priority;
18ab4d4c
DS
2068 /*
2069 * the plist prio is negated because plist ordering is
2070 * low-to-high, while swap ordering is high-to-low
2071 */
2072 p->list.prio = -p->prio;
2073 p->avail_list.prio = -p->prio;
40531542 2074 p->swap_map = swap_map;
2a8f9449 2075 p->cluster_info = cluster_info;
40531542 2076 p->flags |= SWP_WRITEOK;
ec8acf20 2077 atomic_long_add(p->pages, &nr_swap_pages);
40531542
CEB
2078 total_swap_pages += p->pages;
2079
adfab836 2080 assert_spin_locked(&swap_lock);
adfab836 2081 /*
18ab4d4c
DS
2082 * both lists are plists, and thus priority ordered.
2083 * swap_active_head needs to be priority ordered for swapoff(),
2084 * which on removal of any swap_info_struct with an auto-assigned
2085 * (i.e. negative) priority increments the auto-assigned priority
2086 * of any lower-priority swap_info_structs.
2087 * swap_avail_head needs to be priority ordered for get_swap_page(),
2088 * which allocates swap pages from the highest available priority
2089 * swap_info_struct.
adfab836 2090 */
18ab4d4c
DS
2091 plist_add(&p->list, &swap_active_head);
2092 spin_lock(&swap_avail_lock);
2093 plist_add(&p->avail_list, &swap_avail_head);
2094 spin_unlock(&swap_avail_lock);
cf0cac0a
CEB
2095}
2096
2097static void enable_swap_info(struct swap_info_struct *p, int prio,
2098 unsigned char *swap_map,
2a8f9449 2099 struct swap_cluster_info *cluster_info,
cf0cac0a
CEB
2100 unsigned long *frontswap_map)
2101{
4f89849d 2102 frontswap_init(p->type, frontswap_map);
cf0cac0a 2103 spin_lock(&swap_lock);
ec8acf20 2104 spin_lock(&p->lock);
2a8f9449 2105 _enable_swap_info(p, prio, swap_map, cluster_info);
ec8acf20 2106 spin_unlock(&p->lock);
cf0cac0a
CEB
2107 spin_unlock(&swap_lock);
2108}
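/*
 * A minimal sketch of the priority negation noted above: plists sort
 * ascending, swap wants descending priority, so storing -prio makes the
 * highest-priority swap device sort first. Plain qsort stands in for
 * the kernel's plist; names and values are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct dev {
	const char *name;
	int prio;	/* swap priority, higher = preferred */
};

static int by_negated_prio(const void *a, const void *b)
{
	const struct dev *da = a, *db = b;

	return (-da->prio) - (-db->prio);	/* ascending in -prio */
}

int main(void)
{
	struct dev devs[] = {
		{ "sda2", -2 }, { "zram0", 100 }, { "sdb2", 5 },
	};

	qsort(devs, 3, sizeof(devs[0]), by_negated_prio);
	for (int i = 0; i < 3; i++)	/* prints zram0, sdb2, sda2 */
		printf("%s prio=%d\n", devs[i].name, devs[i].prio);
	return 0;
}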
2109
2110static void reinsert_swap_info(struct swap_info_struct *p)
2111{
2112 spin_lock(&swap_lock);
ec8acf20 2113 spin_lock(&p->lock);
2a8f9449 2114 _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
ec8acf20 2115 spin_unlock(&p->lock);
40531542
CEB
2116 spin_unlock(&swap_lock);
2117}
2118
c4ea37c2 2119SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1da177e4 2120{
73c34b6a 2121 struct swap_info_struct *p = NULL;
8d69aaee 2122 unsigned char *swap_map;
2a8f9449 2123 struct swap_cluster_info *cluster_info;
4f89849d 2124 unsigned long *frontswap_map;
1da177e4
LT
2125 struct file *swap_file, *victim;
2126 struct address_space *mapping;
2127 struct inode *inode;
91a27b2a 2128 struct filename *pathname;
adfab836 2129 int err, found = 0;
5b808a23 2130 unsigned int old_block_size;
886bb7e9 2131
1da177e4
LT
2132 if (!capable(CAP_SYS_ADMIN))
2133 return -EPERM;
2134
191c5424
AV
2135 BUG_ON(!current->mm);
2136
1da177e4 2137 pathname = getname(specialfile);
1da177e4 2138 if (IS_ERR(pathname))
f58b59c1 2139 return PTR_ERR(pathname);
1da177e4 2140
669abf4e 2141 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
1da177e4
LT
2142 err = PTR_ERR(victim);
2143 if (IS_ERR(victim))
2144 goto out;
2145
2146 mapping = victim->f_mapping;
5d337b91 2147 spin_lock(&swap_lock);
18ab4d4c 2148 plist_for_each_entry(p, &swap_active_head, list) {
22c6f8fd 2149 if (p->flags & SWP_WRITEOK) {
adfab836
DS
2150 if (p->swap_file->f_mapping == mapping) {
2151 found = 1;
1da177e4 2152 break;
adfab836 2153 }
1da177e4 2154 }
1da177e4 2155 }
adfab836 2156 if (!found) {
1da177e4 2157 err = -EINVAL;
5d337b91 2158 spin_unlock(&swap_lock);
1da177e4
LT
2159 goto out_dput;
2160 }
191c5424 2161 if (!security_vm_enough_memory_mm(current->mm, p->pages))
1da177e4
LT
2162 vm_unacct_memory(p->pages);
2163 else {
2164 err = -ENOMEM;
5d337b91 2165 spin_unlock(&swap_lock);
1da177e4
LT
2166 goto out_dput;
2167 }
18ab4d4c
DS
2168 spin_lock(&swap_avail_lock);
2169 plist_del(&p->avail_list, &swap_avail_head);
2170 spin_unlock(&swap_avail_lock);
ec8acf20 2171 spin_lock(&p->lock);
78ecba08 2172 if (p->prio < 0) {
adfab836
DS
2173 struct swap_info_struct *si = p;
2174
18ab4d4c 2175 plist_for_each_entry_continue(si, &swap_active_head, list) {
adfab836 2176 si->prio++;
18ab4d4c
DS
2177 si->list.prio--;
2178 si->avail_list.prio--;
adfab836 2179 }
78ecba08
HD
2180 least_priority++;
2181 }
18ab4d4c 2182 plist_del(&p->list, &swap_active_head);
ec8acf20 2183 atomic_long_sub(p->pages, &nr_swap_pages);
1da177e4
LT
2184 total_swap_pages -= p->pages;
2185 p->flags &= ~SWP_WRITEOK;
ec8acf20 2186 spin_unlock(&p->lock);
5d337b91 2187 spin_unlock(&swap_lock);
fb4f88dc 2188
e1e12d2f 2189 set_current_oom_origin();
adfab836 2190 err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
e1e12d2f 2191 clear_current_oom_origin();
1da177e4 2192
1da177e4
LT
2193 if (err) {
2194 /* re-insert swap space into swap_active_head */
cf0cac0a 2195 reinsert_swap_info(p);
1da177e4
LT
2196 goto out_dput;
2197 }
52b7efdb 2198
815c2c54
SL
2199 flush_work(&p->discard_work);
2200
5d337b91 2201 destroy_swap_extents(p);
570a335b
HD
2202 if (p->flags & SWP_CONTINUED)
2203 free_swap_count_continuations(p);
2204
fc0abb14 2205 mutex_lock(&swapon_mutex);
5d337b91 2206 spin_lock(&swap_lock);
ec8acf20 2207 spin_lock(&p->lock);
5d337b91
HD
2208 drain_mmlist();
2209
52b7efdb 2210 /* wait for anyone still in scan_swap_map */
52b7efdb
HD
2211 p->highest_bit = 0; /* cuts scans short */
2212 while (p->flags >= SWP_SCANNING) {
ec8acf20 2213 spin_unlock(&p->lock);
5d337b91 2214 spin_unlock(&swap_lock);
13e4b57f 2215 schedule_timeout_uninterruptible(1);
5d337b91 2216 spin_lock(&swap_lock);
ec8acf20 2217 spin_lock(&p->lock);
52b7efdb 2218 }
52b7efdb 2219
1da177e4 2220 swap_file = p->swap_file;
5b808a23 2221 old_block_size = p->old_block_size;
1da177e4
LT
2222 p->swap_file = NULL;
2223 p->max = 0;
2224 swap_map = p->swap_map;
2225 p->swap_map = NULL;
2a8f9449
SL
2226 cluster_info = p->cluster_info;
2227 p->cluster_info = NULL;
4f89849d 2228 frontswap_map = frontswap_map_get(p);
ec8acf20 2229 spin_unlock(&p->lock);
5d337b91 2230 spin_unlock(&swap_lock);
adfab836 2231 frontswap_invalidate_area(p->type);
58e97ba6 2232 frontswap_map_set(p, NULL);
fc0abb14 2233 mutex_unlock(&swapon_mutex);
ebc2a1a6
SL
2234 free_percpu(p->percpu_cluster);
2235 p->percpu_cluster = NULL;
1da177e4 2236 vfree(swap_map);
2a8f9449 2237 vfree(cluster_info);
4f89849d 2238 vfree(frontswap_map);
2de1a7e4 2239 /* Destroy swap account information */
adfab836 2240 swap_cgroup_swapoff(p->type);
4b3ef9da 2241 exit_swap_address_space(p->type);
27a7faa0 2242
1da177e4
LT
2243 inode = mapping->host;
2244 if (S_ISBLK(inode->i_mode)) {
2245 struct block_device *bdev = I_BDEV(inode);
5b808a23 2246 set_blocksize(bdev, old_block_size);
e525fd89 2247 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1da177e4 2248 } else {
5955102c 2249 inode_lock(inode);
1da177e4 2250 inode->i_flags &= ~S_SWAPFILE;
5955102c 2251 inode_unlock(inode);
1da177e4
LT
2252 }
2253 filp_close(swap_file, NULL);
f893ab41
WY
2254
2255 /*
2256 * Clear the SWP_USED flag after all resources are freed so that swapon
2257 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
2258 * not hold p->lock after we cleared its SWP_WRITEOK.
2259 */
2260 spin_lock(&swap_lock);
2261 p->flags = 0;
2262 spin_unlock(&swap_lock);
2263
1da177e4 2264 err = 0;
66d7dd51
KS
2265 atomic_inc(&proc_poll_event);
2266 wake_up_interruptible(&proc_poll_wait);
1da177e4
LT
2267
2268out_dput:
2269 filp_close(victim, NULL);
2270out:
f58b59c1 2271 putname(pathname);
1da177e4
LT
2272 return err;
2273}
2274
2275#ifdef CONFIG_PROC_FS
66d7dd51
KS
2276static unsigned swaps_poll(struct file *file, poll_table *wait)
2277{
f1514638 2278 struct seq_file *seq = file->private_data;
66d7dd51
KS
2279
2280 poll_wait(file, &proc_poll_wait, wait);
2281
f1514638
KS
2282 if (seq->poll_event != atomic_read(&proc_poll_event)) {
2283 seq->poll_event = atomic_read(&proc_poll_event);
66d7dd51
KS
2284 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
2285 }
2286
2287 return POLLIN | POLLRDNORM;
2288}
2289
1da177e4
LT
2290/* iterator */
2291static void *swap_start(struct seq_file *swap, loff_t *pos)
2292{
efa90a98
HD
2293 struct swap_info_struct *si;
2294 int type;
1da177e4
LT
2295 loff_t l = *pos;
2296
fc0abb14 2297 mutex_lock(&swapon_mutex);
1da177e4 2298
881e4aab
SS
2299 if (!l)
2300 return SEQ_START_TOKEN;
2301
efa90a98
HD
2302 for (type = 0; type < nr_swapfiles; type++) {
2303 smp_rmb(); /* read nr_swapfiles before swap_info[type] */
2304 si = swap_info[type];
2305 if (!(si->flags & SWP_USED) || !si->swap_map)
1da177e4 2306 continue;
881e4aab 2307 if (!--l)
efa90a98 2308 return si;
1da177e4
LT
2309 }
2310
2311 return NULL;
2312}
2313
2314static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2315{
efa90a98
HD
2316 struct swap_info_struct *si = v;
2317 int type;
1da177e4 2318
881e4aab 2319 if (v == SEQ_START_TOKEN)
efa90a98
HD
2320 type = 0;
2321 else
2322 type = si->type + 1;
881e4aab 2323
efa90a98
HD
2324 for (; type < nr_swapfiles; type++) {
2325 smp_rmb(); /* read nr_swapfiles before swap_info[type] */
2326 si = swap_info[type];
2327 if (!(si->flags & SWP_USED) || !si->swap_map)
1da177e4
LT
2328 continue;
2329 ++*pos;
efa90a98 2330 return si;
1da177e4
LT
2331 }
2332
2333 return NULL;
2334}
2335
2336static void swap_stop(struct seq_file *swap, void *v)
2337{
fc0abb14 2338 mutex_unlock(&swapon_mutex);
1da177e4
LT
2339}
2340
2341static int swap_show(struct seq_file *swap, void *v)
2342{
efa90a98 2343 struct swap_info_struct *si = v;
1da177e4
LT
2344 struct file *file;
2345 int len;
2346
efa90a98 2347 if (si == SEQ_START_TOKEN) {
881e4aab
SS
2348 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2349 return 0;
2350 }
1da177e4 2351
efa90a98 2352 file = si->swap_file;
2726d566 2353 len = seq_file_path(swap, file, " \t\n\\");
6eb396dc 2354 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
886bb7e9 2355 len < 40 ? 40 - len : 1, " ",
496ad9aa 2356 S_ISBLK(file_inode(file)->i_mode) ?
1da177e4 2357 "partition" : "file\t",
efa90a98
HD
2358 si->pages << (PAGE_SHIFT - 10),
2359 si->inuse_pages << (PAGE_SHIFT - 10),
2360 si->prio);
1da177e4
LT
2361 return 0;
2362}
2363
15ad7cdc 2364static const struct seq_operations swaps_op = {
1da177e4
LT
2365 .start = swap_start,
2366 .next = swap_next,
2367 .stop = swap_stop,
2368 .show = swap_show
2369};
2370
2371static int swaps_open(struct inode *inode, struct file *file)
2372{
f1514638 2373 struct seq_file *seq;
66d7dd51
KS
2374 int ret;
2375
66d7dd51 2376 ret = seq_open(file, &swaps_op);
f1514638 2377 if (ret)
66d7dd51 2378 return ret;
66d7dd51 2379
f1514638
KS
2380 seq = file->private_data;
2381 seq->poll_event = atomic_read(&proc_poll_event);
2382 return 0;
1da177e4
LT
2383}
2384
15ad7cdc 2385static const struct file_operations proc_swaps_operations = {
1da177e4
LT
2386 .open = swaps_open,
2387 .read = seq_read,
2388 .llseek = seq_lseek,
2389 .release = seq_release,
66d7dd51 2390 .poll = swaps_poll,
1da177e4
LT
2391};
2392
2393static int __init procswaps_init(void)
2394{
3d71f86f 2395 proc_create("swaps", 0, NULL, &proc_swaps_operations);
1da177e4
LT
2396 return 0;
2397}
2398__initcall(procswaps_init);
2399#endif /* CONFIG_PROC_FS */
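/*
 * A minimal userspace sketch of consuming the swaps_poll() interface
 * above: open /proc/swaps and use poll(2) to learn when the set of swap
 * areas changes (swapon/swapoff bump proc_poll_event). Once POLLERR or
 * POLLPRI fires, re-read the file from the start.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/proc/swaps", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) < 0)	/* prime the seq_file */
		return 1;

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		if (poll(&pfd, 1, -1) < 0)
			return 1;
		if (pfd.revents & (POLLERR | POLLPRI)) {
			lseek(fd, 0, SEEK_SET);
			n = read(fd, buf, sizeof(buf) - 1);
			if (n < 0)
				return 1;
			buf[n] = '\0';
			printf("swap table changed:\n%s", buf);
		}
	}
}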
2400
1796316a
JB
2401#ifdef MAX_SWAPFILES_CHECK
2402static int __init max_swapfiles_check(void)
2403{
2404 MAX_SWAPFILES_CHECK();
2405 return 0;
2406}
2407late_initcall(max_swapfiles_check);
2408#endif
2409
53cbb243 2410static struct swap_info_struct *alloc_swap_info(void)
1da177e4 2411{
73c34b6a 2412 struct swap_info_struct *p;
1da177e4 2413 unsigned int type;
efa90a98
HD
2414
2415 p = kzalloc(sizeof(*p), GFP_KERNEL);
2416 if (!p)
53cbb243 2417 return ERR_PTR(-ENOMEM);
efa90a98 2418
5d337b91 2419 spin_lock(&swap_lock);
efa90a98
HD
2420 for (type = 0; type < nr_swapfiles; type++) {
2421 if (!(swap_info[type]->flags & SWP_USED))
1da177e4 2422 break;
efa90a98 2423 }
0697212a 2424 if (type >= MAX_SWAPFILES) {
5d337b91 2425 spin_unlock(&swap_lock);
efa90a98 2426 kfree(p);
730c0581 2427 return ERR_PTR(-EPERM);
1da177e4 2428 }
efa90a98
HD
2429 if (type >= nr_swapfiles) {
2430 p->type = type;
2431 swap_info[type] = p;
2432 /*
2433 * Write swap_info[type] before nr_swapfiles, in case a
2434 * racing procfs swap_start() or swap_next() is reading them.
2435 * (We never shrink nr_swapfiles, we never free this entry.)
2436 */
2437 smp_wmb();
2438 nr_swapfiles++;
2439 } else {
2440 kfree(p);
2441 p = swap_info[type];
2442 /*
2443 * Do not memset this entry: a racing procfs swap_next()
2444 * would be relying on p->type to remain valid.
2445 */
2446 }
9625a5f2 2447 INIT_LIST_HEAD(&p->first_swap_extent.list);
18ab4d4c
DS
2448 plist_node_init(&p->list, 0);
2449 plist_node_init(&p->avail_list, 0);
1da177e4 2450 p->flags = SWP_USED;
5d337b91 2451 spin_unlock(&swap_lock);
ec8acf20 2452 spin_lock_init(&p->lock);
efa90a98 2453
53cbb243 2454 return p;
53cbb243
CEB
2455}
2456
4d0e1e10
CEB
2457static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2458{
2459 int error;
2460
2461 if (S_ISBLK(inode->i_mode)) {
2462 p->bdev = bdgrab(I_BDEV(inode));
2463 error = blkdev_get(p->bdev,
6f179af8 2464 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
4d0e1e10
CEB
2465 if (error < 0) {
2466 p->bdev = NULL;
6f179af8 2467 return error;
4d0e1e10
CEB
2468 }
2469 p->old_block_size = block_size(p->bdev);
2470 error = set_blocksize(p->bdev, PAGE_SIZE);
2471 if (error < 0)
87ade72a 2472 return error;
4d0e1e10
CEB
2473 p->flags |= SWP_BLKDEV;
2474 } else if (S_ISREG(inode->i_mode)) {
2475 p->bdev = inode->i_sb->s_bdev;
5955102c 2476 inode_lock(inode);
87ade72a
CEB
2477 if (IS_SWAPFILE(inode))
2478 return -EBUSY;
2479 } else
2480 return -EINVAL;
4d0e1e10
CEB
2481
2482 return 0;
4d0e1e10
CEB
2483}
2484
ca8bd38b
CEB
2485static unsigned long read_swap_header(struct swap_info_struct *p,
2486 union swap_header *swap_header,
2487 struct inode *inode)
2488{
2489 int i;
2490 unsigned long maxpages;
2491 unsigned long swapfilepages;
d6bbbd29 2492 unsigned long last_page;
ca8bd38b
CEB
2493
2494 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
465c47fd 2495 pr_err("Unable to find swap-space signature\n");
38719025 2496 return 0;
ca8bd38b
CEB
2497 }
2498
2499 /* swap partition endianness hack... */
2500 if (swab32(swap_header->info.version) == 1) {
2501 swab32s(&swap_header->info.version);
2502 swab32s(&swap_header->info.last_page);
2503 swab32s(&swap_header->info.nr_badpages);
dd111be6
JH
2504 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2505 return 0;
ca8bd38b
CEB
2506 for (i = 0; i < swap_header->info.nr_badpages; i++)
2507 swab32s(&swap_header->info.badpages[i]);
2508 }
2509 /* Check the swap header's sub-version */
2510 if (swap_header->info.version != 1) {
465c47fd
AM
2511 pr_warn("Unable to handle swap header version %d\n",
2512 swap_header->info.version);
38719025 2513 return 0;
ca8bd38b
CEB
2514 }
2515
2516 p->lowest_bit = 1;
2517 p->cluster_next = 1;
2518 p->cluster_nr = 0;
2519
2520 /*
2521 * Find out how many pages are allowed for a single swap
9b15b817 2522 * device. There are two limiting factors: 1) the number
a2c16d6c
HD
2523 * of bits for the swap offset in the swp_entry_t type, and
2524 * 2) the number of bits in the swap pte as defined by the
9b15b817 2525 * different architectures. In order to find the
a2c16d6c 2526 * largest possible bit mask, a swap entry with swap type 0
ca8bd38b 2527 * and swap offset ~0UL is created, encoded to a swap pte,
a2c16d6c 2528 * decoded to a swp_entry_t again, and finally the swap
ca8bd38b
CEB
2529 * offset is extracted. This will mask all the bits from
2530 * the initial ~0UL mask that can't be encoded in either
2531 * the swp_entry_t or the architecture definition of a
9b15b817 2532 * swap pte.
ca8bd38b
CEB
2533 */
2534 maxpages = swp_offset(pte_to_swp_entry(
9b15b817 2535 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
d6bbbd29
RJ
2536 last_page = swap_header->info.last_page;
2537 if (last_page > maxpages) {
465c47fd 2538 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
d6bbbd29
RJ
2539 maxpages << (PAGE_SHIFT - 10),
2540 last_page << (PAGE_SHIFT - 10));
2541 }
2542 if (maxpages > last_page) {
2543 maxpages = last_page + 1;
ca8bd38b
CEB
2544 /* p->max is an unsigned int: don't overflow it */
2545 if ((unsigned int)maxpages == 0)
2546 maxpages = UINT_MAX;
2547 }
2548 p->highest_bit = maxpages - 1;
2549
2550 if (!maxpages)
38719025 2551 return 0;
ca8bd38b
CEB
2552 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2553 if (swapfilepages && maxpages > swapfilepages) {
465c47fd 2554 pr_warn("Swap area shorter than signature indicates\n");
38719025 2555 return 0;
ca8bd38b
CEB
2556 }
2557 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
38719025 2558 return 0;
ca8bd38b 2559 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
38719025 2560 return 0;
ca8bd38b
CEB
2561
2562 return maxpages;
ca8bd38b
CEB
2563}
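/*
 * A minimal sketch of the maxpages trick above: push an all-ones offset
 * through an encode/decode round trip and count how many bits survive.
 * The 50-bit offset field below is made up for illustration; real
 * architectures define their own swap pte layout.
 */
#include <stdio.h>

#define TOY_OFFSET_BITS	50	/* assumption, not a real arch value */

static unsigned long long toy_encode_decode(unsigned long long offset)
{
	/* bits beyond the pte's offset field are silently dropped */
	return offset & ((1ULL << TOY_OFFSET_BITS) - 1);
}

int main(void)
{
	/* same shape as swp_offset(pte_to_swp_entry(swp_entry_to_pte(...))) + 1 */
	unsigned long long maxpages = toy_encode_decode(~0ULL) + 1;

	printf("surviving offset bits: %d\n", TOY_OFFSET_BITS);
	printf("maxpages: %llu\n", maxpages);
	return 0;
}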
2564
4b3ef9da 2565#define SWAP_CLUSTER_INFO_COLS \
235b6217 2566 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
4b3ef9da
HY
2567#define SWAP_CLUSTER_SPACE_COLS \
2568 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
2569#define SWAP_CLUSTER_COLS \
2570 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
235b6217 2571
915d4d7b
CEB
2572static int setup_swap_map_and_extents(struct swap_info_struct *p,
2573 union swap_header *swap_header,
2574 unsigned char *swap_map,
2a8f9449 2575 struct swap_cluster_info *cluster_info,
915d4d7b
CEB
2576 unsigned long maxpages,
2577 sector_t *span)
2578{
235b6217 2579 unsigned int j, k;
915d4d7b
CEB
2580 unsigned int nr_good_pages;
2581 int nr_extents;
2a8f9449 2582 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
235b6217
HY
2583 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
2584 unsigned long i, idx;
915d4d7b
CEB
2585
2586 nr_good_pages = maxpages - 1; /* omit header page */
2587
6b534915
HY
2588 cluster_list_init(&p->free_clusters);
2589 cluster_list_init(&p->discard_clusters);
2a8f9449 2590
915d4d7b
CEB
2591 for (i = 0; i < swap_header->info.nr_badpages; i++) {
2592 unsigned int page_nr = swap_header->info.badpages[i];
bdb8e3f6
CEB
2593 if (page_nr == 0 || page_nr > swap_header->info.last_page)
2594 return -EINVAL;
915d4d7b
CEB
2595 if (page_nr < maxpages) {
2596 swap_map[page_nr] = SWAP_MAP_BAD;
2597 nr_good_pages--;
2a8f9449
SL
2598 /*
2599 * Haven't marked the cluster free yet, no list
2600 * operation involved
2601 */
2602 inc_cluster_info_page(p, cluster_info, page_nr);
915d4d7b
CEB
2603 }
2604 }
2605
2a8f9449
SL
2606 /* Haven't marked the cluster free yet, no list operation involved */
2607 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2608 inc_cluster_info_page(p, cluster_info, i);
2609
915d4d7b
CEB
2610 if (nr_good_pages) {
2611 swap_map[0] = SWAP_MAP_BAD;
2a8f9449
SL
2612 /*
2613 * Haven't marked the cluster free yet, no list
2614 * operation involved
2615 */
2616 inc_cluster_info_page(p, cluster_info, 0);
915d4d7b
CEB
2617 p->max = maxpages;
2618 p->pages = nr_good_pages;
2619 nr_extents = setup_swap_extents(p, span);
bdb8e3f6
CEB
2620 if (nr_extents < 0)
2621 return nr_extents;
915d4d7b
CEB
2622 nr_good_pages = p->pages;
2623 }
2624 if (!nr_good_pages) {
465c47fd 2625 pr_warn("Empty swap-file\n");
bdb8e3f6 2626 return -EINVAL;
915d4d7b
CEB
2627 }
2628
2a8f9449
SL
2629 if (!cluster_info)
2630 return nr_extents;
2631
235b6217 2632
4b3ef9da
HY
2633 /*
2634 * Reduce false cache line sharing between cluster_info entries
2635 * belonging to the same address space.
2636 */
235b6217
HY
2637 for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
2638 j = (k + col) % SWAP_CLUSTER_COLS;
2639 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
2640 idx = i * SWAP_CLUSTER_COLS + j;
2641 if (idx >= nr_clusters)
2642 continue;
2643 if (cluster_count(&cluster_info[idx]))
2644 continue;
2a8f9449 2645 cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
6b534915
HY
2646 cluster_list_add_tail(&p->free_clusters, cluster_info,
2647 idx);
2a8f9449 2648 }
2a8f9449 2649 }
915d4d7b 2650 return nr_extents;
915d4d7b
CEB
2651}
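/*
 * A minimal sketch of the free-cluster ordering above: with COLS
 * columns, clusters are queued column by column (starting from the
 * column holding cluster_next) rather than sequentially, so adjacent
 * free-list neighbours land on different cache lines. All values here
 * are illustrative.
 */
#include <stdio.h>

#define COLS		4	/* stand-in for SWAP_CLUSTER_COLS */
#define NR_CLUSTERS	10

int main(void)
{
	unsigned long col = 2;	/* the column cluster_next falls in, say */

	for (unsigned long k = 0; k < COLS; k++) {
		unsigned long j = (k + col) % COLS;

		for (unsigned long i = 0;
		     i < (NR_CLUSTERS + COLS - 1) / COLS; i++) {
			unsigned long idx = i * COLS + j;

			if (idx >= NR_CLUSTERS)
				continue;
			printf("%lu ", idx);	/* join order for the free list */
		}
	}
	printf("\n");	/* prints: 2 6 3 7 0 4 8 1 5 9 */
	return 0;
}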
2652
dcf6b7dd
RA
2653/*
2654 * Helper for sys_swapon to determine whether a given swap
2655 * backing device's queue supports DISCARD operations.
2656 */
2657static bool swap_discardable(struct swap_info_struct *si)
2658{
2659 struct request_queue *q = bdev_get_queue(si->bdev);
2660
2661 if (!q || !blk_queue_discard(q))
2662 return false;
2663
2664 return true;
2665}
2666
53cbb243
CEB
2667SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2668{
2669 struct swap_info_struct *p;
91a27b2a 2670 struct filename *name;
53cbb243
CEB
2671 struct file *swap_file = NULL;
2672 struct address_space *mapping;
40531542 2673 int prio;
53cbb243
CEB
2674 int error;
2675 union swap_header *swap_header;
915d4d7b 2676 int nr_extents;
53cbb243
CEB
2677 sector_t span;
2678 unsigned long maxpages;
53cbb243 2679 unsigned char *swap_map = NULL;
2a8f9449 2680 struct swap_cluster_info *cluster_info = NULL;
38b5faf4 2681 unsigned long *frontswap_map = NULL;
53cbb243
CEB
2682 struct page *page = NULL;
2683 struct inode *inode = NULL;
53cbb243 2684
d15cab97
HD
2685 if (swap_flags & ~SWAP_FLAGS_VALID)
2686 return -EINVAL;
2687
53cbb243
CEB
2688 if (!capable(CAP_SYS_ADMIN))
2689 return -EPERM;
2690
2691 p = alloc_swap_info();
2542e513
CEB
2692 if (IS_ERR(p))
2693 return PTR_ERR(p);
53cbb243 2694
815c2c54
SL
2695 INIT_WORK(&p->discard_work, swap_discard_work);
2696
1da177e4 2697 name = getname(specialfile);
1da177e4 2698 if (IS_ERR(name)) {
7de7fb6b 2699 error = PTR_ERR(name);
1da177e4 2700 name = NULL;
bd69010b 2701 goto bad_swap;
1da177e4 2702 }
669abf4e 2703 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
1da177e4 2704 if (IS_ERR(swap_file)) {
7de7fb6b 2705 error = PTR_ERR(swap_file);
1da177e4 2706 swap_file = NULL;
bd69010b 2707 goto bad_swap;
1da177e4
LT
2708 }
2709
2710 p->swap_file = swap_file;
2711 mapping = swap_file->f_mapping;
2130781e 2712 inode = mapping->host;
6f179af8 2713
5955102c 2714 /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */
4d0e1e10
CEB
2715 error = claim_swapfile(p, inode);
2716 if (unlikely(error))
1da177e4 2717 goto bad_swap;
1da177e4 2718
1da177e4
LT
2719 /*
2720 * Read the swap header.
2721 */
2722 if (!mapping->a_ops->readpage) {
2723 error = -EINVAL;
2724 goto bad_swap;
2725 }
090d2b18 2726 page = read_mapping_page(mapping, 0, swap_file);
1da177e4
LT
2727 if (IS_ERR(page)) {
2728 error = PTR_ERR(page);
2729 goto bad_swap;
2730 }
81e33971 2731 swap_header = kmap(page);
1da177e4 2732
ca8bd38b
CEB
2733 maxpages = read_swap_header(p, swap_header, inode);
2734 if (unlikely(!maxpages)) {
1da177e4
LT
2735 error = -EINVAL;
2736 goto bad_swap;
2737 }
886bb7e9 2738
81e33971 2739 /* OK, set up the swap map and apply the bad block list */
803d0c83 2740 swap_map = vzalloc(maxpages);
81e33971
HD
2741 if (!swap_map) {
2742 error = -ENOMEM;
2743 goto bad_swap;
2744 }
f0571429
MK
2745
2746 if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
2747 p->flags |= SWP_STABLE_WRITES;
2748
2a8f9449 2749 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
6f179af8 2750 int cpu;
235b6217 2751 unsigned long ci, nr_cluster;
6f179af8 2752
2a8f9449
SL
2753 p->flags |= SWP_SOLIDSTATE;
2754 /*
2755 * select a random position to start with to help wear leveling
2756 * SSD
2757 */
2758 p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
235b6217 2759 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2a8f9449 2760
235b6217 2761 cluster_info = vzalloc(nr_cluster * sizeof(*cluster_info));
2a8f9449
SL
2762 if (!cluster_info) {
2763 error = -ENOMEM;
2764 goto bad_swap;
2765 }
235b6217
HY
2766
2767 for (ci = 0; ci < nr_cluster; ci++)
2768 spin_lock_init(&((cluster_info + ci)->lock));
2769
ebc2a1a6
SL
2770 p->percpu_cluster = alloc_percpu(struct percpu_cluster);
2771 if (!p->percpu_cluster) {
2772 error = -ENOMEM;
2773 goto bad_swap;
2774 }
6f179af8 2775 for_each_possible_cpu(cpu) {
ebc2a1a6 2776 struct percpu_cluster *cluster;
6f179af8 2777 cluster = per_cpu_ptr(p->percpu_cluster, cpu);
ebc2a1a6
SL
2778 cluster_set_null(&cluster->index);
2779 }
2a8f9449 2780 }
1da177e4 2781
1421ef3c
CEB
2782 error = swap_cgroup_swapon(p->type, maxpages);
2783 if (error)
2784 goto bad_swap;
2785
915d4d7b 2786 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2a8f9449 2787 cluster_info, maxpages, &span);
915d4d7b
CEB
2788 if (unlikely(nr_extents < 0)) {
2789 error = nr_extents;
1da177e4
LT
2790 goto bad_swap;
2791 }
38b5faf4 2792 /* frontswap enabled? set up bit-per-page map for frontswap */
8ea1d2a1 2793 if (IS_ENABLED(CONFIG_FRONTSWAP))
7b57976d 2794 frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
1da177e4 2795
2a8f9449
SL
2796 if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
2797 /*
2798 * When discard is enabled for swap with no particular
2799 * policy flagged, we set all swap discard flags here in
2800 * order to sustain backward compatibility with older
2801 * swapon(8) releases.
2802 */
2803 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
2804 SWP_PAGE_DISCARD);
dcf6b7dd 2805
2a8f9449
SL
2806 /*
2807 * By flagging sys_swapon, a sysadmin can tell us to
2808 * either do single-time area discards only, or to just
2809 * perform discards for released swap page-clusters.
2810 * Now it's time to adjust the p->flags accordingly.
2811 */
2812 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
2813 p->flags &= ~SWP_PAGE_DISCARD;
2814 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
2815 p->flags &= ~SWP_AREA_DISCARD;
2816
2817 /* issue a swapon-time discard if it's still required */
2818 if (p->flags & SWP_AREA_DISCARD) {
2819 int err = discard_swap(p);
2820 if (unlikely(err))
2821 pr_err("swapon: discard_swap(%p): %d\n",
2822 p, err);
dcf6b7dd 2823 }
20137a49 2824 }
6a6ba831 2825
4b3ef9da
HY
2826 error = init_swap_address_space(p->type, maxpages);
2827 if (error)
2828 goto bad_swap;
2829
fc0abb14 2830 mutex_lock(&swapon_mutex);
40531542 2831 prio = -1;
78ecba08 2832 if (swap_flags & SWAP_FLAG_PREFER)
40531542 2833 prio =
78ecba08 2834 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2a8f9449 2835 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
c69dbfb8 2836
756a025f 2837 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
91a27b2a 2838 p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
c69dbfb8
CEB
2839 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2840 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
38b5faf4 2841 (p->flags & SWP_DISCARDABLE) ? "D" : "",
dcf6b7dd
RA
2842 (p->flags & SWP_AREA_DISCARD) ? "s" : "",
2843 (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
38b5faf4 2844 (frontswap_map) ? "FS" : "");
c69dbfb8 2845
fc0abb14 2846 mutex_unlock(&swapon_mutex);
66d7dd51
KS
2847 atomic_inc(&proc_poll_event);
2848 wake_up_interruptible(&proc_poll_wait);
2849
9b01c350
CEB
2850 if (S_ISREG(inode->i_mode))
2851 inode->i_flags |= S_SWAPFILE;
1da177e4
LT
2852 error = 0;
2853 goto out;
2854bad_swap:
ebc2a1a6
SL
2855 free_percpu(p->percpu_cluster);
2856 p->percpu_cluster = NULL;
bd69010b 2857 if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
f2090d2d
CEB
2858 set_blocksize(p->bdev, p->old_block_size);
2859 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1da177e4 2860 }
4cd3bb10 2861 destroy_swap_extents(p);
e8e6c2ec 2862 swap_cgroup_swapoff(p->type);
5d337b91 2863 spin_lock(&swap_lock);
1da177e4 2864 p->swap_file = NULL;
1da177e4 2865 p->flags = 0;
5d337b91 2866 spin_unlock(&swap_lock);
1da177e4 2867 vfree(swap_map);
2a8f9449 2868 vfree(cluster_info);
52c50567 2869 if (swap_file) {
2130781e 2870 if (inode && S_ISREG(inode->i_mode)) {
5955102c 2871 inode_unlock(inode);
2130781e
CEB
2872 inode = NULL;
2873 }
1da177e4 2874 filp_close(swap_file, NULL);
52c50567 2875 }
1da177e4
LT
2876out:
2877 if (page && !IS_ERR(page)) {
2878 kunmap(page);
09cbfeaf 2879 put_page(page);
1da177e4
LT
2880 }
2881 if (name)
2882 putname(name);
9b01c350 2883 if (inode && S_ISREG(inode->i_mode))
5955102c 2884 inode_unlock(inode);
1da177e4
LT
2885 return error;
2886}
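/*
 * A minimal userspace sketch of driving the syscall above: enable a
 * swap file with an explicit priority via SWAP_FLAG_PREFER. The path is
 * illustrative; the file must already be mkswap-formatted and the
 * caller needs CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	int prio = 10;
	int flags = SWAP_FLAG_PREFER |
		    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);

	if (swapon("/var/tmp/swapfile", flags) != 0) {	/* path is illustrative */
		perror("swapon");
		return 1;
	}
	return 0;
}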
2887
2888void si_swapinfo(struct sysinfo *val)
2889{
efa90a98 2890 unsigned int type;
1da177e4
LT
2891 unsigned long nr_to_be_unused = 0;
2892
5d337b91 2893 spin_lock(&swap_lock);
efa90a98
HD
2894 for (type = 0; type < nr_swapfiles; type++) {
2895 struct swap_info_struct *si = swap_info[type];
2896
2897 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2898 nr_to_be_unused += si->inuse_pages;
1da177e4 2899 }
ec8acf20 2900 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
1da177e4 2901 val->totalswap = total_swap_pages + nr_to_be_unused;
5d337b91 2902 spin_unlock(&swap_lock);
1da177e4
LT
2903}
2904
2905/*
2906 * Verify that a swap entry is valid and increment its swap map count.
2907 *
355cfa73
KH
2908 * Returns an error code in the following cases:
2909 * - success -> 0
2910 * - swp_entry is invalid -> EINVAL
2911 * - swp_entry is migration entry -> EINVAL
2912 * - swap-cache reference is requested but there is already one. -> EEXIST
2913 * - swap-cache reference is requested but the entry is not used. -> ENOENT
570a335b 2914 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
1da177e4 2915 */
8d69aaee 2916static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
1da177e4 2917{
73c34b6a 2918 struct swap_info_struct *p;
235b6217 2919 struct swap_cluster_info *ci;
1da177e4 2920 unsigned long offset, type;
8d69aaee
HD
2921 unsigned char count;
2922 unsigned char has_cache;
253d553b 2923 int err = -EINVAL;
1da177e4 2924
a7420aa5 2925 if (non_swap_entry(entry))
253d553b 2926 goto out;
0697212a 2927
1da177e4
LT
2928 type = swp_type(entry);
2929 if (type >= nr_swapfiles)
2930 goto bad_file;
efa90a98 2931 p = swap_info[type];
1da177e4 2932 offset = swp_offset(entry);
355cfa73 2933 if (unlikely(offset >= p->max))
235b6217
HY
2934 goto out;
2935
2936 ci = lock_cluster_or_swap_info(p, offset);
355cfa73 2937
253d553b 2938 count = p->swap_map[offset];
edfe23da
SL
2939
2940 /*
2941 * swapin_readahead() doesn't check if a swap entry is valid, so the
2942 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
2943 */
2944 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
2945 err = -ENOENT;
2946 goto unlock_out;
2947 }
2948
253d553b
HD
2949 has_cache = count & SWAP_HAS_CACHE;
2950 count &= ~SWAP_HAS_CACHE;
2951 err = 0;
355cfa73 2952
253d553b 2953 if (usage == SWAP_HAS_CACHE) {
355cfa73
KH
2954
2955 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
253d553b
HD
2956 if (!has_cache && count)
2957 has_cache = SWAP_HAS_CACHE;
2958 else if (has_cache) /* someone else added cache */
2959 err = -EEXIST;
2960 else /* no users remaining */
2961 err = -ENOENT;
355cfa73
KH
2962
2963 } else if (count || has_cache) {
253d553b 2964
570a335b
HD
2965 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2966 count += usage;
2967 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
253d553b 2968 err = -EINVAL;
570a335b
HD
2969 else if (swap_count_continued(p, offset, count))
2970 count = COUNT_CONTINUED;
2971 else
2972 err = -ENOMEM;
355cfa73 2973 } else
253d553b
HD
2974 err = -ENOENT; /* unused swap entry */
2975
2976 p->swap_map[offset] = count | has_cache;
2977
355cfa73 2978unlock_out:
235b6217 2979 unlock_cluster_or_swap_info(p, ci);
1da177e4 2980out:
253d553b 2981 return err;
1da177e4
LT
2982
2983bad_file:
465c47fd 2984 pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
1da177e4
LT
2985 goto out;
2986}
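/*
 * A minimal sketch of the swap_map byte layout handled above: the usage
 * count and the SWAP_HAS_CACHE bit share one unsigned char, so both are
 * split apart before the count is inspected or bumped and recombined on
 * store. The constant mirrors the kernel's, but this is an
 * illustration, not the kernel code path.
 */
#include <stdio.h>

#define SWAP_HAS_CACHE	0x40	/* page is in swap cache */

int main(void)
{
	unsigned char map = 3 | SWAP_HAS_CACHE;	/* 3 refs + cached */
	unsigned char has_cache = map & SWAP_HAS_CACHE;
	unsigned char count = map & ~SWAP_HAS_CACHE;

	count += 1;			/* one more swap_duplicate() */
	map = count | has_cache;	/* recombine on store */

	printf("count=%u cached=%s\n", map & ~SWAP_HAS_CACHE,
	       (map & SWAP_HAS_CACHE) ? "yes" : "no");
	return 0;
}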
253d553b 2987
aaa46865
HD
2988/*
2989 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2990 * (in which case its reference count is never incremented).
2991 */
2992void swap_shmem_alloc(swp_entry_t entry)
2993{
2994 __swap_duplicate(entry, SWAP_MAP_SHMEM);
2995}
2996
355cfa73 2997/*
08259d58
HD
2998 * Increase reference count of swap entry by 1.
2999 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3000 * but could not be atomically allocated. Returns 0, just as if it succeeded,
3001 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3002 * might occur if a page table entry has got corrupted.
355cfa73 3003 */
570a335b 3004int swap_duplicate(swp_entry_t entry)
355cfa73 3005{
570a335b
HD
3006 int err = 0;
3007
3008 while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3009 err = add_swap_count_continuation(entry, GFP_ATOMIC);
3010 return err;
355cfa73 3011}
1da177e4 3012
cb4b86ba 3013/*
355cfa73
KH
3014 * @entry: swap entry for which we allocate swap cache.
3015 *
73c34b6a 3016 * Called when allocating swap cache for an existing swap entry.
355cfa73
KH
3017 * This can return error codes; it returns 0 on success.
3018 * -EBUSY means a swap cache already exists.
3019 * Note: return code is different from swap_duplicate().
cb4b86ba
KH
3020 */
3021int swapcache_prepare(swp_entry_t entry)
3022{
253d553b 3023 return __swap_duplicate(entry, SWAP_HAS_CACHE);
cb4b86ba
KH
3024}
3025
f981c595
MG
3026struct swap_info_struct *page_swap_info(struct page *page)
3027{
3028 swp_entry_t swap = { .val = page_private(page) };
f981c595
MG
3029 return swap_info[swp_type(swap)];
3030}
3031
3032/*
3033 * out-of-line __page_file_ methods to avoid include hell.
3034 */
3035struct address_space *__page_file_mapping(struct page *page)
3036{
309381fe 3037 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
f981c595
MG
3038 return page_swap_info(page)->swap_file->f_mapping;
3039}
3040EXPORT_SYMBOL_GPL(__page_file_mapping);
3041
3042pgoff_t __page_file_index(struct page *page)
3043{
3044 swp_entry_t swap = { .val = page_private(page) };
309381fe 3045 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
f981c595
MG
3046 return swp_offset(swap);
3047}
3048EXPORT_SYMBOL_GPL(__page_file_index);
3049
570a335b
HD
3050/*
3051 * add_swap_count_continuation - called when a swap count is duplicated
3052 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3053 * page of the original vmalloc'ed swap_map, to hold the continuation count
3054 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
3055 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3056 *
3057 * These continuation pages are seldom referenced: the common paths all work
3058 * on the original swap_map, only referring to a continuation page when the
3059 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3060 *
3061 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3062 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3063 * can be called after dropping locks.
3064 */
3065int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3066{
3067 struct swap_info_struct *si;
235b6217 3068 struct swap_cluster_info *ci;
570a335b
HD
3069 struct page *head;
3070 struct page *page;
3071 struct page *list_page;
3072 pgoff_t offset;
3073 unsigned char count;
3074
3075 /*
3076 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3077 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3078 */
3079 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3080
3081 si = swap_info_get(entry);
3082 if (!si) {
3083 /*
3084 * An acceptable race has occurred since the failing
3085 * __swap_duplicate(): the swap entry has been freed,
3086 * perhaps even the whole swap_map cleared for swapoff.
3087 */
3088 goto outer;
3089 }
3090
3091 offset = swp_offset(entry);
235b6217
HY
3092
3093 ci = lock_cluster(si, offset);
3094
570a335b
HD
3095 count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
3096
3097 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3098 /*
3099 * The higher the swap count, the more likely it is that tasks
3100 * will race to add swap count continuation: we need to avoid
3101 * over-provisioning.
3102 */
3103 goto out;
3104 }
3105
3106 if (!page) {
235b6217 3107 unlock_cluster(ci);
ec8acf20 3108 spin_unlock(&si->lock);
570a335b
HD
3109 return -ENOMEM;
3110 }
3111
3112 /*
3113 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2de1a7e4
SJ
3114 * no architecture is using highmem pages for kernel page tables: so it
3115 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
570a335b
HD
3116 */
3117 head = vmalloc_to_page(si->swap_map + offset);
3118 offset &= ~PAGE_MASK;
3119
3120 /*
3121 * Page allocation does not initialize the page's lru field,
3122 * but it does always reset its private field.
3123 */
3124 if (!page_private(head)) {
3125 BUG_ON(count & COUNT_CONTINUED);
3126 INIT_LIST_HEAD(&head->lru);
3127 set_page_private(head, SWP_CONTINUED);
3128 si->flags |= SWP_CONTINUED;
3129 }
3130
3131 list_for_each_entry(list_page, &head->lru, lru) {
3132 unsigned char *map;
3133
3134 /*
3135 * If the previous map said no continuation, but we've found
3136 * a continuation page, free our allocation and use this one.
3137 */
3138 if (!(count & COUNT_CONTINUED))
3139 goto out;
3140
9b04c5fe 3141 map = kmap_atomic(list_page) + offset;
570a335b 3142 count = *map;
9b04c5fe 3143 kunmap_atomic(map);
570a335b
HD
3144
3145 /*
3146 * If this continuation count now has some space in it,
3147 * free our allocation and use this one.
3148 */
3149 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3150 goto out;
3151 }
3152
3153 list_add_tail(&page->lru, &head->lru);
3154 page = NULL; /* now it's attached, don't free it */
3155out:
235b6217 3156 unlock_cluster(ci);
ec8acf20 3157 spin_unlock(&si->lock);
570a335b
HD
3158outer:
3159 if (page)
3160 __free_page(page);
3161 return 0;
3162}
3163
3164/*
3165 * swap_count_continued - when the original swap_map count is incremented
3166 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3167 * into, carry if so, or else fail until a new continuation page is allocated;
3168 * when the original swap_map count is decremented from 0 with continuation,
3169 * borrow from the continuation and report whether it still holds more.
235b6217
HY
3170 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3171 * lock.
570a335b
HD
3172 */
3173static bool swap_count_continued(struct swap_info_struct *si,
3174 pgoff_t offset, unsigned char count)
3175{
3176 struct page *head;
3177 struct page *page;
3178 unsigned char *map;
3179
3180 head = vmalloc_to_page(si->swap_map + offset);
3181 if (page_private(head) != SWP_CONTINUED) {
3182 BUG_ON(count & COUNT_CONTINUED);
3183 return false; /* need to add count continuation */
3184 }
3185
3186 offset &= ~PAGE_MASK;
3187 page = list_entry(head->lru.next, struct page, lru);
9b04c5fe 3188 map = kmap_atomic(page) + offset;
570a335b
HD
3189
3190 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
3191 goto init_map; /* jump over SWAP_CONT_MAX checks */
3192
3193 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3194 /*
3195 * Think of how you add 1 to 999
3196 */
3197 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
9b04c5fe 3198 kunmap_atomic(map);
570a335b
HD
3199 page = list_entry(page->lru.next, struct page, lru);
3200 BUG_ON(page == head);
9b04c5fe 3201 map = kmap_atomic(page) + offset;
570a335b
HD
3202 }
3203 if (*map == SWAP_CONT_MAX) {
9b04c5fe 3204 kunmap_atomic(map);
570a335b
HD
3205 page = list_entry(page->lru.next, struct page, lru);
3206 if (page == head)
3207 return false; /* add count continuation */
9b04c5fe 3208 map = kmap_atomic(page) + offset;
570a335b
HD
3209init_map: *map = 0; /* we didn't zero the page */
3210 }
3211 *map += 1;
9b04c5fe 3212 kunmap_atomic(map);
570a335b
HD
3213 page = list_entry(page->lru.prev, struct page, lru);
3214 while (page != head) {
9b04c5fe 3215 map = kmap_atomic(page) + offset;
570a335b 3216 *map = COUNT_CONTINUED;
9b04c5fe 3217 kunmap_atomic(map);
570a335b
HD
3218 page = list_entry(page->lru.prev, struct page, lru);
3219 }
3220 return true; /* incremented */
3221
3222 } else { /* decrementing */
3223 /*
3224 * Think of how you subtract 1 from 1000
3225 */
3226 BUG_ON(count != COUNT_CONTINUED);
3227 while (*map == COUNT_CONTINUED) {
9b04c5fe 3228 kunmap_atomic(map);
570a335b
HD
3229 page = list_entry(page->lru.next, struct page, lru);
3230 BUG_ON(page == head);
9b04c5fe 3231 map = kmap_atomic(page) + offset;
570a335b
HD
3232 }
3233 BUG_ON(*map == 0);
3234 *map -= 1;
3235 if (*map == 0)
3236 count = 0;
9b04c5fe 3237 kunmap_atomic(map);
570a335b
HD
3238 page = list_entry(page->lru.prev, struct page, lru);
3239 while (page != head) {
9b04c5fe 3240 map = kmap_atomic(page) + offset;
570a335b
HD
3241 *map = SWAP_CONT_MAX | count;
3242 count = COUNT_CONTINUED;
9b04c5fe 3243 kunmap_atomic(map);
570a335b
HD
3244 page = list_entry(page->lru.prev, struct page, lru);
3245 }
3246 return count == COUNT_CONTINUED;
3247 }
3248}
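/*
 * A loose model of the "add 1 to 999" carry arithmetic above: the total
 * reference count behaves like a little-endian multi-digit number whose
 * low digit (the swap_map byte) saturates at SWAP_MAP_MAX and whose
 * higher digits (continuation pages) run up to SWAP_CONT_MAX. The
 * array stands in for the chain of continuation pages; this sketches
 * the carry behaviour, not the exact kernel encoding.
 */
#include <stdio.h>

#define SWAP_MAP_MAX	0x3e	/* low-digit limit, as in the kernel */
#define SWAP_CONT_MAX	0x7f	/* continuation-digit limit */

static void incr(unsigned char *digits, int n)
{
	if (digits[0] < SWAP_MAP_MAX) {
		digits[0]++;
		return;
	}
	/* low digit full: carry into the continuation digits */
	for (int i = 1; i < n; i++) {
		if (digits[i] < SWAP_CONT_MAX) {
			digits[i]++;
			return;
		}
		digits[i] = 0;	/* ...and keep carrying, like 999 + 1 */
	}
}

int main(void)
{
	unsigned char d[3] = { SWAP_MAP_MAX, SWAP_CONT_MAX, 0 };

	incr(d, 3);	/* carries through digit 1 into digit 2 */
	printf("%u %u %u\n", d[0], d[1], d[2]);	/* prints: 62 0 1 */
	return 0;
}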
3249
3250/*
3251 * free_swap_count_continuations - swapoff free all the continuation pages
3252 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3253 */
3254static void free_swap_count_continuations(struct swap_info_struct *si)
3255{
3256 pgoff_t offset;
3257
3258 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3259 struct page *head;
3260 head = vmalloc_to_page(si->swap_map + offset);
3261 if (page_private(head)) {
0d576d20
GT
3262 struct page *page, *next;
3263
3264 list_for_each_entry_safe(page, next, &head->lru, lru) {
3265 list_del(&page->lru);
570a335b
HD
3266 __free_page(page);
3267 }
3268 }
3269 }
3270}