// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <asm/pgtable.h>

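/*
 * Allocate a one-segment bio for a swap page (possibly a THP) and aim it
 * at the device sectors backing the page's swap slot.
 */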
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
	}
	return bio;
}

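/*
 * Completion handler for swap-out bios.  On I/O error the page is redirtied
 * and PG_reclaim is cleared so reclaim retries the write instead of silently
 * dropping the data.
 */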
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free the zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}

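/*
 * Illustrative sketch, not part of this file: a zram-like block driver
 * receives the notification above by filling in the optional
 * swap_slot_free_notify() method of its block_device_operations.  The
 * my_* names and the my_free_slot() helper are hypothetical, and the
 * snippet is kept under #if 0 because it is only an example.
 */
#if 0
static void my_swap_slot_free_notify(struct block_device *bdev,
				     unsigned long offset)
{
	struct my_dev *dev = bdev->bd_disk->private_data;

	/* Drop the driver-owned copy backing swap slot @offset. */
	my_free_slot(dev, offset);
}

static const struct block_device_operations my_fops = {
	.owner			= THIS_MODULE,
	.swap_slot_free_notify	= my_swap_slot_free_notify,
};
#endif
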
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

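/*
 * Build the swap extent tree for a swapfile by probing its on-disk block
 * layout with bmap().  Only PAGE_SIZE-aligned, physically contiguous runs
 * of blocks are accepted; a hole in the file aborts swapon with -EINVAL.
 */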
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
	    page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

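/*
 * Illustrative sketch, not part of this file: a built-in filesystem whose
 * swapfiles are plain block-mapped files could satisfy the ->swap_activate()
 * hook in its address_space_operations by delegating to the generic helper
 * above (declared in <linux/swap.h>).  The "myfs" names are hypothetical and
 * the snippet is kept under #if 0 because it is only an example.
 */
#if 0
static int myfs_swap_activate(struct swap_info_struct *sis,
			      struct file *file, sector_t *span)
{
	return generic_swapfile_activate(sis, file, span);
}

static const struct address_space_operations myfs_aops = {
	.swap_activate	= myfs_swap_activate,
};
#endif
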
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

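/*
 * Swap slots are indexed in page-size units but the block layer wants
 * 512-byte sectors, hence the PAGE_SHIFT - 9 shift below (with 4 KiB
 * pages, page index 3 becomes sector 24, for example).
 */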
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, hpage_nr_pages(page));
}

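/*
 * Start writeback of a swap page.  File-backed (SWP_FS) swap goes through
 * the filesystem's ->direct_IO(), block devices implementing ->rw_page()
 * are tried via bdev_write_page(), and everything else falls back to a
 * regular bio built by get_swap_bio().
 */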
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (sis->flags & SWP_FS) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure may
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}

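/*
 * Read a page back in from swap.  With @synchronous the caller waits here
 * for completion, polling the queue where possible, instead of sleeping
 * on the page lock; callers use this for fast synchronous-I/O swap devices.
 */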
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup is IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FS) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		if (trylock_page(page)) {
			swap_slot_free_notify(page);
			unlock_page(page);
		}

		count_vm_event(PSWPIN);
		goto out;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

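/*
 * Dirty a swap cache page: file-backed (SWP_FS) swap defers to the backing
 * filesystem's ->set_page_dirty(), while block-backed swap only needs the
 * no-writeback variant because swap_writepage() drives the I/O itself.
 */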
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FS) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}