// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}

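/*
 * For example, going by the arithmetic above, a machine with 16 GiB of RAM
 * gets a default image_size target of 2/5 of that, i.e. roughly 6.4 GiB.
 */
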
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
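
/*
 * Note: assuming 4 KiB pages and 64-bit pointers (a common configuration,
 * not something the code above requires), LINKED_PAGE_DATA_SIZE works out
 * to 4096 - 8 = 4088 bytes of payload per linked_page.
 */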

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

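/*
 * Note that chain_init() marks the (nonexistent) current page as full by
 * setting used_space to LINKED_PAGE_DATA_SIZE, so the first chain_alloc()
 * on a freshly initialized allocator always grabs a new page.
 */
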
/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
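
/*
 * Worked example, assuming 4 KiB pages: one bitmap block (leaf page) holds
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768 bits, i.e. it covers 32768 page
 * frames (128 MiB of memory), and BM_BLOCK_SHIFT = 12 + 3 = 15 = log2(32768),
 * so a zone-relative PFN splits into a block number (pfn >> BM_BLOCK_SHIFT)
 * and a bit index within that block (pfn & BM_BLOCK_MASK).
 */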

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
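
/*
 * For example, on a 64-bit machine with 4 KiB pages, BM_ENTRIES_PER_LEVEL =
 * 4096 / 8 = 512 and BM_RTREE_LEVEL_SHIFT = 12 - 3 = 9 = log2(512): each
 * inner node indexes up to 512 children, so every additional tree level
 * multiplies the number of addressable bitmap blocks by 512.
 */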

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

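/*
 * Example of the index computation above, assuming BM_RTREE_LEVEL_SHIFT == 9:
 * inserting block number 1000 into a two-level tree visits index
 * 1000 >> 9 = 1 at the top level and 1000 & BM_RTREE_LEVEL_MASK = 488 at the
 * leaf level.
 */
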
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

578
579/**
ef96f639
RW
580 * create_mem_extents - Create a list of memory extents.
581 * @list: List to put the extents into.
582 * @gfp_mask: Mask to use for memory allocations.
583 *
584 * The extents represent contiguous ranges of PFNs.
846705de
RW
585 */
586static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
b788db79 587{
846705de 588 struct zone *zone;
b788db79 589
846705de 590 INIT_LIST_HEAD(list);
b788db79 591
ee99c71c 592 for_each_populated_zone(zone) {
846705de
RW
593 unsigned long zone_start, zone_end;
594 struct mem_extent *ext, *cur, *aux;
595
846705de 596 zone_start = zone->zone_start_pfn;
c33bc315 597 zone_end = zone_end_pfn(zone);
846705de
RW
598
599 list_for_each_entry(ext, list, hook)
600 if (zone_start <= ext->end)
601 break;
b788db79 602
846705de
RW
603 if (&ext->hook == list || zone_end < ext->start) {
604 /* New extent is necessary */
605 struct mem_extent *new_ext;
606
607 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
608 if (!new_ext) {
609 free_mem_extents(list);
610 return -ENOMEM;
611 }
612 new_ext->start = zone_start;
613 new_ext->end = zone_end;
614 list_add_tail(&new_ext->hook, &ext->hook);
615 continue;
616 }
617
618 /* Merge this zone's range of PFNs with the existing one */
619 if (zone_start < ext->start)
620 ext->start = zone_start;
621 if (zone_end > ext->end)
622 ext->end = zone_end;
623
624 /* More merging may be possible */
625 cur = ext;
626 list_for_each_entry_safe_continue(cur, aux, list, hook) {
627 if (zone_end < cur->start)
628 break;
629 if (zone_end < cur->end)
630 ext->end = cur->end;
631 list_del(&cur->hook);
632 kfree(cur);
633 }
b788db79 634 }
846705de
RW
635
636 return 0;
b788db79
RW
637}
638
/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */

	/*
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

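/*
 * Note that memory_bm_find_bit() deliberately caches its position: bm->cur
 * is updated on every call, so a run of lookups for neighbouring PFNs (the
 * common case when bits are set zone by zone) takes the zone_found and
 * node_found shortcuts and skips the radix tree walk entirely.
 */
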
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}

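/*
 * The canonical iteration idiom built on memory_bm_position_reset() and
 * memory_bm_next_pfn() (do_something() below is just a placeholder) is:
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 */
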
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_alloc(sizeof(struct nosave_region),
					SMP_CACHE_BYTES);
		if (!region)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct nosave_region));
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
		memory_bm_position_reset(bm);
		pfn = memory_bm_next_pfn(bm);
		while (pfn != BM_END_OF_MAP) {
			if (pfn_valid(pfn))
				clear_highpage(pfn_to_page(pfn));

			pfn = memory_bm_next_pfn(bm);
		}
		memory_bm_position_reset(bm);
		pr_info("free pages cleared after restore\n");
	}
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

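/*
 * Worked example, assuming 4 KiB pages and 64-bit pointers: a zone spanning
 * 4 GiB has 1048576 page frames and needs DIV_ROUND_UP(1048576, 32768) = 32
 * bitmap blocks, plus one page for the 32 rtree_node structs and one page
 * for a single inner node, i.e. 34 pages per bitmap; the factor of 2 then
 * covers the two bitmaps created later (orig_bm and copy_bm), giving 68
 * pages in total.
 */
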
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
 * always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

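/*
 * The bitmap intersection above works because every image page was obtained
 * from get_image_page() or alloc_image_page(), both of which set the page's
 * bit in forbidden_pages_map *and* free_pages_map; pages with only one of
 * the two bits set are left alone.
 */
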
/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

1541
6715045d
RW
1542static unsigned long preallocate_image_memory(unsigned long nr_pages,
1543 unsigned long avail_normal)
4bb33435 1544{
6715045d
RW
1545 unsigned long alloc;
1546
1547 if (avail_normal <= alloc_normal)
1548 return 0;
1549
1550 alloc = avail_normal - alloc_normal;
1551 if (nr_pages < alloc)
1552 alloc = nr_pages;
1553
1554 return preallocate_image_pages(alloc, GFP_IMAGE);
4bb33435
RW
1555}
1556
1557#ifdef CONFIG_HIGHMEM
1558static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1559{
1560 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1561}
1562
1563/**
ef96f639 1564 * __fraction - Compute (an approximation of) x * (multiplier / base).
fe419535 1565 */
4bb33435
RW
1566static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1567{
809ed78a 1568 return div64_u64(x * multiplier, base);
4bb33435 1569}
fe419535 1570
4bb33435 1571static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
efd5a852
RW
1572 unsigned long highmem,
1573 unsigned long total)
fe419535 1574{
4bb33435
RW
1575 unsigned long alloc = __fraction(nr_pages, highmem, total);
1576
1577 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
fe419535 1578}
4bb33435
RW
1579#else /* CONFIG_HIGHMEM */
1580static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1581{
1582 return 0;
1583}
1584
1585static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
efd5a852
RW
1586 unsigned long highmem,
1587 unsigned long total)
4bb33435
RW
1588{
1589 return 0;
1590}
1591#endif /* CONFIG_HIGHMEM */
fe419535 1592
/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
64a473cb 1697int hibernate_preallocate_memory(void)
fe419535 1698{
fe419535 1699 struct zone *zone;
4bb33435 1700 unsigned long saveable, size, max_size, count, highmem, pages = 0;
6715045d 1701 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
db597605 1702 ktime_t start, stop;
64a473cb 1703 int error;
fe419535 1704
7a7b99bf 1705 pr_info("Preallocating image memory\n");
db597605 1706 start = ktime_get();
fe419535 1707
64a473cb 1708 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
7a7b99bf
LS
1709 if (error) {
1710 pr_err("Cannot allocate original bitmap\n");
64a473cb 1711 goto err_out;
7a7b99bf 1712 }
64a473cb
RW
1713
1714 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
7a7b99bf
LS
1715 if (error) {
1716 pr_err("Cannot allocate copy bitmap\n");
64a473cb 1717 goto err_out;
7a7b99bf 1718 }
64a473cb
RW
1719
1720 alloc_normal = 0;
1721 alloc_highmem = 0;
1722
4bb33435 1723 /* Count the number of saveable data pages. */
64a473cb 1724 save_highmem = count_highmem_pages();
4bb33435 1725 saveable = count_data_pages();
fe419535 1726
4bb33435
RW
1727 /*
1728 * Compute the total number of page frames we can use (count) and the
1729 * number of pages needed for image metadata (size).
1730 */
1731 count = saveable;
64a473cb
RW
1732 saveable += save_highmem;
1733 highmem = save_highmem;
4bb33435
RW
1734 size = 0;
1735 for_each_populated_zone(zone) {
1736 size += snapshot_additional_pages(zone);
1737 if (is_highmem(zone))
1738 highmem += zone_page_state(zone, NR_FREE_PAGES);
1739 else
1740 count += zone_page_state(zone, NR_FREE_PAGES);
1741 }
6715045d 1742 avail_normal = count;
1743 count += highmem;
1744 count -= totalreserve_pages;
1745
1746 /* Compute the maximum number of saveable pages to leave in memory. */
1747 max_size = (count - (size + PAGES_FOR_IO)) / 2
1748 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
266f1a25 1749 /* Compute the desired number of image pages specified by image_size. */
1750 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1751 if (size > max_size)
1752 size = max_size;
1753 /*
1754 * If the desired number of image pages is at least as large as the
1755 * current number of saveable pages in memory, allocate page frames for
1756 * the image and we're done.
4bb33435 1757 */
1758 if (size >= saveable) {
1759 pages = preallocate_image_highmem(save_highmem);
6715045d 1760 pages += preallocate_image_memory(saveable - pages, avail_normal);
4bb33435 1761 goto out;
64a473cb 1762 }
4bb33435 1763
1764 /* Estimate the minimum size of the image. */
1765 pages = minimum_image_size(saveable);
1766 /*
1767 * To avoid excessive pressure on the normal zone, leave room in it to
1768 * accommodate an image of the minimum size (unless it's already too
1769 * small, in which case don't preallocate pages from it at all).
1770 */
1771 if (avail_normal > pages)
1772 avail_normal -= pages;
1773 else
1774 avail_normal = 0;
1775 if (size < pages)
1776 size = min_t(unsigned long, pages, max_size);
1777
1778 /*
1779 * Let the memory management subsystem know that we're going to need a
1780 * large number of page frames to allocate and make it free some memory.
1781 * NOTE: If this is not done, performance will be hurt badly in some
1782 * test cases.
1783 */
1784 shrink_all_memory(saveable - size);
1785
1786 /*
1787 * The number of saveable pages in memory was too high, so apply some
1788 * pressure to decrease it. First, make room for the largest possible
1789 * image and fail if that doesn't work. Next, try to decrease the size
1790 * of the image as much as indicated by 'size' using allocations from
1791 * highmem and non-highmem zones separately.
1792 */
1793 pages_highmem = preallocate_image_highmem(highmem / 2);
1794 alloc = count - max_size;
1795 if (alloc > pages_highmem)
1796 alloc -= pages_highmem;
1797 else
1798 alloc = 0;
1799 pages = preallocate_image_memory(alloc, avail_normal);
1800 if (pages < alloc) {
1801 /* We have exhausted non-highmem pages, try highmem. */
1802 alloc -= pages;
1803 pages += pages_highmem;
1804 pages_highmem = preallocate_image_highmem(alloc);
1805 if (pages_highmem < alloc) {
1806 pr_err("Image allocation is %lu pages short\n",
1807 alloc - pages_highmem);
6715045d 1808 goto err_out;
7a7b99bf 1809 }
1810 pages += pages_highmem;
1811 /*
1812 * size is the desired number of saveable pages to leave in
1813 * memory, so try to preallocate (all memory - size) pages.
1814 */
1815 alloc = (count - pages) - size;
1816 pages += preallocate_image_highmem(alloc);
1817 } else {
1818 /*
1819 * There are approximately max_size saveable pages at this point
1820 * and we want to reduce this number down to size.
1821 */
1822 alloc = max_size - size;
1823 size = preallocate_highmem_fraction(alloc, highmem, count);
1824 pages_highmem += size;
1825 alloc -= size;
1826 size = preallocate_image_memory(alloc, avail_normal);
1827 pages_highmem += preallocate_image_highmem(alloc - size);
1828 pages += pages_highmem + size;
1829 }
4bb33435 1830
1831 /*
1832 * We only need as many page frames for the image as there are saveable
1833 * pages in memory, but we have allocated more. Release the excessive
1834 * ones now.
1835 */
a64fc82c 1836 pages -= free_unnecessary_pages();
1837
1838 out:
db597605 1839 stop = ktime_get();
5c0e9de0 1840 pr_info("Allocated %lu pages for snapshot\n", pages);
db597605 1841 swsusp_show_speed(start, stop, pages, "Allocated");
1842
1843 return 0;
1844
1845 err_out:
1846 swsusp_free();
1847 return -ENOMEM;
1848}
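/*
 * Worked example of the max_size computation above (illustrative numbers;
 * PAGES_FOR_IO is assumed to work out to 1024 pages here): with
 * count = 1,000,000 usable page frames, size = 10,000 metadata pages and
 * reserved_size covering 256 pages,
 *
 *	max_size = (1,000,000 - (10,000 + 1024)) / 2 - 2 * 256
 *	         = 494,488 - 512 = 493,976 pages,
 *
 * i.e. roughly half of the usable page frames may end up in the image,
 * which is why nr_copy_pages can never exceed about 50% of memory.
 */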
1849
1850#ifdef CONFIG_HIGHMEM
1851/**
1852 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1853 *
1854 * Compute the number of non-highmem pages that will be necessary for creating
1855 * copies of highmem pages.
1856 */
1857static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1858{
64a473cb 1859 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1860
1861 if (free_highmem >= nr_highmem)
1862 nr_highmem = 0;
1863 else
1864 nr_highmem -= free_highmem;
1865
1866 return nr_highmem;
1867}
1868#else
efd5a852 1869static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
8357376d 1870#endif /* CONFIG_HIGHMEM */
1871
1872/**
ef96f639 1873 * enough_free_mem - Check if there is enough free memory for the image.
25761b6e 1874 */
8357376d 1875static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
25761b6e 1876{
e5e2fa78 1877 struct zone *zone;
64a473cb 1878 unsigned int free = alloc_normal;
e5e2fa78 1879
98e73dc5 1880 for_each_populated_zone(zone)
8357376d 1881 if (!is_highmem(zone))
d23ad423 1882 free += zone_page_state(zone, NR_FREE_PAGES);
940864dd 1883
8357376d 1884 nr_pages += count_pages_for_highmem(nr_highmem);
1885 pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1886 nr_pages, PAGES_FOR_IO, free);
940864dd 1887
64a473cb 1888 return free > nr_pages + PAGES_FOR_IO;
1889}
1890
1891#ifdef CONFIG_HIGHMEM
1892/**
1893 * get_highmem_buffer - Allocate a buffer for highmem pages.
1894 *
1895 * If there are some highmem pages in the hibernation image, we may need a
1896 * buffer to copy them and/or load their data.
8357376d 1897 */
1898static inline int get_highmem_buffer(int safe_needed)
1899{
453f85d4 1900 buffer = get_image_page(GFP_ATOMIC, safe_needed);
1901 return buffer ? 0 : -ENOMEM;
1902}
1903
1904/**
1905 * alloc_highmem_pages - Allocate some highmem pages for the image.
1906 *
1907 * Try to allocate as many pages as needed, but if the number of free highmem
1908 * pages is less than that, allocate them all.
8357376d 1909 */
1910static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1911 unsigned int nr_highmem)
1912{
1913 unsigned int to_alloc = count_free_highmem_pages();
1914
1915 if (to_alloc > nr_highmem)
1916 to_alloc = nr_highmem;
1917
1918 nr_highmem -= to_alloc;
1919 while (to_alloc-- > 0) {
1920 struct page *page;
1921
d0164adc 1922 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1923 memory_bm_set_bit(bm, page_to_pfn(page));
1924 }
1925 return nr_highmem;
1926}
1927#else
1928static inline int get_highmem_buffer(int safe_needed) { return 0; }
1929
1930static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1931 unsigned int n) { return 0; }
1932#endif /* CONFIG_HIGHMEM */
1933
1934/**
ef96f639 1935 * swsusp_alloc - Allocate memory for hibernation image.
8357376d 1936 *
1937 * We first try to allocate as many highmem pages as there are
1938 * saveable highmem pages in the system. If that fails, we allocate
1939 * non-highmem pages for the copies of the remaining highmem ones.
8357376d 1940 *
1941 * In this approach it is likely that the copies of highmem pages will
1942 * also be located in the high memory, because of the way in which
1943 * copy_data_pages() works.
8357376d 1944 */
eba74c29 1945static int swsusp_alloc(struct memory_bitmap *copy_bm,
efd5a852 1946 unsigned int nr_pages, unsigned int nr_highmem)
054bd4c1 1947{
8357376d 1948 if (nr_highmem > 0) {
2e725a06 1949 if (get_highmem_buffer(PG_ANY))
1950 goto err_out;
1951 if (nr_highmem > alloc_highmem) {
1952 nr_highmem -= alloc_highmem;
1953 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1954 }
8357376d 1955 }
1956 if (nr_pages > alloc_normal) {
1957 nr_pages -= alloc_normal;
1958 while (nr_pages-- > 0) {
1959 struct page *page;
1960
453f85d4 1961 page = alloc_image_page(GFP_ATOMIC);
1962 if (!page)
1963 goto err_out;
1964 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1965 }
25761b6e 1966 }
64a473cb 1967
b788db79 1968 return 0;
25761b6e 1969
64a473cb 1970 err_out:
b788db79 1971 swsusp_free();
2e725a06 1972 return -ENOMEM;
1973}
1974
722a9f92 1975asmlinkage __visible int swsusp_save(void)
25761b6e 1976{
8357376d 1977 unsigned int nr_pages, nr_highmem;
25761b6e 1978
7a7b99bf 1979 pr_info("Creating image:\n");
25761b6e 1980
9f8f2172 1981 drain_local_pages(NULL);
a0f49651 1982 nr_pages = count_data_pages();
8357376d 1983 nr_highmem = count_highmem_pages();
64ec72a1 1984 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
25761b6e 1985
8357376d 1986 if (!enough_free_mem(nr_pages, nr_highmem)) {
64ec72a1 1987 pr_err("Not enough free memory\n");
1988 return -ENOMEM;
1989 }
1990
eba74c29 1991 if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
64ec72a1 1992 pr_err("Memory allocation failed\n");
a0f49651 1993 return -ENOMEM;
8357376d 1994 }
25761b6e 1995
1996 /*
1997 * While allocating the suspend pagedir, new cold pages may appear.
1998 * Kill them.
1999 */
9f8f2172 2000 drain_local_pages(NULL);
b788db79 2001 copy_data_pages(&copy_bm, &orig_bm);
2002
2003 /*
2004 * End of critical section. From now on, we can write to memory,
2005 * but we should not touch the disk. In particular, this means we must
2006 * _not_ touch swap space, except to write out our image, of course.
2007 */
2008
8357376d 2009 nr_pages += nr_highmem;
a0f49651 2010 nr_copy_pages = nr_pages;
8357376d 2011 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
a0f49651 2012
7a7b99bf 2013 pr_info("Image created (%d pages copied)\n", nr_pages);
8357376d 2014
2015 return 0;
2016}
f577eb30 2017
2018#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2019static int init_header_complete(struct swsusp_info *info)
f577eb30 2020{
d307c4a8 2021 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
f577eb30 2022 info->version_code = LINUX_VERSION_CODE;
2023 return 0;
2024}
2025
02d7f400 2026static const char *check_image_kernel(struct swsusp_info *info)
2027{
2028 if (info->version_code != LINUX_VERSION_CODE)
2029 return "kernel version";
2030 if (strcmp(info->uts.sysname, init_utsname()->sysname))
2031 return "system type";
2032 if (strcmp(info->uts.release, init_utsname()->release))
2033 return "kernel release";
2034 if (strcmp(info->uts.version, init_utsname()->version))
2035 return "version";
2036 if (strcmp(info->uts.machine, init_utsname()->machine))
2037 return "machine";
2038 return NULL;
2039}
2040#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2041
2042unsigned long snapshot_get_image_size(void)
2043{
2044 return nr_copy_pages + nr_meta_pages + 1;
2045}
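/*
 * Example: an image with nr_copy_pages = 262144 and nr_meta_pages = 512
 * (illustrative values) occupies 262144 + 512 + 1 = 262657 pages in the
 * snapshot data stream; the extra page is the header filled by init_header().
 */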
2046
2047static int init_header(struct swsusp_info *info)
2048{
2049 memset(info, 0, sizeof(struct swsusp_info));
0ed5fd13 2050 info->num_physpages = get_num_physpages();
f577eb30 2051 info->image_pages = nr_copy_pages;
af508b34 2052 info->pages = snapshot_get_image_size();
2053 info->size = info->pages;
2054 info->size <<= PAGE_SHIFT;
d307c4a8 2055 return init_header_complete(info);
2056}
2057
2058/**
2059 * pack_pfns - Prepare PFNs for saving.
2060 * @bm: Memory bitmap.
2061 * @buf: Memory buffer to store the PFNs in.
2062 *
2063 * PFNs corresponding to set bits in @bm are stored in the area of memory
2064 * pointed to by @buf (1 page at a time).
f577eb30 2065 */
efd5a852 2066static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2067{
2068 int j;
2069
b788db79 2070 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2071 buf[j] = memory_bm_next_pfn(bm);
2072 if (unlikely(buf[j] == BM_END_OF_MAP))
b788db79 2073 break;
f577eb30 2074 }
2075}
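/*
 * Example: assuming 4 KiB pages and 64-bit longs, each call to pack_pfns()
 * stores up to PAGE_SIZE / sizeof(long) = 512 PFNs in @buf; a partially
 * filled last page is terminated with BM_END_OF_MAP. unpack_orig_pfns()
 * below performs the inverse operation at restore time.
 */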
2076
2077/**
2078 * snapshot_read_next - Get the address to read the next image page from.
2079 * @handle: Snapshot handle to be used for the reading.
f577eb30 2080 *
2081 * On the first call, @handle should point to a zeroed snapshot_handle
2082 * structure. The structure is populated on that call and a pointer to it
2083 * should be passed to this function on every subsequent call.
f577eb30 2084 *
2085 * On success, the function returns a positive number. Then, the caller
2086 * is allowed to read up to the returned number of bytes from the memory
2087 * location computed by the data_of() macro.
f577eb30 2088 *
2089 * The function returns 0 to indicate the end of the data stream condition,
2090 * and negative numbers are returned on errors. If that happens, the structure
2091 * pointed to by @handle is not updated and should not be used any more.
f577eb30 2092 */
d3c1b24c 2093int snapshot_read_next(struct snapshot_handle *handle)
f577eb30 2094{
fb13a28b 2095 if (handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30 2096 return 0;
b788db79 2097
2098 if (!buffer) {
2099 /* This causes the buffer to be freed by swsusp_free() */
8357376d 2100 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2101 if (!buffer)
2102 return -ENOMEM;
2103 }
d3c1b24c 2104 if (!handle->cur) {
2105 int error;
2106
2107 error = init_header((struct swsusp_info *)buffer);
2108 if (error)
2109 return error;
f577eb30 2110 handle->buffer = buffer;
2111 memory_bm_position_reset(&orig_bm);
2112 memory_bm_position_reset(&copy_bm);
d3c1b24c 2113 } else if (handle->cur <= nr_meta_pages) {
3ecb01df 2114 clear_page(buffer);
2115 pack_pfns(buffer, &orig_bm);
2116 } else {
2117 struct page *page;
b788db79 2118
2119 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2120 if (PageHighMem(page)) {
2121 /*
2122 * Highmem pages are copied to the buffer,
2123 * because we can't return with a kmapped
2124 * highmem page (we may not be called again).
2125 */
2126 void *kaddr;
8357376d 2127
0de9a1e2 2128 kaddr = kmap_atomic(page);
3ecb01df 2129 copy_page(buffer, kaddr);
0de9a1e2 2130 kunmap_atomic(kaddr);
2131 handle->buffer = buffer;
2132 } else {
2133 handle->buffer = page_address(page);
f577eb30 2134 }
f577eb30 2135 }
2136 handle->cur++;
2137 return PAGE_SIZE;
2138}
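/*
 * A minimal sketch of the intended calling pattern (the I/O helper is a
 * placeholder, not the kernel's actual swap-writing code):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_out_page(data_of(handle));	// hypothetical I/O helper
 *
 * ret == 0 means the whole image has been read; a negative ret is an error
 * and @handle must not be used any more.
 */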
2139
2140static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2141 struct memory_bitmap *src)
2142{
2143 unsigned long pfn;
2144
2145 memory_bm_position_reset(src);
2146 pfn = memory_bm_next_pfn(src);
2147 while (pfn != BM_END_OF_MAP) {
2148 memory_bm_set_bit(dst, pfn);
2149 pfn = memory_bm_next_pfn(src);
2150 }
2151}
2152
f577eb30 2153/**
2154 * mark_unsafe_pages - Mark pages that were used before hibernation.
2155 *
2156 * Mark the pages that cannot be used for storing the image during restoration,
2157 * because they conflict with the pages that had been used before hibernation.
f577eb30 2158 */
6dbecfd3 2159static void mark_unsafe_pages(struct memory_bitmap *bm)
f577eb30 2160{
6dbecfd3 2161 unsigned long pfn;
f577eb30 2162
2163 /* Clear the "free"/"unsafe" bit for all PFNs */
2164 memory_bm_position_reset(free_pages_map);
2165 pfn = memory_bm_next_pfn(free_pages_map);
2166 while (pfn != BM_END_OF_MAP) {
2167 memory_bm_clear_current(free_pages_map);
2168 pfn = memory_bm_next_pfn(free_pages_map);
2169 }
2170
2171 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2172 duplicate_memory_bitmap(free_pages_map, bm);
f577eb30 2173
940864dd 2174 allocated_unsafe_pages = 0;
2175}
2176
d307c4a8 2177static int check_header(struct swsusp_info *info)
f577eb30 2178{
02d7f400 2179 const char *reason;
f577eb30 2180
d307c4a8 2181 reason = check_image_kernel(info);
0ed5fd13 2182 if (!reason && info->num_physpages != get_num_physpages())
f577eb30 2183 reason = "memory size";
f577eb30 2184 if (reason) {
64ec72a1 2185 pr_err("Image mismatch: %s\n", reason);
2186 return -EPERM;
2187 }
2188 return 0;
2189}
2190
2191/**
ef96f639 2192 * load header - Check the image header and copy the data from it.
f577eb30 2193 */
efd5a852 2194static int load_header(struct swsusp_info *info)
2195{
2196 int error;
f577eb30 2197
940864dd 2198 restore_pblist = NULL;
2199 error = check_header(info);
2200 if (!error) {
2201 nr_copy_pages = info->image_pages;
2202 nr_meta_pages = info->pages - info->image_pages - 1;
2203 }
2204 return error;
2205}
2206
2207/**
2208 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2209 * @bm: Memory bitmap.
2210 * @buf: Area of memory containing the PFNs.
2211 *
2212 * For each element of the array pointed to by @buf (1 page at a time), set the
2213 * corresponding bit in @bm.
f577eb30 2214 */
69643279 2215static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2216{
2217 int j;
2218
2219 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2220 if (unlikely(buf[j] == BM_END_OF_MAP))
2221 break;
2222
6dbecfd3 2223 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2224 memory_bm_set_bit(bm, buf[j]);
2225 else
2226 return -EFAULT;
f577eb30 2227 }
2228
2229 return 0;
2230}
2231
8357376d 2232#ifdef CONFIG_HIGHMEM
2233/*
2234 * struct highmem_pbe is used for creating the list of highmem pages that
2235 * should be restored atomically during the resume from disk, because the page
2236 * frames they have occupied before the suspend are in use.
2237 */
2238struct highmem_pbe {
2239 struct page *copy_page; /* data is here now */
2240 struct page *orig_page; /* data was here before the suspend */
2241 struct highmem_pbe *next;
2242};
2243
2244/*
2245 * List of highmem PBEs needed for restoring the highmem pages that were
2246 * allocated before the suspend and included in the suspend image, but have
2247 * also been allocated by the "resume" kernel, so their contents cannot be
2248 * written directly to their "original" page frames.
2249 */
2250static struct highmem_pbe *highmem_pblist;
2251
2252/**
2253 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2254 * @bm: Memory bitmap.
2255 *
2256 * The bits in @bm that correspond to image pages are assumed to be set.
8357376d 2257 */
2258static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2259{
2260 unsigned long pfn;
2261 unsigned int cnt = 0;
2262
2263 memory_bm_position_reset(bm);
2264 pfn = memory_bm_next_pfn(bm);
2265 while (pfn != BM_END_OF_MAP) {
2266 if (PageHighMem(pfn_to_page(pfn)))
2267 cnt++;
2268
2269 pfn = memory_bm_next_pfn(bm);
2270 }
2271 return cnt;
2272}
2273
2274static unsigned int safe_highmem_pages;
2275
2276static struct memory_bitmap *safe_highmem_bm;
2277
2278/**
2279 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2280 * @bm: Pointer to an uninitialized memory bitmap structure.
2281 * @nr_highmem_p: Pointer to the number of highmem image pages.
2282 *
2283 * Try to allocate as many highmem pages as there are highmem image pages
2284 * (@nr_highmem_p points to the variable containing the number of highmem image
2285 * pages). The pages that are "safe" (i.e. will not be overwritten when the
2286 * hibernation image is restored entirely) have the corresponding bits set in
2287 * @bm (it must be uninitialized).
2288 *
2289 * NOTE: This function should not be called if there are no highmem image pages.
2290 */
2291static int prepare_highmem_image(struct memory_bitmap *bm,
2292 unsigned int *nr_highmem_p)
2293{
2294 unsigned int to_alloc;
2295
2296 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2297 return -ENOMEM;
2298
2299 if (get_highmem_buffer(PG_SAFE))
2300 return -ENOMEM;
2301
2302 to_alloc = count_free_highmem_pages();
2303 if (to_alloc > *nr_highmem_p)
2304 to_alloc = *nr_highmem_p;
2305 else
2306 *nr_highmem_p = to_alloc;
2307
2308 safe_highmem_pages = 0;
2309 while (to_alloc-- > 0) {
2310 struct page *page;
2311
2312 page = alloc_page(__GFP_HIGHMEM);
7be98234 2313 if (!swsusp_page_is_free(page)) {
2314 /* The page is "safe", set its bit in the bitmap */
2315 memory_bm_set_bit(bm, page_to_pfn(page));
2316 safe_highmem_pages++;
2317 }
2318 /* Mark the page as allocated */
2319 swsusp_set_page_forbidden(page);
2320 swsusp_set_page_free(page);
2321 }
2322 memory_bm_position_reset(bm);
2323 safe_highmem_bm = bm;
2324 return 0;
2325}
2326
2327static struct page *last_highmem_page;
2328
8357376d 2329/**
2330 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2331 *
2332 * For a given highmem image page get a buffer that snapshot_write_next() should
2333 * return to its caller to write to.
8357376d 2334 *
2335 * If the page is to be saved to its "original" page frame or a copy of
2336 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2337 * the copy of the page is to be made in normal memory, so the address of
2338 * the copy is returned.
8357376d 2339 *
2340 * If @buffer is returned, the caller of snapshot_write_next() will write
2341 * the page's contents to @buffer, so they will have to be copied to the
2342 * right location on the next call to snapshot_write_next() and it is done
2343 * with the help of copy_last_highmem_page(). For this purpose, if
2344 * @buffer is returned, @last_highmem_page is set to the page to which
2345 * the data will have to be copied from @buffer.
8357376d 2346 */
2347static void *get_highmem_page_buffer(struct page *page,
2348 struct chain_allocator *ca)
2349{
2350 struct highmem_pbe *pbe;
2351 void *kaddr;
2352
7be98234 2353 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2354 /*
2355 * We have allocated the "original" page frame and we can
2356 * use it directly to store the loaded page.
2357 */
2358 last_highmem_page = page;
2359 return buffer;
2360 }
2361 /*
2362 * The "original" page frame has not been allocated and we have to
2363 * use a "safe" page frame to store the loaded page.
2364 */
2365 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2366 if (!pbe) {
2367 swsusp_free();
69643279 2368 return ERR_PTR(-ENOMEM);
2369 }
2370 pbe->orig_page = page;
2371 if (safe_highmem_pages > 0) {
2372 struct page *tmp;
2373
2374 /* Copy of the page will be stored in high memory */
2375 kaddr = buffer;
2376 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2377 safe_highmem_pages--;
2378 last_highmem_page = tmp;
2379 pbe->copy_page = tmp;
2380 } else {
2381 /* Copy of the page will be stored in normal memory */
2382 kaddr = safe_pages_list;
2383 safe_pages_list = safe_pages_list->next;
2384 pbe->copy_page = virt_to_page(kaddr);
2385 }
2386 pbe->next = highmem_pblist;
2387 highmem_pblist = pbe;
2388 return kaddr;
2389}
2390
2391/**
2392 * copy_last_highmem_page - Copy the most recent highmem image page.
2393 *
2394 * Copy the contents of a highmem image page from @buffer, where the caller of
2395 * snapshot_write_next() has stored them, to the right location represented by
2396 * @last_highmem_page.
8357376d 2397 */
2398static void copy_last_highmem_page(void)
2399{
2400 if (last_highmem_page) {
2401 void *dst;
2402
0de9a1e2 2403 dst = kmap_atomic(last_highmem_page);
3ecb01df 2404 copy_page(dst, buffer);
0de9a1e2 2405 kunmap_atomic(dst);
2406 last_highmem_page = NULL;
2407 }
2408}
2409
2410static inline int last_highmem_page_copied(void)
2411{
2412 return !last_highmem_page;
2413}
2414
2415static inline void free_highmem_data(void)
2416{
2417 if (safe_highmem_bm)
2418 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2419
2420 if (buffer)
2421 free_image_page(buffer, PG_UNSAFE_CLEAR);
2422}
2423#else
efd5a852 2424static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
8357376d 2425
2426static inline int prepare_highmem_image(struct memory_bitmap *bm,
2427 unsigned int *nr_highmem_p) { return 0; }
8357376d 2428
2429static inline void *get_highmem_page_buffer(struct page *page,
2430 struct chain_allocator *ca)
8357376d 2431{
69643279 2432 return ERR_PTR(-EINVAL);
2433}
2434
2435static inline void copy_last_highmem_page(void) {}
2436static inline int last_highmem_page_copied(void) { return 1; }
2437static inline void free_highmem_data(void) {}
2438#endif /* CONFIG_HIGHMEM */
2439
2440#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
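/*
 * Example: assuming 4 KiB pages and 64-bit pointers, sizeof(struct pbe) is
 * 24 bytes and LINKED_PAGE_DATA_SIZE is PAGE_SIZE - sizeof(void *) = 4088,
 * so each linked page holds 4088 / 24 = 170 PBEs.
 */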
2441
f577eb30 2442/**
2443 * prepare_image - Make room for loading hibernation image.
2444 * @new_bm: Uninitialized memory bitmap structure.
2445 * @bm: Memory bitmap with unsafe pages marked.
2446 *
2447 * Use @bm to mark the pages that will be overwritten in the process of
2448 * restoring the system memory state from the suspend image ("unsafe" pages)
2449 * and allocate memory for the image.
968808b8 2450 *
2451 * The idea is to allocate a new memory bitmap first and then allocate
2452 * as many pages as needed for image data, but without specifying what those
2453 * pages will be used for just yet. Instead, we mark them all as allocated and
2454 * create a list of "safe" pages to be used later. On systems with high
2455 * memory, a list of "safe" highmem pages is created too.
f577eb30 2456 */
efd5a852 2457static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
f577eb30 2458{
8357376d 2459 unsigned int nr_pages, nr_highmem;
9c744481 2460 struct linked_page *lp;
940864dd 2461 int error;
f577eb30 2462
2463 /* If there is no highmem, the buffer will not be necessary */
2464 free_image_page(buffer, PG_UNSAFE_CLEAR);
2465 buffer = NULL;
2466
2467 nr_highmem = count_highmem_image_pages(bm);
6dbecfd3 2468 mark_unsafe_pages(bm);
2469
2470 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2471 if (error)
2472 goto Free;
2473
2474 duplicate_memory_bitmap(new_bm, bm);
2475 memory_bm_free(bm, PG_UNSAFE_KEEP);
2476 if (nr_highmem > 0) {
2477 error = prepare_highmem_image(bm, &nr_highmem);
2478 if (error)
2479 goto Free;
2480 }
2481 /*
2482 * Reserve some safe pages for potential later use.
2483 *
2484 * NOTE: This way we make sure there will be enough safe pages for the
2485 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2486 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2487 *
2488 * Likewise, nr_copy_pages cannot be less than allocated_unsafe_pages.
940864dd 2489 */
8357376d 2490 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2491 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2492 while (nr_pages > 0) {
8357376d 2493 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
940864dd 2494 if (!lp) {
f577eb30 2495 error = -ENOMEM;
2496 goto Free;
2497 }
2498 lp->next = safe_pages_list;
2499 safe_pages_list = lp;
940864dd 2500 nr_pages--;
f577eb30 2501 }
940864dd 2502 /* Preallocate memory for the image */
8357376d 2503 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2504 while (nr_pages > 0) {
2505 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2506 if (!lp) {
2507 error = -ENOMEM;
2508 goto Free;
2509 }
7be98234 2510 if (!swsusp_page_is_free(virt_to_page(lp))) {
2511 /* The page is "safe", add it to the list */
2512 lp->next = safe_pages_list;
2513 safe_pages_list = lp;
968808b8 2514 }
940864dd 2515 /* Mark the page as allocated */
2516 swsusp_set_page_forbidden(virt_to_page(lp));
2517 swsusp_set_page_free(virt_to_page(lp));
940864dd 2518 nr_pages--;
968808b8 2519 }
2520 return 0;
2521
59a49335 2522 Free:
940864dd 2523 swsusp_free();
2524 return error;
2524 return error;
2525}
2526
940864dd 2527/**
2528 * get_buffer - Get the address to store the next image data page.
2529 *
2530 * Get the address that snapshot_write_next() should return to its caller to
2531 * write to.
940864dd 2532 */
940864dd 2533static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
968808b8 2534{
940864dd 2535 struct pbe *pbe;
2536 struct page *page;
2537 unsigned long pfn = memory_bm_next_pfn(bm);
968808b8 2538
2539 if (pfn == BM_END_OF_MAP)
2540 return ERR_PTR(-EFAULT);
2541
2542 page = pfn_to_page(pfn);
2543 if (PageHighMem(page))
2544 return get_highmem_page_buffer(page, ca);
2545
7be98234 2546 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2547 /*
2548 * We have allocated the "original" page frame and we can
940864dd 2549 * use it directly to store the loaded page.
968808b8 2550 */
2551 return page_address(page);
2552
2553 /*
2554 * The "original" page frame has not been allocated and we have to
940864dd 2555 * use a "safe" page frame to store the loaded page.
968808b8 2556 */
2557 pbe = chain_alloc(ca, sizeof(struct pbe));
2558 if (!pbe) {
2559 swsusp_free();
69643279 2560 return ERR_PTR(-ENOMEM);
940864dd 2561 }
2562 pbe->orig_address = page_address(page);
2563 pbe->address = safe_pages_list;
2564 safe_pages_list = safe_pages_list->next;
2565 pbe->next = restore_pblist;
2566 restore_pblist = pbe;
8357376d 2567 return pbe->address;
2568}
2569
f577eb30 2570/**
2571 * snapshot_write_next - Get the address to store the next image page.
2572 * @handle: Snapshot handle structure to guide the writing.
f577eb30 2573 *
2574 * On the first call, @handle should point to a zeroed snapshot_handle
2575 * structure. The structure is populated on that call and a pointer to it
2576 * should be passed to this function on every subsequent call.
f577eb30 2577 *
2578 * On success, the function returns a positive number. Then, the caller
2579 * is allowed to write up to the returned number of bytes to the memory
2580 * location computed by the data_of() macro.
f577eb30 2581 *
2582 * The function returns 0 to indicate the "end of file" condition. Negative
2583 * numbers are returned on errors, in which case the structure pointed to by
2584 * @handle is not updated and should not be used any more.
f577eb30 2585 */
d3c1b24c 2586int snapshot_write_next(struct snapshot_handle *handle)
f577eb30 2587{
940864dd 2588 static struct chain_allocator ca;
2589 int error = 0;
2590
940864dd 2591 /* Check if we have already loaded the entire image */
d3c1b24c 2592 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30 2593 return 0;
940864dd 2594
2595 handle->sync_read = 1;
2596
2597 if (!handle->cur) {
2598 if (!buffer)
2599 /* This causes the buffer to be freed by swsusp_free() */
2600 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2601
2602 if (!buffer)
2603 return -ENOMEM;
8357376d 2604
f577eb30 2605 handle->buffer = buffer;
2606 } else if (handle->cur == 1) {
2607 error = load_header(buffer);
2608 if (error)
2609 return error;
940864dd 2610
2611 safe_pages_list = NULL;
2612
2613 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2614 if (error)
2615 return error;
2616
4c0b6c10 2617 hibernate_restore_protection_begin();
2618 } else if (handle->cur <= nr_meta_pages + 1) {
2619 error = unpack_orig_pfns(buffer, &copy_bm);
2620 if (error)
2621 return error;
940864dd 2622
2623 if (handle->cur == nr_meta_pages + 1) {
2624 error = prepare_image(&orig_bm, &copy_bm);
2625 if (error)
2626 return error;
2627
2628 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2629 memory_bm_position_reset(&orig_bm);
2630 restore_pblist = NULL;
940864dd 2631 handle->buffer = get_buffer(&orig_bm, &ca);
d3c1b24c 2632 handle->sync_read = 0;
2633 if (IS_ERR(handle->buffer))
2634 return PTR_ERR(handle->buffer);
f577eb30 2635 }
f577eb30 2636 } else {
d3c1b24c 2637 copy_last_highmem_page();
4c0b6c10 2638 hibernate_restore_protect_page(handle->buffer);
2639 handle->buffer = get_buffer(&orig_bm, &ca);
2640 if (IS_ERR(handle->buffer))
2641 return PTR_ERR(handle->buffer);
2642 if (handle->buffer != buffer)
2643 handle->sync_read = 0;
f577eb30 2644 }
2645 handle->cur++;
2646 return PAGE_SIZE;
2647}
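/*
 * A minimal sketch of the intended calling pattern on the restore side
 * (the read helper is a placeholder, not the kernel's actual swap-reading
 * code):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle)) > 0)
 *		read_in_page(data_of(handle));	// hypothetical, fills one page
 *	snapshot_write_finalize(&handle);
 *	if (ret < 0 || !snapshot_image_loaded(&handle))
 *		goto fail;	// hypothetical error path
 */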
2648
8357376d 2649/**
2650 * snapshot_write_finalize - Complete the loading of a hibernation image.
2651 *
2652 * Must be called after the last call to snapshot_write_next() in case the last
2653 * page in the image happens to be a highmem page and its contents should be
2654 * stored in highmem. Additionally, it recycles bitmap memory that's not
2655 * necessary any more.
8357376d 2656 */
2657void snapshot_write_finalize(struct snapshot_handle *handle)
2658{
2659 copy_last_highmem_page();
4c0b6c10 2660 hibernate_restore_protect_page(handle->buffer);
307c5971 2661 /* Do that only if we have loaded the image entirely */
d3c1b24c 2662 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
307c5971 2663 memory_bm_recycle(&orig_bm);
2664 free_highmem_data();
2665 }
2666}
2667
2668int snapshot_image_loaded(struct snapshot_handle *handle)
2669{
8357376d 2670 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2671 handle->cur <= nr_meta_pages + nr_copy_pages);
2672}
2673
2674#ifdef CONFIG_HIGHMEM
2675/* Assumes that @buf is ready and points to a "safe" page */
2676static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2677 void *buf)
940864dd 2678{
2679 void *kaddr1, *kaddr2;
2680
2681 kaddr1 = kmap_atomic(p1);
2682 kaddr2 = kmap_atomic(p2);
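	/* Three-way swap via the safe buffer: buf <- p1, p1 <- p2, p2 <- buf. */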
2683 copy_page(buf, kaddr1);
2684 copy_page(kaddr1, kaddr2);
2685 copy_page(kaddr2, buf);
2686 kunmap_atomic(kaddr2);
2687 kunmap_atomic(kaddr1);
2688}
2689
2690/**
2691 * restore_highmem - Put highmem image pages into their original locations.
2692 *
2693 * For each highmem page that was in use before hibernation and is included in
2694 * the image, and also has been allocated by the "restore" kernel, swap its
2695 * current contents with the previous (i.e. "before hibernation") ones.
8357376d 2696 *
2697 * If the restore eventually fails, we can call this function once again and
2698 * restore the highmem state as seen by the restore kernel.
8357376d 2699 */
2700int restore_highmem(void)
2701{
2702 struct highmem_pbe *pbe = highmem_pblist;
2703 void *buf;
2704
2705 if (!pbe)
2706 return 0;
2707
2708 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2709 if (!buf)
2710 return -ENOMEM;
2711
2712 while (pbe) {
2713 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2714 pbe = pbe->next;
2715 }
2716 free_image_page(buf, PG_UNSAFE_CLEAR);
2717 return 0;
f577eb30 2718}
8357376d 2719#endif /* CONFIG_HIGHMEM */