git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - kernel/power/snapshot.c
PM / hibernate: Simplify mark_unsafe_pages()
25761b6e 1/*
96bc7aec 2 * linux/kernel/power/snapshot.c
25761b6e 3 *
8357376d 4 * This file provides system snapshot/restore functionality for swsusp.
25761b6e 5 *
a2531293 6 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8357376d 7 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
25761b6e 8 *
8357376d 9 * This file is released under the GPLv2.
25761b6e
RW
10 *
11 */
12
f577eb30 13#include <linux/version.h>
25761b6e
RW
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/suspend.h>
25761b6e 17#include <linux/delay.h>
25761b6e 18#include <linux/bitops.h>
25761b6e 19#include <linux/spinlock.h>
25761b6e 20#include <linux/kernel.h>
25761b6e
RW
21#include <linux/pm.h>
22#include <linux/device.h>
74dfd666 23#include <linux/init.h>
25761b6e
RW
24#include <linux/bootmem.h>
25#include <linux/syscalls.h>
26#include <linux/console.h>
27#include <linux/highmem.h>
846705de 28#include <linux/list.h>
5a0e3ad6 29#include <linux/slab.h>
52f5684c 30#include <linux/compiler.h>
db597605 31#include <linux/ktime.h>
25761b6e
RW
32
33#include <asm/uaccess.h>
34#include <asm/mmu_context.h>
35#include <asm/pgtable.h>
36#include <asm/tlbflush.h>
37#include <asm/io.h>
38
25761b6e
RW
39#include "power.h"
40
74dfd666
RW
41static int swsusp_page_is_free(struct page *);
42static void swsusp_set_page_forbidden(struct page *);
43static void swsusp_unset_page_forbidden(struct page *);
44
ddeb6487
RW
45/*
46 * Number of bytes to reserve for memory allocations made by device drivers
47 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
48 * cause image creation to fail (tunable via /sys/power/reserved_size).
49 */
50unsigned long reserved_size;
51
52void __init hibernate_reserved_size_init(void)
53{
54 reserved_size = SPARE_PAGES * PAGE_SIZE;
55}
56
fe419535
RW
57/*
58 * Preferred image size in bytes (tunable via /sys/power/image_size).
1c1be3a9
RW
59 * When it is set to N, swsusp will do its best to ensure the image
60 * size will not exceed N bytes, but if that is impossible, it will
61 * try to create the smallest image possible.
fe419535 62 */
ac5c24ec
RW
63unsigned long image_size;
64
65void __init hibernate_image_size_init(void)
66{
1c1be3a9 67 image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
ac5c24ec 68}
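/*
 * Illustrative example (not part of the original source): with the default
 * computed above, a machine with 8 GiB of RAM starts out with image_size of
 * roughly 2/5 * 8 GiB = 3.2 GiB; userspace may still override this through
 * /sys/power/image_size.
 */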
fe419535 69
8357376d
RW
70/* List of PBEs needed for restoring the pages that were allocated before
71 * the suspend and included in the suspend image, but have also been
72 * allocated by the "resume" kernel, so their contents cannot be written
73 * directly to their "original" page frames.
74 */
75534b50
RW
75struct pbe *restore_pblist;
76
9c744481
RW
77/* struct linked_page is used to build chains of pages */
78
79#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
80
81struct linked_page {
82 struct linked_page *next;
83 char data[LINKED_PAGE_DATA_SIZE];
84} __packed;
85
86/*
87 * List of "safe" pages (ie. pages that were not used by the image kernel
88 * before hibernation) that may be used as temporary storage for image kernel
89 * memory contents.
90 */
91static struct linked_page *safe_pages_list;
92
8357376d 93/* Pointer to an auxiliary buffer (1 page) */
940864dd 94static void *buffer;
7088a5c0 95
f6143aa6
RW
96/**
97 * @safe_needed - on resume, for storing the PBE list and the image,
98 * we can only use memory pages that do not conflict with the pages
8357376d
RW
99 * used before suspend. The unsafe pages have PageNosaveFree set
 100 * and we count them using allocated_unsafe_pages.
f6143aa6 101 *
8357376d
RW
102 * Each allocated image page is marked as PageNosave and PageNosaveFree
103 * so that swsusp_free() can release it.
f6143aa6
RW
104 */
105
0bcd888d
RW
106#define PG_ANY 0
107#define PG_SAFE 1
108#define PG_UNSAFE_CLEAR 1
109#define PG_UNSAFE_KEEP 0
110
940864dd 111static unsigned int allocated_unsafe_pages;
f6143aa6 112
8357376d 113static void *get_image_page(gfp_t gfp_mask, int safe_needed)
f6143aa6
RW
114{
115 void *res;
116
117 res = (void *)get_zeroed_page(gfp_mask);
118 if (safe_needed)
7be98234 119 while (res && swsusp_page_is_free(virt_to_page(res))) {
f6143aa6 120 /* The page is unsafe, mark it for swsusp_free() */
7be98234 121 swsusp_set_page_forbidden(virt_to_page(res));
940864dd 122 allocated_unsafe_pages++;
f6143aa6
RW
123 res = (void *)get_zeroed_page(gfp_mask);
124 }
125 if (res) {
7be98234
RW
126 swsusp_set_page_forbidden(virt_to_page(res));
127 swsusp_set_page_free(virt_to_page(res));
f6143aa6
RW
128 }
129 return res;
130}
131
9c744481
RW
132static void *__get_safe_page(gfp_t gfp_mask)
133{
134 if (safe_pages_list) {
135 void *ret = safe_pages_list;
136
137 safe_pages_list = safe_pages_list->next;
138 memset(ret, 0, PAGE_SIZE);
139 return ret;
140 }
141 return get_image_page(gfp_mask, PG_SAFE);
142}
143
f6143aa6
RW
144unsigned long get_safe_page(gfp_t gfp_mask)
145{
9c744481 146 return (unsigned long)__get_safe_page(gfp_mask);
8357376d
RW
147}
148
5b6d15de
RW
149static struct page *alloc_image_page(gfp_t gfp_mask)
150{
8357376d
RW
151 struct page *page;
152
153 page = alloc_page(gfp_mask);
154 if (page) {
7be98234
RW
155 swsusp_set_page_forbidden(page);
156 swsusp_set_page_free(page);
8357376d
RW
157 }
158 return page;
f6143aa6
RW
159}
160
161/**
162 * free_image_page - free page represented by @addr, allocated with
8357376d 163 * get_image_page (page flags set by it must be cleared)
f6143aa6
RW
164 */
165
166static inline void free_image_page(void *addr, int clear_nosave_free)
167{
8357376d
RW
168 struct page *page;
169
170 BUG_ON(!virt_addr_valid(addr));
171
172 page = virt_to_page(addr);
173
7be98234 174 swsusp_unset_page_forbidden(page);
f6143aa6 175 if (clear_nosave_free)
7be98234 176 swsusp_unset_page_free(page);
8357376d
RW
177
178 __free_page(page);
f6143aa6
RW
179}
180
b788db79
RW
181static inline void
182free_list_of_pages(struct linked_page *list, int clear_page_nosave)
183{
184 while (list) {
185 struct linked_page *lp = list->next;
186
187 free_image_page(list, clear_page_nosave);
188 list = lp;
189 }
190}
191
192/**
193 * struct chain_allocator is used for allocating small objects out of
194 * a linked list of pages called 'the chain'.
195 *
 196 * The chain grows each time there is no room for a new object in
197 * the current page. The allocated objects cannot be freed individually.
198 * It is only possible to free them all at once, by freeing the entire
199 * chain.
200 *
201 * NOTE: The chain allocator may be inefficient if the allocated objects
202 * are not much smaller than PAGE_SIZE.
203 */
204
205struct chain_allocator {
206 struct linked_page *chain; /* the chain */
207 unsigned int used_space; /* total size of objects allocated out
208 * of the current page
209 */
210 gfp_t gfp_mask; /* mask for allocating pages */
211 int safe_needed; /* if set, only "safe" pages are allocated */
212};
213
214static void
215chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
216{
217 ca->chain = NULL;
218 ca->used_space = LINKED_PAGE_DATA_SIZE;
219 ca->gfp_mask = gfp_mask;
220 ca->safe_needed = safe_needed;
221}
222
223static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
224{
225 void *ret;
226
227 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
228 struct linked_page *lp;
229
9c744481
RW
230 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
231 get_image_page(ca->gfp_mask, PG_ANY);
b788db79
RW
232 if (!lp)
233 return NULL;
234
235 lp->next = ca->chain;
236 ca->chain = lp;
237 ca->used_space = 0;
238 }
239 ret = ca->chain->data + ca->used_space;
240 ca->used_space += size;
241 return ret;
242}
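/*
 * For example, the memory bitmap code below carves its struct rtree_node and
 * struct mem_zone_bm_rtree objects out of whole pages this way; see
 * alloc_rtree_node() and create_zone_bm_rtree().
 */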
243
b788db79
RW
244/**
245 * Data types related to memory bitmaps.
246 *
 247 * Memory bitmap is a structure consisting of many linked lists of
 248 * objects. The main list's elements are of type struct zone_bitmap
 249 * and each of them corresponds to one zone. For each zone bitmap
 250 * object there is a list of objects of type struct bm_block that
 251 * represent each block of the bitmap in which information is stored.
b788db79
RW
252 *
253 * struct memory_bitmap contains a pointer to the main list of zone
254 * bitmap objects, a struct bm_position used for browsing the bitmap,
255 * and a pointer to the list of pages used for allocating all of the
256 * zone bitmap objects and bitmap block objects.
257 *
258 * NOTE: It has to be possible to lay out the bitmap in memory
259 * using only allocations of order 0. Additionally, the bitmap is
 260 * designed to work with an arbitrary number of zones (this is over the
261 * top for now, but let's avoid making unnecessary assumptions ;-).
262 *
263 * struct zone_bitmap contains a pointer to a list of bitmap block
264 * objects and a pointer to the bitmap block object that has been
265 * most recently used for setting bits. Additionally, it contains the
266 * pfns that correspond to the start and end of the represented zone.
267 *
268 * struct bm_block contains a pointer to the memory page in which
0d83304c
AM
269 * information is stored (in the form of a block of bitmap)
270 * It also contains the pfns that correspond to the start and end of
271 * the represented memory area.
f469f02d
JR
272 *
273 * The memory bitmap is organized as a radix tree to guarantee fast random
274 * access to the bits. There is one radix tree for each zone (as returned
275 * from create_mem_extents).
276 *
277 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
278 * two linked lists for the nodes of the tree, one for the inner nodes and
 279 * one for the leaf nodes. The linked leaf nodes are used for fast linear
280 * access of the memory bitmap.
281 *
282 * The struct rtree_node represents one node of the radix tree.
b788db79
RW
283 */
284
285#define BM_END_OF_MAP (~0UL)
286
8de03073 287#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
f469f02d
JR
288#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
289#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
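/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12): PAGE_SIZE * BITS_PER_BYTE
 * gives 32768 bits per bitmap block, BM_BLOCK_SHIFT is 15, and a single leaf
 * node therefore covers 32768 page frames, i.e. 128 MiB of memory.
 */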
b788db79 290
f469f02d
JR
291/*
292 * struct rtree_node is a wrapper struct to link the nodes
293 * of the rtree together for easy linear iteration over
294 * bits and easy freeing
295 */
296struct rtree_node {
297 struct list_head list;
298 unsigned long *data;
299};
300
301/*
302 * struct mem_zone_bm_rtree represents a bitmap used for one
303 * populated memory zone.
304 */
305struct mem_zone_bm_rtree {
306 struct list_head list; /* Link Zones together */
307 struct list_head nodes; /* Radix Tree inner nodes */
308 struct list_head leaves; /* Radix Tree leaves */
309 unsigned long start_pfn; /* Zone start page frame */
310 unsigned long end_pfn; /* Zone end page frame + 1 */
311 struct rtree_node *rtree; /* Radix Tree Root */
312 int levels; /* Number of Radix Tree Levels */
313 unsigned int blocks; /* Number of Bitmap Blocks */
314};
315
b788db79
RW
 316/* struct bm_position is used for browsing memory bitmaps */
317
318struct bm_position {
3a20cb17
JR
319 struct mem_zone_bm_rtree *zone;
320 struct rtree_node *node;
321 unsigned long node_pfn;
322 int node_bit;
b788db79
RW
323};
324
325struct memory_bitmap {
f469f02d 326 struct list_head zones;
b788db79
RW
327 struct linked_page *p_list; /* list of pages used to store zone
328 * bitmap objects and bitmap block
329 * objects
330 */
331 struct bm_position cur; /* most recently used bit position */
332};
333
334/* Functions that operate on memory bitmaps */
335
f469f02d
JR
336#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
337#if BITS_PER_LONG == 32
338#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
339#else
340#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
341#endif
342#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
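/*
 * For example, on a 64-bit system with 4 KiB pages: BM_ENTRIES_PER_LEVEL is
 * 4096 / 8 = 512 and BM_RTREE_LEVEL_SHIFT is 9, so every inner node holds 512
 * child pointers and a tree of depth n can address 512^n bitmap blocks.
 */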
343
344/*
345 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
346 *
347 * This function is used to allocate inner nodes as well as the
 348 * leaf nodes of the radix tree. It also adds the node to the
349 * corresponding linked list passed in by the *list parameter.
350 */
351static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
352 struct chain_allocator *ca,
353 struct list_head *list)
354{
355 struct rtree_node *node;
356
357 node = chain_alloc(ca, sizeof(struct rtree_node));
358 if (!node)
359 return NULL;
360
361 node->data = get_image_page(gfp_mask, safe_needed);
362 if (!node->data)
363 return NULL;
364
365 list_add_tail(&node->list, list);
366
367 return node;
368}
369
370/*
 371 * add_rtree_block - Add a new leaf node to the radix tree
 372 *
 373 * The leaf nodes need to be allocated in order to keep the leaves
374 * linked list in order. This is guaranteed by the zone->blocks
375 * counter.
376 */
377static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
378 int safe_needed, struct chain_allocator *ca)
379{
380 struct rtree_node *node, *block, **dst;
381 unsigned int levels_needed, block_nr;
382 int i;
383
384 block_nr = zone->blocks;
385 levels_needed = 0;
386
387 /* How many levels do we need for this block nr? */
388 while (block_nr) {
389 levels_needed += 1;
390 block_nr >>= BM_RTREE_LEVEL_SHIFT;
391 }
392
393 /* Make sure the rtree has enough levels */
394 for (i = zone->levels; i < levels_needed; i++) {
395 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
396 &zone->nodes);
397 if (!node)
398 return -ENOMEM;
399
400 node->data[0] = (unsigned long)zone->rtree;
401 zone->rtree = node;
402 zone->levels += 1;
403 }
404
405 /* Allocate new block */
406 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
407 if (!block)
408 return -ENOMEM;
409
410 /* Now walk the rtree to insert the block */
411 node = zone->rtree;
412 dst = &zone->rtree;
413 block_nr = zone->blocks;
414 for (i = zone->levels; i > 0; i--) {
415 int index;
416
417 if (!node) {
418 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
419 &zone->nodes);
420 if (!node)
421 return -ENOMEM;
422 *dst = node;
423 }
424
425 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
426 index &= BM_RTREE_LEVEL_MASK;
427 dst = (struct rtree_node **)&((*dst)->data[index]);
428 node = *dst;
429 }
430
431 zone->blocks += 1;
432 *dst = block;
433
434 return 0;
435}
436
437static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
438 int clear_nosave_free);
439
440/*
441 * create_zone_bm_rtree - create a radix tree for one zone
442 *
 443 * Allocates the mem_zone_bm_rtree structure and initializes it.
 444 * This function also allocates and builds the radix tree for the
445 * zone.
446 */
447static struct mem_zone_bm_rtree *
448create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
449 struct chain_allocator *ca,
450 unsigned long start, unsigned long end)
451{
452 struct mem_zone_bm_rtree *zone;
453 unsigned int i, nr_blocks;
454 unsigned long pages;
455
456 pages = end - start;
457 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
458 if (!zone)
459 return NULL;
460
461 INIT_LIST_HEAD(&zone->nodes);
462 INIT_LIST_HEAD(&zone->leaves);
463 zone->start_pfn = start;
464 zone->end_pfn = end;
465 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
466
467 for (i = 0; i < nr_blocks; i++) {
468 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
469 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
470 return NULL;
471 }
472 }
473
474 return zone;
475}
476
477/*
478 * free_zone_bm_rtree - Free the memory of the radix tree
479 *
480 * Free all node pages of the radix tree. The mem_zone_bm_rtree
481 * structure itself is not freed here nor are the rtree_node
482 * structs.
483 */
484static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
485 int clear_nosave_free)
486{
487 struct rtree_node *node;
488
489 list_for_each_entry(node, &zone->nodes, list)
490 free_image_page(node->data, clear_nosave_free);
491
492 list_for_each_entry(node, &zone->leaves, list)
493 free_image_page(node->data, clear_nosave_free);
494}
495
b788db79
RW
496static void memory_bm_position_reset(struct memory_bitmap *bm)
497{
3a20cb17
JR
498 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
499 list);
500 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
501 struct rtree_node, list);
502 bm->cur.node_pfn = 0;
503 bm->cur.node_bit = 0;
b788db79
RW
504}
505
506static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
507
846705de
RW
508struct mem_extent {
509 struct list_head hook;
510 unsigned long start;
511 unsigned long end;
512};
513
b788db79 514/**
846705de
RW
515 * free_mem_extents - free a list of memory extents
516 * @list - list of extents to empty
b788db79 517 */
846705de
RW
518static void free_mem_extents(struct list_head *list)
519{
520 struct mem_extent *ext, *aux;
b788db79 521
846705de
RW
522 list_for_each_entry_safe(ext, aux, list, hook) {
523 list_del(&ext->hook);
524 kfree(ext);
525 }
526}
527
528/**
529 * create_mem_extents - create a list of memory extents representing
530 * contiguous ranges of PFNs
531 * @list - list to put the extents into
532 * @gfp_mask - mask to use for memory allocations
533 */
534static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
b788db79 535{
846705de 536 struct zone *zone;
b788db79 537
846705de 538 INIT_LIST_HEAD(list);
b788db79 539
ee99c71c 540 for_each_populated_zone(zone) {
846705de
RW
541 unsigned long zone_start, zone_end;
542 struct mem_extent *ext, *cur, *aux;
543
846705de 544 zone_start = zone->zone_start_pfn;
c33bc315 545 zone_end = zone_end_pfn(zone);
846705de
RW
546
547 list_for_each_entry(ext, list, hook)
548 if (zone_start <= ext->end)
549 break;
b788db79 550
846705de
RW
551 if (&ext->hook == list || zone_end < ext->start) {
552 /* New extent is necessary */
553 struct mem_extent *new_ext;
554
555 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
556 if (!new_ext) {
557 free_mem_extents(list);
558 return -ENOMEM;
559 }
560 new_ext->start = zone_start;
561 new_ext->end = zone_end;
562 list_add_tail(&new_ext->hook, &ext->hook);
563 continue;
564 }
565
566 /* Merge this zone's range of PFNs with the existing one */
567 if (zone_start < ext->start)
568 ext->start = zone_start;
569 if (zone_end > ext->end)
570 ext->end = zone_end;
571
572 /* More merging may be possible */
573 cur = ext;
574 list_for_each_entry_safe_continue(cur, aux, list, hook) {
575 if (zone_end < cur->start)
576 break;
577 if (zone_end < cur->end)
578 ext->end = cur->end;
579 list_del(&cur->hook);
580 kfree(cur);
581 }
b788db79 582 }
846705de
RW
583
584 return 0;
b788db79
RW
585}
586
587/**
588 * memory_bm_create - allocate memory for a memory bitmap
589 */
b788db79
RW
590static int
591memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
592{
593 struct chain_allocator ca;
846705de
RW
594 struct list_head mem_extents;
595 struct mem_extent *ext;
596 int error;
b788db79
RW
597
598 chain_init(&ca, gfp_mask, safe_needed);
f469f02d 599 INIT_LIST_HEAD(&bm->zones);
b788db79 600
846705de
RW
601 error = create_mem_extents(&mem_extents, gfp_mask);
602 if (error)
603 return error;
b788db79 604
846705de 605 list_for_each_entry(ext, &mem_extents, hook) {
f469f02d 606 struct mem_zone_bm_rtree *zone;
f469f02d
JR
607
608 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
609 ext->start, ext->end);
9047eb62
JR
610 if (!zone) {
611 error = -ENOMEM;
f469f02d 612 goto Error;
9047eb62 613 }
f469f02d 614 list_add_tail(&zone->list, &bm->zones);
b788db79 615 }
846705de 616
b788db79
RW
617 bm->p_list = ca.chain;
618 memory_bm_position_reset(bm);
846705de
RW
619 Exit:
620 free_mem_extents(&mem_extents);
621 return error;
b788db79 622
846705de 623 Error:
b788db79
RW
624 bm->p_list = ca.chain;
625 memory_bm_free(bm, PG_UNSAFE_CLEAR);
846705de 626 goto Exit;
b788db79
RW
627}
628
629/**
630 * memory_bm_free - free memory occupied by the memory bitmap @bm
631 */
b788db79
RW
632static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
633{
f469f02d 634 struct mem_zone_bm_rtree *zone;
b788db79 635
f469f02d
JR
636 list_for_each_entry(zone, &bm->zones, list)
637 free_zone_bm_rtree(zone, clear_nosave_free);
638
b788db79 639 free_list_of_pages(bm->p_list, clear_nosave_free);
846705de 640
f469f02d 641 INIT_LIST_HEAD(&bm->zones);
b788db79
RW
642}
643
644/**
9047eb62
JR
645 * memory_bm_find_bit - Find the bit for pfn in the memory
646 * bitmap
07a33823 647 *
9047eb62
JR
 648 * Find the bit in the bitmap @bm that corresponds to the given pfn.
 649 * The cur.zone, cur.node and cur.node_pfn members of @bm are
 650 * updated.
651 * It walks the radix tree to find the page which contains the bit for
07a33823
JR
652 * pfn and returns the bit position in **addr and *bit_nr.
653 */
9047eb62
JR
654static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
655 void **addr, unsigned int *bit_nr)
07a33823
JR
656{
657 struct mem_zone_bm_rtree *curr, *zone;
658 struct rtree_node *node;
659 int i, block_nr;
660
3a20cb17
JR
661 zone = bm->cur.zone;
662
663 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
664 goto zone_found;
665
07a33823
JR
666 zone = NULL;
667
668 /* Find the right zone */
669 list_for_each_entry(curr, &bm->zones, list) {
670 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
671 zone = curr;
672 break;
673 }
674 }
675
676 if (!zone)
677 return -EFAULT;
678
3a20cb17 679zone_found:
07a33823
JR
680 /*
 681 * We have a zone. Now walk the radix tree to find the leaf
682 * node for our pfn.
683 */
3a20cb17
JR
684
685 node = bm->cur.node;
686 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
687 goto node_found;
688
07a33823
JR
689 node = zone->rtree;
690 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
691
692 for (i = zone->levels; i > 0; i--) {
693 int index;
694
695 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
696 index &= BM_RTREE_LEVEL_MASK;
697 BUG_ON(node->data[index] == 0);
698 node = (struct rtree_node *)node->data[index];
699 }
700
3a20cb17
JR
701node_found:
702 /* Update last position */
703 bm->cur.zone = zone;
704 bm->cur.node = node;
705 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
706
07a33823
JR
707 /* Set return values */
708 *addr = node->data;
709 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
710
711 return 0;
712}
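/*
 * Worked example (illustrative numbers only): with the 64-bit defines above,
 * a pfn lying 0x123456 pages past zone->start_pfn falls into bitmap block
 * 0x123456 >> 15 = 36; in a two-level tree the walk uses index 36 >> 9 = 0 at
 * the top level and 36 & BM_RTREE_LEVEL_MASK = 36 at the leaf-pointer level,
 * and the bit offset inside the leaf is (pfn - zone->start_pfn) & BM_BLOCK_MASK.
 */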
713
74dfd666
RW
714static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
715{
716 void *addr;
717 unsigned int bit;
a82f7119 718 int error;
74dfd666 719
a82f7119
RW
720 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
721 BUG_ON(error);
74dfd666
RW
722 set_bit(bit, addr);
723}
724
a82f7119
RW
725static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
726{
727 void *addr;
728 unsigned int bit;
729 int error;
730
731 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
07a33823
JR
732 if (!error)
733 set_bit(bit, addr);
734
a82f7119
RW
735 return error;
736}
737
74dfd666
RW
738static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
739{
740 void *addr;
741 unsigned int bit;
a82f7119 742 int error;
74dfd666 743
a82f7119
RW
744 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
745 BUG_ON(error);
74dfd666
RW
746 clear_bit(bit, addr);
747}
748
fdd64ed5
JR
749static void memory_bm_clear_current(struct memory_bitmap *bm)
750{
751 int bit;
752
753 bit = max(bm->cur.node_bit - 1, 0);
754 clear_bit(bit, bm->cur.node->data);
755}
756
74dfd666
RW
757static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
758{
759 void *addr;
760 unsigned int bit;
9047eb62 761 int error;
74dfd666 762
a82f7119
RW
763 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
764 BUG_ON(error);
9047eb62 765 return test_bit(bit, addr);
b788db79
RW
766}
767
69643279
RW
768static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
769{
770 void *addr;
771 unsigned int bit;
07a33823 772
9047eb62 773 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
b788db79
RW
774}
775
3a20cb17
JR
776/*
 777 * rtree_next_node - Jumps to the next leaf node
778 *
779 * Sets the position to the beginning of the next node in the
780 * memory bitmap. This is either the next node in the current
781 * zone's radix tree or the first node in the radix tree of the
782 * next zone.
783 *
784 * Returns true if there is a next node, false otherwise.
785 */
786static bool rtree_next_node(struct memory_bitmap *bm)
787{
788 bm->cur.node = list_entry(bm->cur.node->list.next,
789 struct rtree_node, list);
790 if (&bm->cur.node->list != &bm->cur.zone->leaves) {
791 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
792 bm->cur.node_bit = 0;
0f7d83e8 793 touch_softlockup_watchdog();
3a20cb17
JR
794 return true;
795 }
796
797 /* No more nodes, goto next zone */
798 bm->cur.zone = list_entry(bm->cur.zone->list.next,
799 struct mem_zone_bm_rtree, list);
800 if (&bm->cur.zone->list != &bm->zones) {
801 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
802 struct rtree_node, list);
803 bm->cur.node_pfn = 0;
804 bm->cur.node_bit = 0;
805 return true;
806 }
807
808 /* No more zones */
809 return false;
810}
811
9047eb62
JR
812/**
 813 * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
3a20cb17
JR
814 *
815 * Starting from the last returned position this function searches
816 * for the next set bit in the memory bitmap and returns its
 817 * number. If no more bits are set, BM_END_OF_MAP is returned.
9047eb62
JR
818 *
819 * It is required to run memory_bm_position_reset() before the
820 * first call to this function.
3a20cb17 821 */
9047eb62 822static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
3a20cb17
JR
823{
824 unsigned long bits, pfn, pages;
825 int bit;
826
827 do {
828 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
829 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
830 bit = find_next_bit(bm->cur.node->data, bits,
831 bm->cur.node_bit);
832 if (bit < bits) {
833 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
834 bm->cur.node_bit = bit + 1;
835 return pfn;
836 }
837 } while (rtree_next_node(bm));
838
839 return BM_END_OF_MAP;
840}
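/*
 * Typical usage, as in copy_data_pages() and swsusp_free() below:
 *
 *      memory_bm_position_reset(bm);
 *      for (;;) {
 *              pfn = memory_bm_next_pfn(bm);
 *              if (pfn == BM_END_OF_MAP)
 *                      break;
 *              ... use pfn ...
 *      }
 */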
841
74dfd666
RW
842/**
843 * This structure represents a range of page frames the contents of which
844 * should not be saved during the suspend.
845 */
846
847struct nosave_region {
848 struct list_head list;
849 unsigned long start_pfn;
850 unsigned long end_pfn;
851};
852
853static LIST_HEAD(nosave_regions);
854
855/**
856 * register_nosave_region - register a range of page frames the contents
857 * of which should not be saved during the suspend (to be used in the early
858 * initialization code)
859 */
860
861void __init
940d67f6
JB
862__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
863 int use_kmalloc)
74dfd666
RW
864{
865 struct nosave_region *region;
866
867 if (start_pfn >= end_pfn)
868 return;
869
870 if (!list_empty(&nosave_regions)) {
871 /* Try to extend the previous region (they should be sorted) */
872 region = list_entry(nosave_regions.prev,
873 struct nosave_region, list);
874 if (region->end_pfn == start_pfn) {
875 region->end_pfn = end_pfn;
876 goto Report;
877 }
878 }
940d67f6
JB
879 if (use_kmalloc) {
880 /* during init, this shouldn't fail */
881 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
882 BUG_ON(!region);
883 } else
884 /* This allocation cannot fail */
c2f69cda 885 region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
74dfd666
RW
886 region->start_pfn = start_pfn;
887 region->end_pfn = end_pfn;
888 list_add_tail(&region->list, &nosave_regions);
889 Report:
cd38ca85
BH
890 printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
891 (unsigned long long) start_pfn << PAGE_SHIFT,
892 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
74dfd666
RW
893}
894
895/*
896 * Set bits in this map correspond to the page frames the contents of which
897 * should not be saved during the suspend.
898 */
899static struct memory_bitmap *forbidden_pages_map;
900
901/* Set bits in this map correspond to free page frames. */
902static struct memory_bitmap *free_pages_map;
903
904/*
905 * Each page frame allocated for creating the image is marked by setting the
906 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
907 */
908
909void swsusp_set_page_free(struct page *page)
910{
911 if (free_pages_map)
912 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
913}
914
915static int swsusp_page_is_free(struct page *page)
916{
917 return free_pages_map ?
918 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
919}
920
921void swsusp_unset_page_free(struct page *page)
922{
923 if (free_pages_map)
924 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
925}
926
927static void swsusp_set_page_forbidden(struct page *page)
928{
929 if (forbidden_pages_map)
930 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
931}
932
933int swsusp_page_is_forbidden(struct page *page)
934{
935 return forbidden_pages_map ?
936 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
937}
938
939static void swsusp_unset_page_forbidden(struct page *page)
940{
941 if (forbidden_pages_map)
942 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
943}
944
945/**
 946 * mark_nosave_pages - set, in the given bitmap, the bits corresponding to
 947 * the page frames the contents of which should not be saved.
948 */
949
950static void mark_nosave_pages(struct memory_bitmap *bm)
951{
952 struct nosave_region *region;
953
954 if (list_empty(&nosave_regions))
955 return;
956
957 list_for_each_entry(region, &nosave_regions, list) {
958 unsigned long pfn;
959
69f1d475
BH
960 pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
961 (unsigned long long) region->start_pfn << PAGE_SHIFT,
962 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
963 - 1);
74dfd666
RW
964
965 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
a82f7119
RW
966 if (pfn_valid(pfn)) {
967 /*
968 * It is safe to ignore the result of
969 * mem_bm_set_bit_check() here, since we won't
970 * touch the PFNs for which the error is
971 * returned anyway.
972 */
973 mem_bm_set_bit_check(bm, pfn);
974 }
74dfd666
RW
975 }
976}
977
978/**
979 * create_basic_memory_bitmaps - create bitmaps needed for marking page
980 * frames that should not be saved and free page frames. The pointers
981 * forbidden_pages_map and free_pages_map are only modified if everything
982 * goes well, because we don't want the bits to be used before both bitmaps
983 * are set up.
984 */
985
986int create_basic_memory_bitmaps(void)
987{
988 struct memory_bitmap *bm1, *bm2;
989 int error = 0;
990
aab17289
RW
991 if (forbidden_pages_map && free_pages_map)
992 return 0;
993 else
994 BUG_ON(forbidden_pages_map || free_pages_map);
74dfd666 995
0709db60 996 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
74dfd666
RW
997 if (!bm1)
998 return -ENOMEM;
999
0709db60 1000 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
74dfd666
RW
1001 if (error)
1002 goto Free_first_object;
1003
0709db60 1004 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
74dfd666
RW
1005 if (!bm2)
1006 goto Free_first_bitmap;
1007
0709db60 1008 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
74dfd666
RW
1009 if (error)
1010 goto Free_second_object;
1011
1012 forbidden_pages_map = bm1;
1013 free_pages_map = bm2;
1014 mark_nosave_pages(forbidden_pages_map);
1015
23976728 1016 pr_debug("PM: Basic memory bitmaps created\n");
74dfd666
RW
1017
1018 return 0;
1019
1020 Free_second_object:
1021 kfree(bm2);
1022 Free_first_bitmap:
1023 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1024 Free_first_object:
1025 kfree(bm1);
1026 return -ENOMEM;
1027}
1028
1029/**
1030 * free_basic_memory_bitmaps - free memory bitmaps allocated by
1031 * create_basic_memory_bitmaps(). The auxiliary pointers are necessary
1032 * so that the bitmaps themselves are not referred to while they are being
1033 * freed.
1034 */
1035
1036void free_basic_memory_bitmaps(void)
1037{
1038 struct memory_bitmap *bm1, *bm2;
1039
6a0c7cd3
RW
1040 if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1041 return;
74dfd666
RW
1042
1043 bm1 = forbidden_pages_map;
1044 bm2 = free_pages_map;
1045 forbidden_pages_map = NULL;
1046 free_pages_map = NULL;
1047 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1048 kfree(bm1);
1049 memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1050 kfree(bm2);
1051
23976728 1052 pr_debug("PM: Basic memory bitmaps freed\n");
74dfd666
RW
1053}
1054
b788db79
RW
1055/**
1056 * snapshot_additional_pages - estimate the number of additional pages
 1057 * that will be needed for setting up the suspend image data structures for
 1058 * a given zone (usually the returned value is greater than the exact number)
1059 */
1060
1061unsigned int snapshot_additional_pages(struct zone *zone)
1062{
f469f02d 1063 unsigned int rtree, nodes;
b788db79 1064
f469f02d
JR
1065 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1066 rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1067 LINKED_PAGE_DATA_SIZE);
1068 while (nodes > 1) {
1069 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1070 rtree += nodes;
1071 }
1072
9047eb62 1073 return 2 * rtree;
b788db79
RW
1074}
1075
8357376d
RW
1076#ifdef CONFIG_HIGHMEM
1077/**
1078 * count_free_highmem_pages - compute the total number of free highmem
1079 * pages, system-wide.
1080 */
1081
1082static unsigned int count_free_highmem_pages(void)
1083{
1084 struct zone *zone;
1085 unsigned int cnt = 0;
1086
ee99c71c
KM
1087 for_each_populated_zone(zone)
1088 if (is_highmem(zone))
d23ad423 1089 cnt += zone_page_state(zone, NR_FREE_PAGES);
8357376d
RW
1090
1091 return cnt;
1092}
1093
1094/**
1095 * saveable_highmem_page - Determine whether a highmem page should be
1096 * included in the suspend image.
1097 *
 1098 * We should save the page if it isn't Nosave, NosaveFree, or Reserved,
 1099 * and it isn't part of a free chunk of pages.
1100 */
846705de 1101static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
8357376d
RW
1102{
1103 struct page *page;
1104
1105 if (!pfn_valid(pfn))
1106 return NULL;
1107
1108 page = pfn_to_page(pfn);
846705de
RW
1109 if (page_zone(page) != zone)
1110 return NULL;
8357376d
RW
1111
1112 BUG_ON(!PageHighMem(page));
1113
7be98234
RW
1114 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
1115 PageReserved(page))
8357376d
RW
1116 return NULL;
1117
c6968e73
SG
1118 if (page_is_guard(page))
1119 return NULL;
1120
8357376d
RW
1121 return page;
1122}
1123
1124/**
1125 * count_highmem_pages - compute the total number of saveable highmem
1126 * pages.
1127 */
1128
fe419535 1129static unsigned int count_highmem_pages(void)
8357376d
RW
1130{
1131 struct zone *zone;
1132 unsigned int n = 0;
1133
98e73dc5 1134 for_each_populated_zone(zone) {
8357376d
RW
1135 unsigned long pfn, max_zone_pfn;
1136
1137 if (!is_highmem(zone))
1138 continue;
1139
1140 mark_free_pages(zone);
c33bc315 1141 max_zone_pfn = zone_end_pfn(zone);
8357376d 1142 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
846705de 1143 if (saveable_highmem_page(zone, pfn))
8357376d
RW
1144 n++;
1145 }
1146 return n;
1147}
1148#else
846705de
RW
1149static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1150{
1151 return NULL;
1152}
8357376d
RW
1153#endif /* CONFIG_HIGHMEM */
1154
25761b6e 1155/**
8a235efa
RW
1156 * saveable_page - Determine whether a non-highmem page should be included
1157 * in the suspend image.
25761b6e 1158 *
8357376d
RW
1159 * We should save the page if it isn't Nosave, and is not in the range
1160 * of pages statically defined as 'unsaveable', and it isn't a part of
1161 * a free chunk of pages.
25761b6e 1162 */
846705de 1163static struct page *saveable_page(struct zone *zone, unsigned long pfn)
25761b6e 1164{
de491861 1165 struct page *page;
25761b6e
RW
1166
1167 if (!pfn_valid(pfn))
ae83c5ee 1168 return NULL;
25761b6e
RW
1169
1170 page = pfn_to_page(pfn);
846705de
RW
1171 if (page_zone(page) != zone)
1172 return NULL;
ae83c5ee 1173
8357376d
RW
1174 BUG_ON(PageHighMem(page));
1175
7be98234 1176 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
ae83c5ee 1177 return NULL;
8357376d 1178
8a235efa
RW
1179 if (PageReserved(page)
1180 && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
ae83c5ee 1181 return NULL;
25761b6e 1182
c6968e73
SG
1183 if (page_is_guard(page))
1184 return NULL;
1185
ae83c5ee 1186 return page;
25761b6e
RW
1187}
1188
8357376d
RW
1189/**
1190 * count_data_pages - compute the total number of saveable non-highmem
1191 * pages.
1192 */
1193
fe419535 1194static unsigned int count_data_pages(void)
25761b6e
RW
1195{
1196 struct zone *zone;
ae83c5ee 1197 unsigned long pfn, max_zone_pfn;
dc19d507 1198 unsigned int n = 0;
25761b6e 1199
98e73dc5 1200 for_each_populated_zone(zone) {
25761b6e
RW
1201 if (is_highmem(zone))
1202 continue;
8357376d 1203
25761b6e 1204 mark_free_pages(zone);
c33bc315 1205 max_zone_pfn = zone_end_pfn(zone);
ae83c5ee 1206 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
846705de 1207 if (saveable_page(zone, pfn))
8357376d 1208 n++;
25761b6e 1209 }
a0f49651 1210 return n;
25761b6e
RW
1211}
1212
8357376d
RW
1213/* This is needed, because copy_page and memcpy are not usable for copying
1214 * task structs.
1215 */
1216static inline void do_copy_page(long *dst, long *src)
f623f0db
RW
1217{
1218 int n;
1219
f623f0db
RW
1220 for (n = PAGE_SIZE / sizeof(long); n; n--)
1221 *dst++ = *src++;
1222}
1223
8a235efa
RW
1224
1225/**
1226 * safe_copy_page - check if the page we are going to copy is marked as
1227 * present in the kernel page tables (this always is the case if
1228 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
1229 * kernel_page_present() always returns 'true').
1230 */
1231static void safe_copy_page(void *dst, struct page *s_page)
1232{
1233 if (kernel_page_present(s_page)) {
1234 do_copy_page(dst, page_address(s_page));
1235 } else {
1236 kernel_map_pages(s_page, 1, 1);
1237 do_copy_page(dst, page_address(s_page));
1238 kernel_map_pages(s_page, 1, 0);
1239 }
1240}
1241
1242
8357376d
RW
1243#ifdef CONFIG_HIGHMEM
1244static inline struct page *
1245page_is_saveable(struct zone *zone, unsigned long pfn)
1246{
1247 return is_highmem(zone) ?
846705de 1248 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
8357376d
RW
1249}
1250
8a235efa 1251static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d
RW
1252{
1253 struct page *s_page, *d_page;
1254 void *src, *dst;
1255
1256 s_page = pfn_to_page(src_pfn);
1257 d_page = pfn_to_page(dst_pfn);
1258 if (PageHighMem(s_page)) {
0de9a1e2
CW
1259 src = kmap_atomic(s_page);
1260 dst = kmap_atomic(d_page);
8357376d 1261 do_copy_page(dst, src);
0de9a1e2
CW
1262 kunmap_atomic(dst);
1263 kunmap_atomic(src);
8357376d 1264 } else {
8357376d
RW
1265 if (PageHighMem(d_page)) {
1266 /* Page pointed to by src may contain some kernel
1267 * data modified by kmap_atomic()
1268 */
8a235efa 1269 safe_copy_page(buffer, s_page);
0de9a1e2 1270 dst = kmap_atomic(d_page);
3ecb01df 1271 copy_page(dst, buffer);
0de9a1e2 1272 kunmap_atomic(dst);
8357376d 1273 } else {
8a235efa 1274 safe_copy_page(page_address(d_page), s_page);
8357376d
RW
1275 }
1276 }
1277}
1278#else
846705de 1279#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
8357376d 1280
8a235efa 1281static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d 1282{
8a235efa
RW
1283 safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1284 pfn_to_page(src_pfn));
8357376d
RW
1285}
1286#endif /* CONFIG_HIGHMEM */
1287
b788db79
RW
1288static void
1289copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
25761b6e
RW
1290{
1291 struct zone *zone;
b788db79 1292 unsigned long pfn;
25761b6e 1293
98e73dc5 1294 for_each_populated_zone(zone) {
b788db79
RW
1295 unsigned long max_zone_pfn;
1296
25761b6e 1297 mark_free_pages(zone);
c33bc315 1298 max_zone_pfn = zone_end_pfn(zone);
b788db79 1299 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
8357376d 1300 if (page_is_saveable(zone, pfn))
b788db79 1301 memory_bm_set_bit(orig_bm, pfn);
25761b6e 1302 }
b788db79
RW
1303 memory_bm_position_reset(orig_bm);
1304 memory_bm_position_reset(copy_bm);
df7c4872 1305 for(;;) {
b788db79 1306 pfn = memory_bm_next_pfn(orig_bm);
df7c4872
FW
1307 if (unlikely(pfn == BM_END_OF_MAP))
1308 break;
1309 copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1310 }
25761b6e
RW
1311}
1312
8357376d
RW
1313/* Total number of image pages */
1314static unsigned int nr_copy_pages;
1315/* Number of pages needed for saving the original pfns of the image pages */
1316static unsigned int nr_meta_pages;
64a473cb
RW
1317/*
1318 * Numbers of normal and highmem page frames allocated for hibernation image
1319 * before suspending devices.
1320 */
1321unsigned int alloc_normal, alloc_highmem;
1322/*
1323 * Memory bitmap used for marking saveable pages (during hibernation) or
1324 * hibernation image pages (during restore)
1325 */
1326static struct memory_bitmap orig_bm;
1327/*
1328 * Memory bitmap used during hibernation for marking allocated page frames that
1329 * will contain copies of saveable pages. During restore it is initially used
1330 * for marking hibernation image pages, but then the set bits from it are
1331 * duplicated in @orig_bm and it is released. On highmem systems it is next
1332 * used for marking "safe" highmem pages, but it has to be reinitialized for
1333 * this purpose.
1334 */
1335static struct memory_bitmap copy_bm;
8357376d 1336
25761b6e 1337/**
940864dd 1338 * swsusp_free - free pages allocated for the suspend.
cd560bb2 1339 *
940864dd
RW
 1340 * Suspend pages are allocated before the atomic copy is made, so we
1341 * need to release them after the resume.
25761b6e
RW
1342 */
1343
1344void swsusp_free(void)
1345{
fdd64ed5 1346 unsigned long fb_pfn, fr_pfn;
6efde38f 1347
fdd64ed5
JR
1348 if (!forbidden_pages_map || !free_pages_map)
1349 goto out;
1350
1351 memory_bm_position_reset(forbidden_pages_map);
1352 memory_bm_position_reset(free_pages_map);
1353
1354loop:
1355 fr_pfn = memory_bm_next_pfn(free_pages_map);
1356 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1357
1358 /*
1359 * Find the next bit set in both bitmaps. This is guaranteed to
1360 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1361 */
1362 do {
1363 if (fb_pfn < fr_pfn)
1364 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1365 if (fr_pfn < fb_pfn)
1366 fr_pfn = memory_bm_next_pfn(free_pages_map);
1367 } while (fb_pfn != fr_pfn);
1368
1369 if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1370 struct page *page = pfn_to_page(fr_pfn);
1371
1372 memory_bm_clear_current(forbidden_pages_map);
1373 memory_bm_clear_current(free_pages_map);
1374 __free_page(page);
1375 goto loop;
25761b6e 1376 }
fdd64ed5
JR
1377
1378out:
f577eb30
RW
1379 nr_copy_pages = 0;
1380 nr_meta_pages = 0;
75534b50 1381 restore_pblist = NULL;
6e1819d6 1382 buffer = NULL;
64a473cb
RW
1383 alloc_normal = 0;
1384 alloc_highmem = 0;
25761b6e
RW
1385}
1386
4bb33435
RW
1387/* Helper functions used for the shrinking of memory. */
1388
1389#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
1390
fe419535 1391/**
4bb33435
RW
1392 * preallocate_image_pages - Allocate a number of pages for hibernation image
1393 * @nr_pages: Number of page frames to allocate.
1394 * @mask: GFP flags to use for the allocation.
fe419535 1395 *
4bb33435
RW
1396 * Return value: Number of page frames actually allocated
1397 */
1398static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1399{
1400 unsigned long nr_alloc = 0;
1401
1402 while (nr_pages > 0) {
64a473cb
RW
1403 struct page *page;
1404
1405 page = alloc_image_page(mask);
1406 if (!page)
4bb33435 1407 break;
64a473cb
RW
1408 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1409 if (PageHighMem(page))
1410 alloc_highmem++;
1411 else
1412 alloc_normal++;
4bb33435
RW
1413 nr_pages--;
1414 nr_alloc++;
1415 }
1416
1417 return nr_alloc;
1418}
1419
6715045d
RW
1420static unsigned long preallocate_image_memory(unsigned long nr_pages,
1421 unsigned long avail_normal)
4bb33435 1422{
6715045d
RW
1423 unsigned long alloc;
1424
1425 if (avail_normal <= alloc_normal)
1426 return 0;
1427
1428 alloc = avail_normal - alloc_normal;
1429 if (nr_pages < alloc)
1430 alloc = nr_pages;
1431
1432 return preallocate_image_pages(alloc, GFP_IMAGE);
4bb33435
RW
1433}
1434
1435#ifdef CONFIG_HIGHMEM
1436static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1437{
1438 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1439}
1440
1441/**
1442 * __fraction - Compute (an approximation of) x * (multiplier / base)
fe419535 1443 */
4bb33435
RW
1444static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1445{
1446 x *= multiplier;
1447 do_div(x, base);
1448 return (unsigned long)x;
1449}
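/* For example, __fraction(1000, 1, 4) evaluates to 250 (1000 * 1/4). */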
fe419535 1450
4bb33435
RW
1451static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1452 unsigned long highmem,
1453 unsigned long total)
fe419535 1454{
4bb33435
RW
1455 unsigned long alloc = __fraction(nr_pages, highmem, total);
1456
1457 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
fe419535 1458}
4bb33435
RW
1459#else /* CONFIG_HIGHMEM */
1460static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1461{
1462 return 0;
1463}
1464
1465static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1466 unsigned long highmem,
1467 unsigned long total)
1468{
1469 return 0;
1470}
1471#endif /* CONFIG_HIGHMEM */
fe419535 1472
4bb33435 1473/**
64a473cb
RW
1474 * free_unnecessary_pages - Release preallocated pages not needed for the image
1475 */
a64fc82c 1476static unsigned long free_unnecessary_pages(void)
64a473cb 1477{
a64fc82c 1478 unsigned long save, to_free_normal, to_free_highmem, free;
64a473cb 1479
6715045d
RW
1480 save = count_data_pages();
1481 if (alloc_normal >= save) {
1482 to_free_normal = alloc_normal - save;
1483 save = 0;
1484 } else {
1485 to_free_normal = 0;
1486 save -= alloc_normal;
1487 }
1488 save += count_highmem_pages();
1489 if (alloc_highmem >= save) {
1490 to_free_highmem = alloc_highmem - save;
64a473cb
RW
1491 } else {
1492 to_free_highmem = 0;
4d4cf23c
RW
1493 save -= alloc_highmem;
1494 if (to_free_normal > save)
1495 to_free_normal -= save;
1496 else
1497 to_free_normal = 0;
64a473cb 1498 }
a64fc82c 1499 free = to_free_normal + to_free_highmem;
64a473cb
RW
1500
1501 memory_bm_position_reset(&copy_bm);
1502
a9c9b442 1503 while (to_free_normal > 0 || to_free_highmem > 0) {
64a473cb
RW
1504 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1505 struct page *page = pfn_to_page(pfn);
1506
1507 if (PageHighMem(page)) {
1508 if (!to_free_highmem)
1509 continue;
1510 to_free_highmem--;
1511 alloc_highmem--;
1512 } else {
1513 if (!to_free_normal)
1514 continue;
1515 to_free_normal--;
1516 alloc_normal--;
1517 }
1518 memory_bm_clear_bit(&copy_bm, pfn);
1519 swsusp_unset_page_forbidden(page);
1520 swsusp_unset_page_free(page);
1521 __free_page(page);
1522 }
a64fc82c
WK
1523
1524 return free;
64a473cb
RW
1525}
1526
ef4aede3
RW
1527/**
1528 * minimum_image_size - Estimate the minimum acceptable size of an image
1529 * @saveable: Number of saveable pages in the system.
1530 *
1531 * We want to avoid attempting to free too much memory too hard, so estimate the
1532 * minimum acceptable size of a hibernation image to use as the lower limit for
1533 * preallocating memory.
1534 *
1535 * We assume that the minimum image size should be proportional to
1536 *
1537 * [number of saveable pages] - [number of pages that can be freed in theory]
1538 *
1539 * where the second term is the sum of (1) reclaimable slab pages, (2) active
4d434820 1540 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
ef4aede3
RW
1541 * minus mapped file pages.
1542 */
1543static unsigned long minimum_image_size(unsigned long saveable)
1544{
1545 unsigned long size;
1546
1547 size = global_page_state(NR_SLAB_RECLAIMABLE)
1548 + global_page_state(NR_ACTIVE_ANON)
1549 + global_page_state(NR_INACTIVE_ANON)
1550 + global_page_state(NR_ACTIVE_FILE)
1551 + global_page_state(NR_INACTIVE_FILE)
1552 - global_page_state(NR_FILE_MAPPED);
1553
1554 return saveable <= size ? 0 : saveable - size;
1555}
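/*
 * For instance (illustrative numbers only): if there are 500000 saveable
 * pages and the sum above says 350000 of them could in theory be reclaimed,
 * the image is not expected to shrink below 500000 - 350000 = 150000 pages.
 */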
1556
64a473cb
RW
1557/**
1558 * hibernate_preallocate_memory - Preallocate memory for hibernation image
4bb33435
RW
1559 *
1560 * To create a hibernation image it is necessary to make a copy of every page
1561 * frame in use. We also need a number of page frames to be free during
1562 * hibernation for allocations made while saving the image and for device
1563 * drivers, in case they need to allocate memory from their hibernation
ddeb6487
RW
1564 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 1565 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1566 * /sys/power/reserved_size, respectively). To make this happen, we compute the
1567 * total number of available page frames and allocate at least
4bb33435 1568 *
ddeb6487
RW
1569 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1570 * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
4bb33435
RW
1571 *
1572 * of them, which corresponds to the maximum size of a hibernation image.
1573 *
1574 * If image_size is set below the number following from the above formula,
1575 * the preallocation of memory is continued until the total number of saveable
ef4aede3
RW
1576 * pages in the system is below the requested image size or the minimum
1577 * acceptable image size returned by minimum_image_size(), whichever is greater.
4bb33435 1578 */
64a473cb 1579int hibernate_preallocate_memory(void)
fe419535 1580{
fe419535 1581 struct zone *zone;
4bb33435 1582 unsigned long saveable, size, max_size, count, highmem, pages = 0;
6715045d 1583 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
db597605 1584 ktime_t start, stop;
64a473cb 1585 int error;
fe419535 1586
64a473cb 1587 printk(KERN_INFO "PM: Preallocating image memory... ");
db597605 1588 start = ktime_get();
fe419535 1589
64a473cb
RW
1590 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1591 if (error)
1592 goto err_out;
1593
1594 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1595 if (error)
1596 goto err_out;
1597
1598 alloc_normal = 0;
1599 alloc_highmem = 0;
1600
4bb33435 1601 /* Count the number of saveable data pages. */
64a473cb 1602 save_highmem = count_highmem_pages();
4bb33435 1603 saveable = count_data_pages();
fe419535 1604
4bb33435
RW
1605 /*
1606 * Compute the total number of page frames we can use (count) and the
1607 * number of pages needed for image metadata (size).
1608 */
1609 count = saveable;
64a473cb
RW
1610 saveable += save_highmem;
1611 highmem = save_highmem;
4bb33435
RW
1612 size = 0;
1613 for_each_populated_zone(zone) {
1614 size += snapshot_additional_pages(zone);
1615 if (is_highmem(zone))
1616 highmem += zone_page_state(zone, NR_FREE_PAGES);
1617 else
1618 count += zone_page_state(zone, NR_FREE_PAGES);
1619 }
6715045d 1620 avail_normal = count;
4bb33435
RW
1621 count += highmem;
1622 count -= totalreserve_pages;
1623
85055dd8
MS
1624 /* Add number of pages required for page keys (s390 only). */
1625 size += page_key_additional_pages(saveable);
1626
4bb33435 1627 /* Compute the maximum number of saveable pages to leave in memory. */
ddeb6487
RW
1628 max_size = (count - (size + PAGES_FOR_IO)) / 2
1629 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
266f1a25 1630 /* Compute the desired number of image pages specified by image_size. */
4bb33435
RW
1631 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1632 if (size > max_size)
1633 size = max_size;
1634 /*
266f1a25
RW
1635 * If the desired number of image pages is at least as large as the
1636 * current number of saveable pages in memory, allocate page frames for
1637 * the image and we're done.
4bb33435 1638 */
64a473cb
RW
1639 if (size >= saveable) {
1640 pages = preallocate_image_highmem(save_highmem);
6715045d 1641 pages += preallocate_image_memory(saveable - pages, avail_normal);
4bb33435 1642 goto out;
64a473cb 1643 }
4bb33435 1644
ef4aede3
RW
1645 /* Estimate the minimum size of the image. */
1646 pages = minimum_image_size(saveable);
6715045d
RW
1647 /*
1648 * To avoid excessive pressure on the normal zone, leave room in it to
1649 * accommodate an image of the minimum size (unless it's already too
1650 * small, in which case don't preallocate pages from it at all).
1651 */
1652 if (avail_normal > pages)
1653 avail_normal -= pages;
1654 else
1655 avail_normal = 0;
ef4aede3
RW
1656 if (size < pages)
1657 size = min_t(unsigned long, pages, max_size);
1658
4bb33435
RW
1659 /*
1660 * Let the memory management subsystem know that we're going to need a
1661 * large number of page frames to allocate and make it free some memory.
1662 * NOTE: If this is not done, performance will be hurt badly in some
1663 * test cases.
1664 */
1665 shrink_all_memory(saveable - size);
1666
1667 /*
1668 * The number of saveable pages in memory was too high, so apply some
1669 * pressure to decrease it. First, make room for the largest possible
1670 * image and fail if that doesn't work. Next, try to decrease the size
ef4aede3
RW
1671 * of the image as much as indicated by 'size' using allocations from
1672 * highmem and non-highmem zones separately.
4bb33435
RW
1673 */
1674 pages_highmem = preallocate_image_highmem(highmem / 2);
fd432b9f
AL
1675 alloc = count - max_size;
1676 if (alloc > pages_highmem)
1677 alloc -= pages_highmem;
1678 else
1679 alloc = 0;
6715045d
RW
1680 pages = preallocate_image_memory(alloc, avail_normal);
1681 if (pages < alloc) {
1682 /* We have exhausted non-highmem pages, try highmem. */
1683 alloc -= pages;
1684 pages += pages_highmem;
1685 pages_highmem = preallocate_image_highmem(alloc);
1686 if (pages_highmem < alloc)
1687 goto err_out;
1688 pages += pages_highmem;
1689 /*
1690 * size is the desired number of saveable pages to leave in
1691 * memory, so try to preallocate (all memory - size) pages.
1692 */
1693 alloc = (count - pages) - size;
1694 pages += preallocate_image_highmem(alloc);
1695 } else {
1696 /*
1697 * There are approximately max_size saveable pages at this point
1698 * and we want to reduce this number down to size.
1699 */
1700 alloc = max_size - size;
1701 size = preallocate_highmem_fraction(alloc, highmem, count);
1702 pages_highmem += size;
1703 alloc -= size;
1704 size = preallocate_image_memory(alloc, avail_normal);
1705 pages_highmem += preallocate_image_highmem(alloc - size);
1706 pages += pages_highmem + size;
1707 }
4bb33435 1708
64a473cb
RW
1709 /*
1710 * We only need as many page frames for the image as there are saveable
1711 * pages in memory, but we have allocated more. Release the excessive
1712 * ones now.
1713 */
a64fc82c 1714 pages -= free_unnecessary_pages();
4bb33435
RW
1715
1716 out:
db597605 1717 stop = ktime_get();
64a473cb 1718 printk(KERN_CONT "done (allocated %lu pages)\n", pages);
db597605 1719 swsusp_show_speed(start, stop, pages, "Allocated");
fe419535
RW
1720
1721 return 0;
64a473cb
RW
1722
1723 err_out:
1724 printk(KERN_CONT "\n");
1725 swsusp_free();
1726 return -ENOMEM;
fe419535
RW
1727}
1728
8357376d
RW
1729#ifdef CONFIG_HIGHMEM
1730/**
1731 * count_pages_for_highmem - compute the number of non-highmem pages
1732 * that will be necessary for creating copies of highmem pages.
1733 */
1734
1735static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1736{
64a473cb 1737 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
8357376d
RW
1738
1739 if (free_highmem >= nr_highmem)
1740 nr_highmem = 0;
1741 else
1742 nr_highmem -= free_highmem;
1743
1744 return nr_highmem;
1745}
1746#else
1747static unsigned int
1748count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1749#endif /* CONFIG_HIGHMEM */
25761b6e
RW
1750
1751/**
8357376d
RW
1752 * enough_free_mem - Make sure we have enough free memory for the
1753 * snapshot image.
25761b6e
RW
1754 */
1755
8357376d 1756static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
25761b6e 1757{
e5e2fa78 1758 struct zone *zone;
64a473cb 1759 unsigned int free = alloc_normal;
e5e2fa78 1760
98e73dc5 1761 for_each_populated_zone(zone)
8357376d 1762 if (!is_highmem(zone))
d23ad423 1763 free += zone_page_state(zone, NR_FREE_PAGES);
940864dd 1764
8357376d 1765 nr_pages += count_pages_for_highmem(nr_highmem);
64a473cb
RW
1766 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1767 nr_pages, PAGES_FOR_IO, free);
940864dd 1768
64a473cb 1769 return free > nr_pages + PAGES_FOR_IO;
25761b6e
RW
1770}
1771
8357376d
RW
1772#ifdef CONFIG_HIGHMEM
1773/**
1774 * get_highmem_buffer - if there are some highmem pages in the suspend
1775 * image, we may need the buffer to copy them and/or load their data.
1776 */
1777
1778static inline int get_highmem_buffer(int safe_needed)
1779{
1780 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1781 return buffer ? 0 : -ENOMEM;
1782}
1783
1784/**
1785 * alloc_highmem_pages - allocate some highmem pages for the image.
1786 * Try to allocate as many pages as needed, but if the number of free
1787 * highmem pages is less than that, allocate them all.
1788 */
1789
1790static inline unsigned int
64a473cb 1791alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
8357376d
RW
1792{
1793 unsigned int to_alloc = count_free_highmem_pages();
1794
1795 if (to_alloc > nr_highmem)
1796 to_alloc = nr_highmem;
1797
1798 nr_highmem -= to_alloc;
1799 while (to_alloc-- > 0) {
1800 struct page *page;
1801
d0164adc 1802 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
8357376d
RW
1803 memory_bm_set_bit(bm, page_to_pfn(page));
1804 }
1805 return nr_highmem;
1806}
1807#else
1808static inline int get_highmem_buffer(int safe_needed) { return 0; }
1809
1810static inline unsigned int
64a473cb 1811alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
8357376d
RW
1812#endif /* CONFIG_HIGHMEM */
1813
1814/**
1815 * swsusp_alloc - allocate memory for the suspend image
1816 *
1817 * We first try to allocate as many highmem pages as there are
1818 * saveable highmem pages in the system. If that fails, we allocate
1819 * non-highmem pages for the copies of the remaining highmem ones.
1820 *
1821 * In this approach it is likely that the copies of highmem pages will
1822 * also be located in high memory, because of the way in which
1823 * copy_data_pages() works.
1824 */
1825
b788db79
RW
1826static int
1827swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
8357376d 1828 unsigned int nr_pages, unsigned int nr_highmem)
054bd4c1 1829{
8357376d 1830 if (nr_highmem > 0) {
2e725a06 1831 if (get_highmem_buffer(PG_ANY))
64a473cb
RW
1832 goto err_out;
1833 if (nr_highmem > alloc_highmem) {
1834 nr_highmem -= alloc_highmem;
1835 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1836 }
8357376d 1837 }
64a473cb
RW
1838 if (nr_pages > alloc_normal) {
1839 nr_pages -= alloc_normal;
1840 while (nr_pages-- > 0) {
1841 struct page *page;
1842
1843 page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1844 if (!page)
1845 goto err_out;
1846 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1847 }
25761b6e 1848 }
64a473cb 1849
b788db79 1850 return 0;
25761b6e 1851
64a473cb 1852 err_out:
b788db79 1853 swsusp_free();
2e725a06 1854 return -ENOMEM;
25761b6e
RW
1855}
1856
722a9f92 1857asmlinkage __visible int swsusp_save(void)
25761b6e 1858{
8357376d 1859 unsigned int nr_pages, nr_highmem;
25761b6e 1860
07c3bb57 1861 printk(KERN_INFO "PM: Creating hibernation image:\n");
25761b6e 1862
9f8f2172 1863 drain_local_pages(NULL);
a0f49651 1864 nr_pages = count_data_pages();
8357376d 1865 nr_highmem = count_highmem_pages();
23976728 1866 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
25761b6e 1867
8357376d 1868 if (!enough_free_mem(nr_pages, nr_highmem)) {
23976728 1869 printk(KERN_ERR "PM: Not enough free memory\n");
25761b6e
RW
1870 return -ENOMEM;
1871 }
1872
8357376d 1873 if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
23976728 1874 printk(KERN_ERR "PM: Memory allocation failed\n");
a0f49651 1875 return -ENOMEM;
8357376d 1876 }
25761b6e
RW
1877
1878 /* While allocating the suspend pagedir, new cold pages may appear.
1879 * Kill them.
1880 */
9f8f2172 1881 drain_local_pages(NULL);
b788db79 1882 copy_data_pages(&copy_bm, &orig_bm);
25761b6e
RW
1883
1884 /*
1885 * End of critical section. From now on, we can write to memory,
1886 * but we should not touch disk. In particular, this means we must _not_
1887 * touch swap space, except to write out the image itself.
1888 */
1889
8357376d 1890 nr_pages += nr_highmem;
a0f49651 1891 nr_copy_pages = nr_pages;
8357376d 1892 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
a0f49651 1893
23976728
RW
1894 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1895 nr_pages);
8357376d 1896
25761b6e
RW
1897 return 0;
1898}
f577eb30 1899
d307c4a8
RW
1900#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1901static int init_header_complete(struct swsusp_info *info)
f577eb30 1902{
d307c4a8 1903 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
f577eb30 1904 info->version_code = LINUX_VERSION_CODE;
d307c4a8
RW
1905 return 0;
1906}
1907
1908static char *check_image_kernel(struct swsusp_info *info)
1909{
1910 if (info->version_code != LINUX_VERSION_CODE)
1911 return "kernel version";
1912 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1913 return "system type";
1914 if (strcmp(info->uts.release,init_utsname()->release))
1915 return "kernel release";
1916 if (strcmp(info->uts.version,init_utsname()->version))
1917 return "version";
1918 if (strcmp(info->uts.machine,init_utsname()->machine))
1919 return "machine";
1920 return NULL;
1921}
1922#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1923
af508b34
RW
1924unsigned long snapshot_get_image_size(void)
1925{
1926 return nr_copy_pages + nr_meta_pages + 1;
1927}
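As a quick sanity check on this accounting, a worked example assuming a 64-bit kernel with 4 KiB pages:

/*
 * Worked example (assumed 64-bit kernel, 4 KiB pages): one meta page
 * holds PAGE_SIZE / sizeof(long) = 512 PFNs, so an image with
 * nr_copy_pages = 131072 (512 MiB of data) needs
 * nr_meta_pages = DIV_ROUND_UP(131072, 512) = 256, and
 * snapshot_get_image_size() reports 131072 + 256 + 1 = 131329 page
 * frames, the extra one being the header page.
 */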
1928
d307c4a8
RW
1929static int init_header(struct swsusp_info *info)
1930{
1931 memset(info, 0, sizeof(struct swsusp_info));
0ed5fd13 1932 info->num_physpages = get_num_physpages();
f577eb30 1933 info->image_pages = nr_copy_pages;
af508b34 1934 info->pages = snapshot_get_image_size();
6e1819d6
RW
1935 info->size = info->pages;
1936 info->size <<= PAGE_SHIFT;
d307c4a8 1937 return init_header_complete(info);
f577eb30
RW
1938}
1939
1940/**
940864dd
RW
1941 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1942 * are stored in the array @buf[] (1 page at a time)
f577eb30
RW
1943 */
1944
b788db79 1945static inline void
940864dd 1946pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
f577eb30
RW
1947{
1948 int j;
1949
b788db79 1950 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
940864dd
RW
1951 buf[j] = memory_bm_next_pfn(bm);
1952 if (unlikely(buf[j] == BM_END_OF_MAP))
b788db79 1953 break;
85055dd8
MS
1954 /* Save page key for data page (s390 only). */
1955 page_key_read(buf + j);
f577eb30 1956 }
f577eb30
RW
1957}
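To make the resulting meta-page layout concrete, a small illustration with made-up PFN values:

/*
 * Example layout (assumed 64-bit, 4 KiB pages): each call fills one
 * page with up to PAGE_SIZE / sizeof(long) = 512 PFN slots.  If only
 * three image pages remain, the buffer might contain
 *	{ 0x1a2b, 0x1a2c, 0x3f00, BM_END_OF_MAP, ... }
 * and readers of the buffer (see unpack_orig_pfns() below) stop at the
 * BM_END_OF_MAP marker.
 */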
1958
1959/**
1960 * snapshot_read_next - used for reading the system memory snapshot.
1961 *
1962 * On the first call to it @handle should point to a zeroed
1963 * snapshot_handle structure. The structure gets updated and a pointer
1964 * to it should be passed to this function on every subsequent call.
1965 *
f577eb30
RW
1966 * On success the function returns a positive number. Then, the caller
1967 * is allowed to read up to the returned number of bytes from the memory
d3c1b24c 1968 * location computed by the data_of() macro.
f577eb30
RW
1969 *
1970 * The function returns 0 to indicate the end of data stream condition,
1971 * and a negative number is returned on error. In such cases the
1972 * structure pointed to by @handle is not updated and should not be used
1973 * any more.
1974 */
1975
d3c1b24c 1976int snapshot_read_next(struct snapshot_handle *handle)
f577eb30 1977{
fb13a28b 1978 if (handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30 1979 return 0;
b788db79 1980
f577eb30
RW
1981 if (!buffer) {
1982 /* This causes the buffer to be freed by swsusp_free() */
8357376d 1983 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
f577eb30
RW
1984 if (!buffer)
1985 return -ENOMEM;
1986 }
d3c1b24c 1987 if (!handle->cur) {
d307c4a8
RW
1988 int error;
1989
1990 error = init_header((struct swsusp_info *)buffer);
1991 if (error)
1992 return error;
f577eb30 1993 handle->buffer = buffer;
b788db79
RW
1994 memory_bm_position_reset(&orig_bm);
1995 memory_bm_position_reset(&copy_bm);
d3c1b24c 1996 } else if (handle->cur <= nr_meta_pages) {
3ecb01df 1997 clear_page(buffer);
d3c1b24c
JS
1998 pack_pfns(buffer, &orig_bm);
1999 } else {
2000 struct page *page;
b788db79 2001
d3c1b24c
JS
2002 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2003 if (PageHighMem(page)) {
2004 /* Highmem pages are copied to the buffer,
2005 * because we can't return with a kmapped
2006 * highmem page (we may not be called again).
2007 */
2008 void *kaddr;
8357376d 2009
0de9a1e2 2010 kaddr = kmap_atomic(page);
3ecb01df 2011 copy_page(buffer, kaddr);
0de9a1e2 2012 kunmap_atomic(kaddr);
d3c1b24c
JS
2013 handle->buffer = buffer;
2014 } else {
2015 handle->buffer = page_address(page);
f577eb30 2016 }
f577eb30 2017 }
d3c1b24c
JS
2018 handle->cur++;
2019 return PAGE_SIZE;
f577eb30
RW
2020}
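To illustrate the contract described above, a minimal caller-side sketch follows; store_page() is a hypothetical helper standing in for whatever actually saves each page and is not part of snapshot.c:

/* Sketch of a read loop; store_page() is a hypothetical helper, not a real API. */
static int save_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	while ((ret = snapshot_read_next(handle)) > 0) {
		/* Up to 'ret' bytes may be read from data_of(*handle). */
		int error = store_page(data_of(*handle), ret);

		if (error)
			return error;
	}
	return ret;	/* 0 at the end of the data stream, negative on error */
}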
2021
6dbecfd3
RW
2022static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2023 struct memory_bitmap *src)
2024{
2025 unsigned long pfn;
2026
2027 memory_bm_position_reset(src);
2028 pfn = memory_bm_next_pfn(src);
2029 while (pfn != BM_END_OF_MAP) {
2030 memory_bm_set_bit(dst, pfn);
2031 pfn = memory_bm_next_pfn(src);
2032 }
2033}
2034
f577eb30
RW
2035/**
2036 * mark_unsafe_pages - mark the pages that cannot be used for storing
2037 * the image during resume, because they conflict with the pages that
2038 * had been used before suspend
2039 */
2040
6dbecfd3 2041static void mark_unsafe_pages(struct memory_bitmap *bm)
f577eb30 2042{
6dbecfd3 2043 unsigned long pfn;
f577eb30 2044
6dbecfd3
RW
2045 /* Clear the "free"/"unsafe" bit for all PFNs */
2046 memory_bm_position_reset(free_pages_map);
2047 pfn = memory_bm_next_pfn(free_pages_map);
2048 while (pfn != BM_END_OF_MAP) {
2049 memory_bm_clear_current(free_pages_map);
2050 pfn = memory_bm_next_pfn(free_pages_map);
f577eb30
RW
2051 }
2052
6dbecfd3
RW
2053 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2054 duplicate_memory_bitmap(free_pages_map, bm);
f577eb30 2055
940864dd 2056 allocated_unsafe_pages = 0;
f577eb30
RW
2057}
2058
d307c4a8 2059static int check_header(struct swsusp_info *info)
f577eb30 2060{
d307c4a8 2061 char *reason;
f577eb30 2062
d307c4a8 2063 reason = check_image_kernel(info);
0ed5fd13 2064 if (!reason && info->num_physpages != get_num_physpages())
f577eb30 2065 reason = "memory size";
f577eb30 2066 if (reason) {
23976728 2067 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
f577eb30
RW
2068 return -EPERM;
2069 }
2070 return 0;
2071}
2072
2073/**
2074 * load_header - check the image header and copy data from it
2075 */
2076
940864dd
RW
2077static int
2078load_header(struct swsusp_info *info)
f577eb30
RW
2079{
2080 int error;
f577eb30 2081
940864dd 2082 restore_pblist = NULL;
f577eb30
RW
2083 error = check_header(info);
2084 if (!error) {
f577eb30
RW
2085 nr_copy_pages = info->image_pages;
2086 nr_meta_pages = info->pages - info->image_pages - 1;
2087 }
2088 return error;
2089}
2090
2091/**
940864dd
RW
2092 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
2093 * the corresponding bit in the memory bitmap @bm
f577eb30 2094 */
69643279 2095static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
f577eb30
RW
2096{
2097 int j;
2098
940864dd
RW
2099 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2100 if (unlikely(buf[j] == BM_END_OF_MAP))
2101 break;
2102
85055dd8
MS
2103 /* Extract and buffer page key for data page (s390 only). */
2104 page_key_memorize(buf + j);
2105
6dbecfd3 2106 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
69643279
RW
2107 memory_bm_set_bit(bm, buf[j]);
2108 else
2109 return -EFAULT;
f577eb30 2110 }
69643279
RW
2111
2112 return 0;
f577eb30
RW
2113}
2114
8357376d
RW
2115#ifdef CONFIG_HIGHMEM
2116/* struct highmem_pbe is used for creating the list of highmem pages that
2117 * should be restored atomically during the resume from disk, because the page
2118 * frames they have occupied before the suspend are in use.
2119 */
2120struct highmem_pbe {
2121 struct page *copy_page; /* data is here now */
2122 struct page *orig_page; /* data was here before the suspend */
2123 struct highmem_pbe *next;
2124};
2125
2126/* List of highmem PBEs needed for restoring the highmem pages that were
2127 * allocated before the suspend and included in the suspend image, but have
2128 * also been allocated by the "resume" kernel, so their contents cannot be
2129 * written directly to their "original" page frames.
2130 */
2131static struct highmem_pbe *highmem_pblist;
2132
2133/**
2134 * count_highmem_image_pages - compute the number of highmem pages in the
2135 * suspend image. The bits in the memory bitmap @bm that correspond to the
2136 * image pages are assumed to be set.
2137 */
2138
2139static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2140{
2141 unsigned long pfn;
2142 unsigned int cnt = 0;
2143
2144 memory_bm_position_reset(bm);
2145 pfn = memory_bm_next_pfn(bm);
2146 while (pfn != BM_END_OF_MAP) {
2147 if (PageHighMem(pfn_to_page(pfn)))
2148 cnt++;
2149
2150 pfn = memory_bm_next_pfn(bm);
2151 }
2152 return cnt;
2153}
2154
2155/**
2156 * prepare_highmem_image - try to allocate as many highmem pages as
2157 * there are highmem image pages (@nr_highmem_p points to the variable
2158 * containing the number of highmem image pages). The pages that are
2159 * "safe" (ie. will not be overwritten when the suspend image is
2160 * restored) have the corresponding bits set in @bm (it must be
2161 * uninitialized).
2162 *
2163 * NOTE: This function should not be called if there are no highmem
2164 * image pages.
2165 */
2166
2167static unsigned int safe_highmem_pages;
2168
2169static struct memory_bitmap *safe_highmem_bm;
2170
2171static int
2172prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2173{
2174 unsigned int to_alloc;
2175
2176 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2177 return -ENOMEM;
2178
2179 if (get_highmem_buffer(PG_SAFE))
2180 return -ENOMEM;
2181
2182 to_alloc = count_free_highmem_pages();
2183 if (to_alloc > *nr_highmem_p)
2184 to_alloc = *nr_highmem_p;
2185 else
2186 *nr_highmem_p = to_alloc;
2187
2188 safe_highmem_pages = 0;
2189 while (to_alloc-- > 0) {
2190 struct page *page;
2191
2192 page = alloc_page(__GFP_HIGHMEM);
7be98234 2193 if (!swsusp_page_is_free(page)) {
8357376d
RW
2194 /* The page is "safe", set its bit in the bitmap */
2195 memory_bm_set_bit(bm, page_to_pfn(page));
2196 safe_highmem_pages++;
2197 }
2198 /* Mark the page as allocated */
7be98234
RW
2199 swsusp_set_page_forbidden(page);
2200 swsusp_set_page_free(page);
8357376d
RW
2201 }
2202 memory_bm_position_reset(bm);
2203 safe_highmem_bm = bm;
2204 return 0;
2205}
2206
2207/**
2208 * get_highmem_page_buffer - for given highmem image page find the buffer
2209 * that suspend_write_next() should set for its caller to write to.
2210 *
2211 * If the page is to be saved to its "original" page frame or a copy of
2212 * the page is to be made in highmem, @buffer is returned. Otherwise,
2213 * the copy of the page is to be made in normal memory, so the address of
2214 * the copy is returned.
2215 *
2216 * If @buffer is returned, the caller of suspend_write_next() will write
2217 * the page's contents to @buffer, so they will have to be copied to the
2218 * right location on the next call to suspend_write_next() and it is done
2219 * with the help of copy_last_highmem_page(). For this purpose, if
2220 * @buffer is returned, @last_highmem_page is set to the page to which
2221 * the data will have to be copied from @buffer.
2222 */
2223
2224static struct page *last_highmem_page;
2225
2226static void *
2227get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2228{
2229 struct highmem_pbe *pbe;
2230 void *kaddr;
2231
7be98234 2232 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
8357376d
RW
2233 /* We have allocated the "original" page frame and we can
2234 * use it directly to store the loaded page.
2235 */
2236 last_highmem_page = page;
2237 return buffer;
2238 }
2239 /* The "original" page frame has not been allocated and we have to
2240 * use a "safe" page frame to store the loaded page.
2241 */
2242 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2243 if (!pbe) {
2244 swsusp_free();
69643279 2245 return ERR_PTR(-ENOMEM);
8357376d
RW
2246 }
2247 pbe->orig_page = page;
2248 if (safe_highmem_pages > 0) {
2249 struct page *tmp;
2250
2251 /* Copy of the page will be stored in high memory */
2252 kaddr = buffer;
2253 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2254 safe_highmem_pages--;
2255 last_highmem_page = tmp;
2256 pbe->copy_page = tmp;
2257 } else {
2258 /* Copy of the page will be stored in normal memory */
2259 kaddr = safe_pages_list;
2260 safe_pages_list = safe_pages_list->next;
2261 pbe->copy_page = virt_to_page(kaddr);
2262 }
2263 pbe->next = highmem_pblist;
2264 highmem_pblist = pbe;
2265 return kaddr;
2266}
2267
2268/**
2269 * copy_last_highmem_page - copy the contents of a highmem image page from
2270 * @buffer, where the caller of snapshot_write_next() has placed them,
2271 * to the right location represented by @last_highmem_page.
2272 */
2273
2274static void copy_last_highmem_page(void)
2275{
2276 if (last_highmem_page) {
2277 void *dst;
2278
0de9a1e2 2279 dst = kmap_atomic(last_highmem_page);
3ecb01df 2280 copy_page(dst, buffer);
0de9a1e2 2281 kunmap_atomic(dst);
8357376d
RW
2282 last_highmem_page = NULL;
2283 }
2284}
2285
2286static inline int last_highmem_page_copied(void)
2287{
2288 return !last_highmem_page;
2289}
2290
2291static inline void free_highmem_data(void)
2292{
2293 if (safe_highmem_bm)
2294 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2295
2296 if (buffer)
2297 free_image_page(buffer, PG_UNSAFE_CLEAR);
2298}
2299#else
8357376d
RW
2300static unsigned int
2301count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2302
2303static inline int
2304prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2305{
2306 return 0;
2307}
2308
2309static inline void *
2310get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2311{
69643279 2312 return ERR_PTR(-EINVAL);
8357376d
RW
2313}
2314
2315static inline void copy_last_highmem_page(void) {}
2316static inline int last_highmem_page_copied(void) { return 1; }
2317static inline void free_highmem_data(void) {}
2318#endif /* CONFIG_HIGHMEM */
2319
f577eb30 2320/**
940864dd
RW
2321 * prepare_image - use the memory bitmap @bm to mark the pages that will
2322 * be overwritten in the process of restoring the system memory state
2323 * from the suspend image ("unsafe" pages) and allocate memory for the
2324 * image.
968808b8 2325 *
940864dd
RW
2326 * The idea is to allocate a new memory bitmap first and then allocate
2327 * as many pages as needed for the image data, but not to assign these
2328 * pages to specific tasks initially. Instead, we just mark them as
8357376d
RW
2329 * allocated and create a list of "safe" pages that will be used
2330 * later. On systems with high memory a list of "safe" highmem pages is
2331 * also created.
f577eb30
RW
2332 */
2333
940864dd
RW
2334#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2335
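For a sense of scale, a rough calculation assuming a 64-bit kernel with 4 KiB pages and struct pbe consisting of three pointers:

/*
 * Rough arithmetic (assumed 64-bit, 4 KiB pages, struct pbe = three
 * pointers = 24 bytes): LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes,
 * so each linked page holds 4088 / 24 = 170 PBEs.
 */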
940864dd
RW
2336static int
2337prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
f577eb30 2338{
8357376d 2339 unsigned int nr_pages, nr_highmem;
9c744481 2340 struct linked_page *lp;
940864dd 2341 int error;
f577eb30 2342
8357376d
RW
2343 /* If there is no highmem, the buffer will not be necessary */
2344 free_image_page(buffer, PG_UNSAFE_CLEAR);
2345 buffer = NULL;
2346
2347 nr_highmem = count_highmem_image_pages(bm);
6dbecfd3 2348 mark_unsafe_pages(bm);
940864dd
RW
2349
2350 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2351 if (error)
2352 goto Free;
2353
2354 duplicate_memory_bitmap(new_bm, bm);
2355 memory_bm_free(bm, PG_UNSAFE_KEEP);
8357376d
RW
2356 if (nr_highmem > 0) {
2357 error = prepare_highmem_image(bm, &nr_highmem);
2358 if (error)
2359 goto Free;
2360 }
940864dd
RW
2361 /* Reserve some safe pages for potential later use.
2362 *
2363 * NOTE: This way we make sure there will be enough safe pages for the
2364 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2365 * nr_copy_pages cannot be greater than 50% of the memory anyway.
9c744481
RW
2366 *
2367 * nr_copy_pages cannot be less than allocated_unsafe_pages, either.
940864dd 2368 */
8357376d 2369 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
940864dd
RW
2370 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2371 while (nr_pages > 0) {
8357376d 2372 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
940864dd 2373 if (!lp) {
f577eb30 2374 error = -ENOMEM;
940864dd
RW
2375 goto Free;
2376 }
9c744481
RW
2377 lp->next = safe_pages_list;
2378 safe_pages_list = lp;
940864dd 2379 nr_pages--;
f577eb30 2380 }
940864dd 2381 /* Preallocate memory for the image */
8357376d 2382 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
940864dd
RW
2383 while (nr_pages > 0) {
2384 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2385 if (!lp) {
2386 error = -ENOMEM;
2387 goto Free;
2388 }
7be98234 2389 if (!swsusp_page_is_free(virt_to_page(lp))) {
940864dd
RW
2390 /* The page is "safe", add it to the list */
2391 lp->next = safe_pages_list;
2392 safe_pages_list = lp;
968808b8 2393 }
940864dd 2394 /* Mark the page as allocated */
7be98234
RW
2395 swsusp_set_page_forbidden(virt_to_page(lp));
2396 swsusp_set_page_free(virt_to_page(lp));
940864dd 2397 nr_pages--;
968808b8 2398 }
940864dd
RW
2399 return 0;
2400
59a49335 2401 Free:
940864dd 2402 swsusp_free();
f577eb30
RW
2403 return error;
2404}
2405
940864dd
RW
2406/**
2407 * get_buffer - compute the address that snapshot_write_next() should
2408 * set for its caller to write to.
2409 */
2410
2411static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
968808b8 2412{
940864dd 2413 struct pbe *pbe;
69643279
RW
2414 struct page *page;
2415 unsigned long pfn = memory_bm_next_pfn(bm);
968808b8 2416
69643279
RW
2417 if (pfn == BM_END_OF_MAP)
2418 return ERR_PTR(-EFAULT);
2419
2420 page = pfn_to_page(pfn);
8357376d
RW
2421 if (PageHighMem(page))
2422 return get_highmem_page_buffer(page, ca);
2423
7be98234 2424 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
940864dd
RW
2425 /* We have allocated the "original" page frame and we can
2426 * use it directly to store the loaded page.
968808b8 2427 */
940864dd
RW
2428 return page_address(page);
2429
2430 /* The "original" page frame has not been allocated and we have to
2431 * use a "safe" page frame to store the loaded page.
968808b8 2432 */
940864dd
RW
2433 pbe = chain_alloc(ca, sizeof(struct pbe));
2434 if (!pbe) {
2435 swsusp_free();
69643279 2436 return ERR_PTR(-ENOMEM);
940864dd 2437 }
8357376d
RW
2438 pbe->orig_address = page_address(page);
2439 pbe->address = safe_pages_list;
940864dd
RW
2440 safe_pages_list = safe_pages_list->next;
2441 pbe->next = restore_pblist;
2442 restore_pblist = pbe;
8357376d 2443 return pbe->address;
968808b8
RW
2444}
2445
f577eb30
RW
2446/**
2447 * snapshot_write_next - used for writing the system memory snapshot.
2448 *
2449 * On the first call to it @handle should point to a zeroed
2450 * snapshot_handle structure. The structure gets updated and a pointer
2451 * to it should be passed to this function on every subsequent call.
2452 *
f577eb30
RW
2453 * On success the function returns a positive number. Then, the caller
2454 * is allowed to write up to the returned number of bytes to the memory
d3c1b24c 2455 * location computed by the data_of() macro.
f577eb30
RW
2456 *
2457 * The function returns 0 to indicate the "end of file" condition,
2458 * and a negative number is returned on error. In such cases the
2459 * structure pointed to by @handle is not updated and should not be used
2460 * any more.
2461 */
2462
d3c1b24c 2463int snapshot_write_next(struct snapshot_handle *handle)
f577eb30 2464{
940864dd 2465 static struct chain_allocator ca;
f577eb30
RW
2466 int error = 0;
2467
940864dd 2468 /* Check if we have already loaded the entire image */
d3c1b24c 2469 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30 2470 return 0;
940864dd 2471
d3c1b24c
JS
2472 handle->sync_read = 1;
2473
2474 if (!handle->cur) {
8357376d
RW
2475 if (!buffer)
2476 /* This causes the buffer to be freed by swsusp_free() */
2477 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2478
f577eb30
RW
2479 if (!buffer)
2480 return -ENOMEM;
8357376d 2481
f577eb30 2482 handle->buffer = buffer;
d3c1b24c
JS
2483 } else if (handle->cur == 1) {
2484 error = load_header(buffer);
2485 if (error)
2486 return error;
940864dd 2487
9c744481
RW
2488 safe_pages_list = NULL;
2489
d3c1b24c
JS
2490 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2491 if (error)
2492 return error;
2493
85055dd8
MS
2494 /* Allocate buffer for page keys. */
2495 error = page_key_alloc(nr_copy_pages);
2496 if (error)
2497 return error;
2498
d3c1b24c
JS
2499 } else if (handle->cur <= nr_meta_pages + 1) {
2500 error = unpack_orig_pfns(buffer, &copy_bm);
2501 if (error)
2502 return error;
940864dd 2503
d3c1b24c
JS
2504 if (handle->cur == nr_meta_pages + 1) {
2505 error = prepare_image(&orig_bm, &copy_bm);
69643279
RW
2506 if (error)
2507 return error;
2508
d3c1b24c
JS
2509 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2510 memory_bm_position_reset(&orig_bm);
2511 restore_pblist = NULL;
940864dd 2512 handle->buffer = get_buffer(&orig_bm, &ca);
d3c1b24c 2513 handle->sync_read = 0;
69643279
RW
2514 if (IS_ERR(handle->buffer))
2515 return PTR_ERR(handle->buffer);
f577eb30 2516 }
f577eb30 2517 } else {
d3c1b24c 2518 copy_last_highmem_page();
85055dd8
MS
2519 /* Restore page key for data page (s390 only). */
2520 page_key_write(handle->buffer);
d3c1b24c
JS
2521 handle->buffer = get_buffer(&orig_bm, &ca);
2522 if (IS_ERR(handle->buffer))
2523 return PTR_ERR(handle->buffer);
2524 if (handle->buffer != buffer)
2525 handle->sync_read = 0;
f577eb30 2526 }
d3c1b24c
JS
2527 handle->cur++;
2528 return PAGE_SIZE;
f577eb30
RW
2529}
2530
8357376d
RW
2531/**
2532 * snapshot_write_finalize - must be called after the last call to
2533 * snapshot_write_next() in case the last page in the image happens
2534 * to be a highmem page and its contents should be stored in
2535 * highmem. Additionally, it releases the memory that will not be
2536 * used any more.
2537 */
2538
2539void snapshot_write_finalize(struct snapshot_handle *handle)
2540{
2541 copy_last_highmem_page();
85055dd8
MS
2542 /* Restore page key for data page (s390 only). */
2543 page_key_write(handle->buffer);
2544 page_key_free();
8357376d 2545 /* Free only if we have loaded the image entirely */
d3c1b24c 2546 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
8357376d
RW
2547 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2548 free_highmem_data();
2549 }
2550}
2551
f577eb30
RW
2552int snapshot_image_loaded(struct snapshot_handle *handle)
2553{
8357376d 2554 return !(!nr_copy_pages || !last_highmem_page_copied() ||
940864dd
RW
2555 handle->cur <= nr_meta_pages + nr_copy_pages);
2556}
2557
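A minimal sketch of how the restore-side entry points fit together; load_page() is a hypothetical source of image data, not a real kernel function:

/* Sketch of a restore loop; load_page() is a hypothetical helper, not a real API. */
static int load_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	for (;;) {
		ret = snapshot_write_next(handle);
		if (ret <= 0)
			break;
		/* Up to 'ret' bytes may be written to data_of(*handle). */
		ret = load_page(data_of(*handle), ret);
		if (ret)
			break;
	}
	snapshot_write_finalize(handle);
	if (!ret && !snapshot_image_loaded(handle))
		ret = -ENODATA;
	return ret;
}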
8357376d
RW
2558#ifdef CONFIG_HIGHMEM
2559/* Assumes that @buf is ready and points to a "safe" page */
2560static inline void
2561swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
940864dd 2562{
8357376d
RW
2563 void *kaddr1, *kaddr2;
2564
0de9a1e2
CW
2565 kaddr1 = kmap_atomic(p1);
2566 kaddr2 = kmap_atomic(p2);
3ecb01df
JB
2567 copy_page(buf, kaddr1);
2568 copy_page(kaddr1, kaddr2);
2569 copy_page(kaddr2, buf);
0de9a1e2
CW
2570 kunmap_atomic(kaddr2);
2571 kunmap_atomic(kaddr1);
8357376d
RW
2572}
2573
2574/**
2575 * restore_highmem - for each highmem page that was allocated before
2576 * the suspend and included in the suspend image, and also has been
2577 * allocated by the "resume" kernel, swap its current (ie. "before
2578 * resume") contents with the previous (ie. "before suspend") ones.
2579 *
2580 * If the resume eventually fails, we can call this function once
2581 * again and restore the "before resume" highmem state.
2582 */
2583
2584int restore_highmem(void)
2585{
2586 struct highmem_pbe *pbe = highmem_pblist;
2587 void *buf;
2588
2589 if (!pbe)
2590 return 0;
2591
2592 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2593 if (!buf)
2594 return -ENOMEM;
2595
2596 while (pbe) {
2597 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2598 pbe = pbe->next;
2599 }
2600 free_image_page(buf, PG_UNSAFE_CLEAR);
2601 return 0;
f577eb30 2602}
8357376d 2603#endif /* CONFIG_HIGHMEM */