/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#ifdef CONFIG_DEBUG_RODATA
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_DEBUG_RODATA */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
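
/*
 * For example (hypothetical numbers): on a machine with 4 GiB of RAM the
 * default works out to (2/5) * 4 GiB, i.e. swsusp aims for an image of at
 * most roughly 1.6 GiB unless /sys/power/image_size is changed.
 */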

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in the
 * current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
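
/*
 * A typical use of the chain allocator in this file, sketched:
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, gfp_mask, safe_needed);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, clear_page_nosave);
 *
 * as done by memory_bm_create()/alloc_rtree_node() and memory_bm_free().
 */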

/*
 * Data types related to memory bitmaps.
 *
 * struct memory_bitmap contains a list of zone bitmap objects, a struct
 * bm_position used for browsing the bitmap, and a pointer to the list of
 * pages used for allocating all of the zone bitmap objects and bitmap
 * block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
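
/*
 * A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): each bitmap
 * block is a single page holding PAGE_SIZE * BITS_PER_BYTE == 32768 bits,
 * so BM_BLOCK_SHIFT == 15. For a pfn relative to the zone start,
 * pfn >> BM_BLOCK_SHIFT selects the block and pfn & BM_BLOCK_MASK selects
 * the bit within it.
 */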

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together */
	struct list_head nodes;		/* Radix Tree inner nodes */
	struct list_head leaves;	/* Radix Tree leaves */
	unsigned long start_pfn;	/* Zone start page frame */
	unsigned long end_pfn;		/* Zone end page frame + 1 */
	struct rtree_node *rtree;	/* Radix Tree Root */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
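
/*
 * For illustration, assuming 4 KiB pages on a 64-bit kernel: each rtree
 * page holds PAGE_SIZE / sizeof(unsigned long) == 512 entries, so every
 * radix-tree level decodes BM_RTREE_LEVEL_SHIFT == 9 bits of the block
 * number (on 32-bit kernels it is 1024 entries and 10 bits per level).
 */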

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order of ascending block number
 * to keep the leaves linked list sorted; this ordering is guaranteed by
 * the zone->blocks counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents
 * @pfn and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
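
/*
 * To illustrate the walk above with hypothetical numbers (4 KiB pages,
 * 64-bit, so 512 entries and 9 bits per level): for block_nr == 1234 in a
 * two-level tree, the top level uses index (1234 >> 9) & 511 == 2 and the
 * bottom level uses index 1234 & 511 == 210 to reach the leaf page; the
 * bit inside that page is then the pfn offset & BM_BLOCK_MASK.
 */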

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, go to the next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the
 * next set bit in @bm and returns the PFN represented by it. If no more bits
 * are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
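
/*
 * The typical iteration pattern over a bitmap, as used by swsusp_free() and
 * copy_data_pages() below (a sketch):
 *
 *	memory_bm_position_reset(bm);
 *	while ((pfn = memory_bm_next_pfn(bm)) != BM_END_OF_MAP)
 *		... process pfn ...
 */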

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value
 * is greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
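
/*
 * A worked example with hypothetical numbers (4 KiB pages, 64-bit): a zone
 * spanning 1 GiB is 262144 pages, i.e. 8 bitmap blocks, plus one linked
 * page for the rtree_node structs and one inner node, so about 10 pages
 * per bitmap; doubled for the two bitmaps, roughly 20 extra pages.
 */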

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave, NosaveFree or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic().
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
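
/*
 * For instance (hypothetical numbers), __fraction(1000, 1, 4) == 250, so
 * preallocate_highmem_fraction() below asks for a quarter of nr_pages when
 * highmem makes up a quarter of the usable page frames.
 */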

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid pushing the system too hard to free memory, so estimate
 * the minimum acceptable size of a hibernation image to use as the lower
 * limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE)
		- global_node_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
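	/*
	 * In other words (hypothetical numbers): on a system with 1000000
	 * usable page frames, max_size comes out a little under 500000
	 * pages, the gap being half the metadata plus PAGES_FOR_IO, and
	 * twice the reserved_size allowance.
	 */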
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *orig_bm,
			struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

1958 /*
1959 * While the suspend pagedir was being allocated, new cold pages may
1960 * have appeared. Drain them.
1961 */
1962 drain_local_pages(NULL);
1963 copy_data_pages(&copy_bm, &orig_bm);
1964
1965 /*
1966 * End of critical section. From now on, we can write to memory,
1967 * but we should not touch disk. In particular, we must _not_ touch
1968 * swap space, except to write out the image itself.
1969 */
1970
1971 nr_pages += nr_highmem;
1972 nr_copy_pages = nr_pages;
1973 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1974
1975 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1976 nr_pages);
1977
1978 return 0;
1979 }
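
#if 0
/*
 * Editor's sketch (not compiled): the nr_meta_pages computation above
 * in concrete numbers. One PFN occupies sizeof(long) bytes, so with
 * the assumed common values PAGE_SIZE == 4096 and sizeof(long) == 8,
 * each meta page holds 512 PFNs and a 100000-page image needs
 * DIV_ROUND_UP(100000 * 8, 4096) == 196 meta pages.
 */
#include <stdio.h>

int main(void)
{
unsigned long page_size = 4096, nr_pages = 100000;
unsigned long bytes = nr_pages * sizeof(long);

printf("meta pages: %lu\n", (bytes + page_size - 1) / page_size);
return 0;
}
#endif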
1980
1981 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1982 static int init_header_complete(struct swsusp_info *info)
1983 {
1984 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1985 info->version_code = LINUX_VERSION_CODE;
1986 return 0;
1987 }
1988
1989 static char *check_image_kernel(struct swsusp_info *info)
1990 {
1991 if (info->version_code != LINUX_VERSION_CODE)
1992 return "kernel version";
1993 if (strcmp(info->uts.sysname, init_utsname()->sysname))
1994 return "system type";
1995 if (strcmp(info->uts.release, init_utsname()->release))
1996 return "kernel release";
1997 if (strcmp(info->uts.version, init_utsname()->version))
1998 return "version";
1999 if (strcmp(info->uts.machine, init_utsname()->machine))
2000 return "machine";
2001 return NULL;
2002 }
2003 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2004
2005 unsigned long snapshot_get_image_size(void)
2006 {
2007 return nr_copy_pages + nr_meta_pages + 1;
2008 }
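
/*
 * Editor's note: the total above mirrors the order in which
 * snapshot_read_next() streams the image:
 *
 * total = 1 header page (struct swsusp_info)
 * + nr_meta_pages pages of packed PFNs
 * + nr_copy_pages pages of saved data
 */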
2009
2010 static int init_header(struct swsusp_info *info)
2011 {
2012 memset(info, 0, sizeof(struct swsusp_info));
2013 info->num_physpages = get_num_physpages();
2014 info->image_pages = nr_copy_pages;
2015 info->pages = snapshot_get_image_size();
2016 info->size = info->pages;
2017 info->size <<= PAGE_SHIFT;
2018 return init_header_complete(info);
2019 }
2020
2021 /**
2022 * pack_pfns - Prepare PFNs for saving.
2023 * @buf: Memory buffer to store the PFNs in.
2024 * @bm: Memory bitmap.
2025 *
2026 * PFNs corresponding to set bits in @bm are stored in the area of memory
2027 * pointed to by @buf (1 page at a time).
2028 */
2029 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2030 {
2031 int j;
2032
2033 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2034 buf[j] = memory_bm_next_pfn(bm);
2035 if (unlikely(buf[j] == BM_END_OF_MAP))
2036 break;
2037 /* Save page key for data page (s390 only). */
2038 page_key_read(buf + j);
2039 }
2040 }
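
#if 0
/*
 * Editor's sketch (not compiled): a standalone model of the packing
 * above and of its inverse, unpack_orig_pfns() below. A page-sized
 * array of longs is filled with PFNs; a sentinel (BM_END_OF_MAP in the
 * real code, ~0UL here) marks a short final page.
 */
#include <stdio.h>

#define SKETCH_END (~0UL)
#define SLOTS (4096 / sizeof(unsigned long))

static void sketch_pack(unsigned long *buf, const unsigned long *pfns,
size_t n, size_t *pos)
{
for (size_t j = 0; j < SLOTS; j++)
buf[j] = *pos < n ? pfns[(*pos)++] : SKETCH_END;
}

int main(void)
{
unsigned long pfns[3] = { 42, 43, 99 }, buf[SLOTS];
size_t pos = 0;

sketch_pack(buf, pfns, 3, &pos);
for (size_t j = 0; j < SLOTS && buf[j] != SKETCH_END; j++)
printf("pfn %lu\n", buf[j]);
return 0;
}
#endif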
2041
2042 /**
2043 * snapshot_read_next - Get the address to read the next image page from.
2044 * @handle: Snapshot handle to be used for the reading.
2045 *
2046 * On the first call, @handle should point to a zeroed snapshot_handle
2047 * structure. The structure is then populated and a pointer to it should be
2048 * passed to this function on every subsequent call.
2049 *
2050 * On success, the function returns a positive number. Then, the caller
2051 * is allowed to read up to the returned number of bytes from the memory
2052 * location computed by the data_of() macro.
2053 *
2054 * The function returns 0 to indicate the end of the data stream. Negative
2055 * numbers are returned on errors; in that case, the structure pointed to
2056 * by @handle is not updated and should not be used any more.
2057 */
2058 int snapshot_read_next(struct snapshot_handle *handle)
2059 {
2060 if (handle->cur > nr_meta_pages + nr_copy_pages)
2061 return 0;
2062
2063 if (!buffer) {
2064 /* This makes the buffer be freed by swsusp_free() */
2065 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2066 if (!buffer)
2067 return -ENOMEM;
2068 }
2069 if (!handle->cur) {
2070 int error;
2071
2072 error = init_header((struct swsusp_info *)buffer);
2073 if (error)
2074 return error;
2075 handle->buffer = buffer;
2076 memory_bm_position_reset(&orig_bm);
2077 memory_bm_position_reset(&copy_bm);
2078 } else if (handle->cur <= nr_meta_pages) {
2079 clear_page(buffer);
2080 pack_pfns(buffer, &orig_bm);
2081 } else {
2082 struct page *page;
2083
2084 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2085 if (PageHighMem(page)) {
2086 /*
2087 * Highmem pages are copied to the buffer,
2088 * because we can't return with a kmapped
2089 * highmem page (we may not be called again).
2090 */
2091 void *kaddr;
2092
2093 kaddr = kmap_atomic(page);
2094 copy_page(buffer, kaddr);
2095 kunmap_atomic(kaddr);
2096 handle->buffer = buffer;
2097 } else {
2098 handle->buffer = page_address(page);
2099 }
2100 }
2101 handle->cur++;
2102 return PAGE_SIZE;
2103 }
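
#if 0
/*
 * Editor's sketch (not compiled): roughly how a consumer drives this
 * interface (cf. the swap writer in kernel/power/swap.c). Error
 * handling is elided; data_of() resolves to the handle's buffer in the
 * real code.
 */
#include <string.h>

static int sketch_write_image(void)
{
struct snapshot_handle handle;
int ret;

memset(&handle, 0, sizeof(handle));
while ((ret = snapshot_read_next(&handle)) > 0) {
/* persist PAGE_SIZE bytes from data_of(handle) here */
}
return ret; /* 0 at end of stream, negative on error */
}
#endif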
2104
2105 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2106 struct memory_bitmap *src)
2107 {
2108 unsigned long pfn;
2109
2110 memory_bm_position_reset(src);
2111 pfn = memory_bm_next_pfn(src);
2112 while (pfn != BM_END_OF_MAP) {
2113 memory_bm_set_bit(dst, pfn);
2114 pfn = memory_bm_next_pfn(src);
2115 }
2116 }
2117
2118 /**
2119 * mark_unsafe_pages - Mark pages that were used before hibernation.
2120 *
2121 * Mark the pages that cannot be used for storing the image during restoration,
2122 * because they conflict with the pages that had been used before hibernation.
2123 */
2124 static void mark_unsafe_pages(struct memory_bitmap *bm)
2125 {
2126 unsigned long pfn;
2127
2128 /* Clear the "free"/"unsafe" bit for all PFNs */
2129 memory_bm_position_reset(free_pages_map);
2130 pfn = memory_bm_next_pfn(free_pages_map);
2131 while (pfn != BM_END_OF_MAP) {
2132 memory_bm_clear_current(free_pages_map);
2133 pfn = memory_bm_next_pfn(free_pages_map);
2134 }
2135
2136 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2137 duplicate_memory_bitmap(free_pages_map, bm);
2138
2139 allocated_unsafe_pages = 0;
2140 }
2141
2142 static int check_header(struct swsusp_info *info)
2143 {
2144 char *reason;
2145
2146 reason = check_image_kernel(info);
2147 if (!reason && info->num_physpages != get_num_physpages())
2148 reason = "memory size";
2149 if (reason) {
2150 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2151 return -EPERM;
2152 }
2153 return 0;
2154 }
2155
2156 /**
2157 * load_header - Check the image header and copy the data from it.
2158 */
2159 static int load_header(struct swsusp_info *info)
2160 {
2161 int error;
2162
2163 restore_pblist = NULL;
2164 error = check_header(info);
2165 if (!error) {
2166 nr_copy_pages = info->image_pages;
2167 nr_meta_pages = info->pages - info->image_pages - 1;
2168 }
2169 return error;
2170 }
2171
2172 /**
2173 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2174 * @buf: Area of memory containing the PFNs.
2175 * @bm: Memory bitmap.
2176 *
2177 * For each element of the array pointed to by @buf (1 page at a time), set the
2178 * corresponding bit in @bm.
2179 */
2180 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2181 {
2182 int j;
2183
2184 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2185 if (unlikely(buf[j] == BM_END_OF_MAP))
2186 break;
2187
2188 /* Extract and buffer page key for data page (s390 only). */
2189 page_key_memorize(buf + j);
2190
2191 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2192 memory_bm_set_bit(bm, buf[j]);
2193 else
2194 return -EFAULT;
2195 }
2196
2197 return 0;
2198 }
2199
2200 #ifdef CONFIG_HIGHMEM
2201 /*
2202 * struct highmem_pbe is used for creating the list of highmem pages that
2203 * should be restored atomically during the resume from disk, because the page
2204 * frames they have occupied before the suspend are in use.
2205 */
2206 struct highmem_pbe {
2207 struct page *copy_page; /* data is here now */
2208 struct page *orig_page; /* data was here before the suspend */
2209 struct highmem_pbe *next;
2210 };
2211
2212 /*
2213 * List of highmem PBEs needed for restoring the highmem pages that were
2214 * allocated before the suspend and included in the suspend image, but have
2215 * also been allocated by the "resume" kernel, so their contents cannot be
2216 * written directly to their "original" page frames.
2217 */
2218 static struct highmem_pbe *highmem_pblist;
2219
2220 /**
2221 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2222 * @bm: Memory bitmap.
2223 *
2224 * The bits in @bm that correspond to image pages are assumed to be set.
2225 */
2226 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2227 {
2228 unsigned long pfn;
2229 unsigned int cnt = 0;
2230
2231 memory_bm_position_reset(bm);
2232 pfn = memory_bm_next_pfn(bm);
2233 while (pfn != BM_END_OF_MAP) {
2234 if (PageHighMem(pfn_to_page(pfn)))
2235 cnt++;
2236
2237 pfn = memory_bm_next_pfn(bm);
2238 }
2239 return cnt;
2240 }
2241
2242 static unsigned int safe_highmem_pages;
2243
2244 static struct memory_bitmap *safe_highmem_bm;
2245
2246 /**
2247 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2248 * @bm: Pointer to an uninitialized memory bitmap structure.
2249 * @nr_highmem_p: Pointer to the number of highmem image pages.
2250 *
2251 * Try to allocate as many highmem pages as there are highmem image pages
2252 * (@nr_highmem_p points to the variable containing the number of highmem image
2253 * pages). The pages that are "safe" (i.e. will not be overwritten when the
2254 * hibernation image is restored entirely) have the corresponding bits set in
2255 * @bm (it must be uninitialized).
2256 *
2257 * NOTE: This function should not be called if there are no highmem image pages.
2258 */
2259 static int prepare_highmem_image(struct memory_bitmap *bm,
2260 unsigned int *nr_highmem_p)
2261 {
2262 unsigned int to_alloc;
2263
2264 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2265 return -ENOMEM;
2266
2267 if (get_highmem_buffer(PG_SAFE))
2268 return -ENOMEM;
2269
2270 to_alloc = count_free_highmem_pages();
2271 if (to_alloc > *nr_highmem_p)
2272 to_alloc = *nr_highmem_p;
2273 else
2274 *nr_highmem_p = to_alloc;
2275
2276 safe_highmem_pages = 0;
2277 while (to_alloc-- > 0) {
2278 struct page *page;
2279
2280 page = alloc_page(__GFP_HIGHMEM);
2281 if (!swsusp_page_is_free(page)) {
2282 /* The page is "safe", set its bit in the bitmap */
2283 memory_bm_set_bit(bm, page_to_pfn(page));
2284 safe_highmem_pages++;
2285 }
2286 /* Mark the page as allocated */
2287 swsusp_set_page_forbidden(page);
2288 swsusp_set_page_free(page);
2289 }
2290 memory_bm_position_reset(bm);
2291 safe_highmem_bm = bm;
2292 return 0;
2293 }
2294
2295 static struct page *last_highmem_page;
2296
2297 /**
2298 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2299 *
2300 * For a given highmem image page, get a buffer that snapshot_write_next() should
2301 * return to its caller to write to.
2302 *
2303 * If the page is to be saved to its "original" page frame or a copy of
2304 * the page is to be made in high memory, @buffer is returned. Otherwise,
2305 * the copy of the page is to be made in normal memory, so the address of
2306 * the copy is returned.
2307 *
2308 * If @buffer is returned, the caller of snapshot_write_next() will write
2309 * the page's contents to @buffer, so they will have to be copied to the
2310 * right location on the next call to snapshot_write_next(), which is done
2311 * with the help of copy_last_highmem_page(). For this purpose, if
2312 * @buffer is returned, @last_highmem_page is set to the page to which
2313 * the data will have to be copied from @buffer.
2314 */
2315 static void *get_highmem_page_buffer(struct page *page,
2316 struct chain_allocator *ca)
2317 {
2318 struct highmem_pbe *pbe;
2319 void *kaddr;
2320
2321 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2322 /*
2323 * We have allocated the "original" page frame and we can
2324 * use it directly to store the loaded page.
2325 */
2326 last_highmem_page = page;
2327 return buffer;
2328 }
2329 /*
2330 * The "original" page frame has not been allocated and we have to
2331 * use a "safe" page frame to store the loaded page.
2332 */
2333 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2334 if (!pbe) {
2335 swsusp_free();
2336 return ERR_PTR(-ENOMEM);
2337 }
2338 pbe->orig_page = page;
2339 if (safe_highmem_pages > 0) {
2340 struct page *tmp;
2341
2342 /* Copy of the page will be stored in high memory */
2343 kaddr = buffer;
2344 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2345 safe_highmem_pages--;
2346 last_highmem_page = tmp;
2347 pbe->copy_page = tmp;
2348 } else {
2349 /* Copy of the page will be stored in normal memory */
2350 kaddr = safe_pages_list;
2351 safe_pages_list = safe_pages_list->next;
2352 pbe->copy_page = virt_to_page(kaddr);
2353 }
2354 pbe->next = highmem_pblist;
2355 highmem_pblist = pbe;
2356 return kaddr;
2357 }
2358
2359 /**
2360 * copy_last_highmem_page - Copy the most recent highmem image page.
2361 *
2362 * Copy the contents of a highmem image page from @buffer, where the caller of
2363 * snapshot_write_next() has stored them, to the right location represented by
2364 * @last_highmem_page.
2365 */
2366 static void copy_last_highmem_page(void)
2367 {
2368 if (last_highmem_page) {
2369 void *dst;
2370
2371 dst = kmap_atomic(last_highmem_page);
2372 copy_page(dst, buffer);
2373 kunmap_atomic(dst);
2374 last_highmem_page = NULL;
2375 }
2376 }
2377
2378 static inline int last_highmem_page_copied(void)
2379 {
2380 return !last_highmem_page;
2381 }
2382
2383 static inline void free_highmem_data(void)
2384 {
2385 if (safe_highmem_bm)
2386 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2387
2388 if (buffer)
2389 free_image_page(buffer, PG_UNSAFE_CLEAR);
2390 }
2391 #else
2392 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2393
2394 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2395 unsigned int *nr_highmem_p) { return 0; }
2396
2397 static inline void *get_highmem_page_buffer(struct page *page,
2398 struct chain_allocator *ca)
2399 {
2400 return ERR_PTR(-EINVAL);
2401 }
2402
2403 static inline void copy_last_highmem_page(void) {}
2404 static inline int last_highmem_page_copied(void) { return 1; }
2405 static inline void free_highmem_data(void) {}
2406 #endif /* CONFIG_HIGHMEM */
2407
2408 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
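
/*
 * Editor's note: each linked page donates sizeof(void *) bytes to its
 * ->next pointer, so assuming a 64-bit build with PAGE_SIZE == 4096 and
 * a three-pointer struct pbe of 24 bytes:
 *
 * PBES_PER_LINKED_PAGE = (4096 - 8) / 24 = 170
 *
 * i.e. one safe page of PBE storage covers 170 relocated image pages.
 */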
2409
2410 /**
2411 * prepare_image - Make room for loading hibernation image.
2412 * @new_bm: Uninitialized memory bitmap structure.
2413 * @bm: Memory bitmap with unsafe pages marked.
2414 *
2415 * Use @bm to mark the pages that will be overwritten in the process of
2416 * restoring the system memory state from the suspend image ("unsafe" pages)
2417 * and allocate memory for the image.
2418 *
2419 * The idea is to allocate a new memory bitmap first and then allocate
2420 * as many pages as needed for image data, but without specifying what those
2421 * pages will be used for just yet. Instead, we mark them all as allocated and
2422 * create a list of "safe" pages to be used later. On systems with high
2423 * memory a list of "safe" highmem pages is created too.
2424 */
2425 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2426 {
2427 unsigned int nr_pages, nr_highmem;
2428 struct linked_page *lp;
2429 int error;
2430
2431 /* If there is no highmem, the buffer will not be necessary */
2432 free_image_page(buffer, PG_UNSAFE_CLEAR);
2433 buffer = NULL;
2434
2435 nr_highmem = count_highmem_image_pages(bm);
2436 mark_unsafe_pages(bm);
2437
2438 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2439 if (error)
2440 goto Free;
2441
2442 duplicate_memory_bitmap(new_bm, bm);
2443 memory_bm_free(bm, PG_UNSAFE_KEEP);
2444 if (nr_highmem > 0) {
2445 error = prepare_highmem_image(bm, &nr_highmem);
2446 if (error)
2447 goto Free;
2448 }
2449 /*
2450 * Reserve some safe pages for potential later use.
2451 *
2452 * NOTE: This way we make sure there will be enough safe pages for the
2453 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2454 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2455 *
2456 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2457 */
2458 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2459 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2460 while (nr_pages > 0) {
2461 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2462 if (!lp) {
2463 error = -ENOMEM;
2464 goto Free;
2465 }
2466 lp->next = safe_pages_list;
2467 safe_pages_list = lp;
2468 nr_pages--;
2469 }
2470 /* Preallocate memory for the image */
2471 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2472 while (nr_pages > 0) {
2473 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2474 if (!lp) {
2475 error = -ENOMEM;
2476 goto Free;
2477 }
2478 if (!swsusp_page_is_free(virt_to_page(lp))) {
2479 /* The page is "safe", add it to the list */
2480 lp->next = safe_pages_list;
2481 safe_pages_list = lp;
2482 }
2483 /* Mark the page as allocated */
2484 swsusp_set_page_forbidden(virt_to_page(lp));
2485 swsusp_set_page_free(virt_to_page(lp));
2486 nr_pages--;
2487 }
2488 return 0;
2489
2490 Free:
2491 swsusp_free();
2492 return error;
2493 }
2494
2495 /**
2496 * get_buffer - Get the address to store the next image data page.
2497 *
2498 * Get the address that snapshot_write_next() should return to its caller to
2499 * write to.
2500 */
2501 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2502 {
2503 struct pbe *pbe;
2504 struct page *page;
2505 unsigned long pfn = memory_bm_next_pfn(bm);
2506
2507 if (pfn == BM_END_OF_MAP)
2508 return ERR_PTR(-EFAULT);
2509
2510 page = pfn_to_page(pfn);
2511 if (PageHighMem(page))
2512 return get_highmem_page_buffer(page, ca);
2513
2514 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2515 /*
2516 * We have allocated the "original" page frame and we can
2517 * use it directly to store the loaded page.
2518 */
2519 return page_address(page);
2520
2521 /*
2522 * The "original" page frame has not been allocated and we have to
2523 * use a "safe" page frame to store the loaded page.
2524 */
2525 pbe = chain_alloc(ca, sizeof(struct pbe));
2526 if (!pbe) {
2527 swsusp_free();
2528 return ERR_PTR(-ENOMEM);
2529 }
2530 pbe->orig_address = page_address(page);
2531 pbe->address = safe_pages_list;
2532 safe_pages_list = safe_pages_list->next;
2533 pbe->next = restore_pblist;
2534 restore_pblist = pbe;
2535 return pbe->address;
2536 }
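
#if 0
/*
 * Editor's sketch (not compiled): the PBEs chained onto restore_pblist
 * above are consumed during the atomic restore by walking the list and
 * copying each loaded page into its original frame. In essence (the
 * actual walk happens in architecture-specific restore code):
 */
static void sketch_atomic_restore(struct pbe *list)
{
struct pbe *p;

for (p = list; p; p = p->next)
copy_page(p->orig_address, p->address);
}
#endif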
2537
2538 /**
2539 * snapshot_write_next - Get the address to store the next image page.
2540 * @handle: Snapshot handle structure to guide the writing.
2541 *
2542 * On the first call, @handle should point to a zeroed snapshot_handle
2543 * structure. The structure is then populated and a pointer to it should be
2544 * passed to this function on every subsequent call.
2545 *
2546 * On success, the function returns a positive number. Then, the caller
2547 * is allowed to write up to the returned number of bytes to the memory
2548 * location computed by the data_of() macro.
2549 *
2550 * The function returns 0 to indicate the "end of file" condition. Negative
2551 * numbers are returned on errors, in which case the structure pointed to by
2552 * @handle is not updated and should not be used any more.
2553 */
2554 int snapshot_write_next(struct snapshot_handle *handle)
2555 {
2556 static struct chain_allocator ca;
2557 int error = 0;
2558
2559 /* Check if we have already loaded the entire image */
2560 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2561 return 0;
2562
2563 handle->sync_read = 1;
2564
2565 if (!handle->cur) {
2566 if (!buffer)
2567 /* This makes the buffer be freed by swsusp_free() */
2568 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2569
2570 if (!buffer)
2571 return -ENOMEM;
2572
2573 handle->buffer = buffer;
2574 } else if (handle->cur == 1) {
2575 error = load_header(buffer);
2576 if (error)
2577 return error;
2578
2579 safe_pages_list = NULL;
2580
2581 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2582 if (error)
2583 return error;
2584
2585 /* Allocate buffer for page keys. */
2586 error = page_key_alloc(nr_copy_pages);
2587 if (error)
2588 return error;
2589
2590 hibernate_restore_protection_begin();
2591 } else if (handle->cur <= nr_meta_pages + 1) {
2592 error = unpack_orig_pfns(buffer, &copy_bm);
2593 if (error)
2594 return error;
2595
2596 if (handle->cur == nr_meta_pages + 1) {
2597 error = prepare_image(&orig_bm, &copy_bm);
2598 if (error)
2599 return error;
2600
2601 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2602 memory_bm_position_reset(&orig_bm);
2603 restore_pblist = NULL;
2604 handle->buffer = get_buffer(&orig_bm, &ca);
2605 handle->sync_read = 0;
2606 if (IS_ERR(handle->buffer))
2607 return PTR_ERR(handle->buffer);
2608 }
2609 } else {
2610 copy_last_highmem_page();
2611 /* Restore page key for data page (s390 only). */
2612 page_key_write(handle->buffer);
2613 hibernate_restore_protect_page(handle->buffer);
2614 handle->buffer = get_buffer(&orig_bm, &ca);
2615 if (IS_ERR(handle->buffer))
2616 return PTR_ERR(handle->buffer);
2617 if (handle->buffer != buffer)
2618 handle->sync_read = 0;
2619 }
2620 handle->cur++;
2621 return PAGE_SIZE;
2622 }
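
#if 0
/*
 * Editor's sketch (not compiled): roughly how a producer feeds image
 * pages in through this interface (cf. the image reader in
 * kernel/power/swap.c). Error handling is elided and
 * sketch_fill_next_page() is a hypothetical stand-in for whatever
 * copies the next image page into the returned buffer.
 */
#include <string.h>

static int sketch_load_image(void)
{
struct snapshot_handle handle;
int ret;

memset(&handle, 0, sizeof(handle));
while ((ret = snapshot_write_next(&handle)) > 0)
sketch_fill_next_page(data_of(handle));
snapshot_write_finalize(&handle);
return ret;
}
#endif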
2623
2624 /**
2625 * snapshot_write_finalize - Complete the loading of a hibernation image.
2626 *
2627 * Must be called after the last call to snapshot_write_next() in case the last
2628 * page in the image happens to be a highmem page and its contents should be
2630 * stored in highmem. Additionally, it recycles bitmap memory that is no
2631 * longer needed.
2631 */
2632 void snapshot_write_finalize(struct snapshot_handle *handle)
2633 {
2634 copy_last_highmem_page();
2635 /* Restore page key for data page (s390 only). */
2636 page_key_write(handle->buffer);
2637 page_key_free();
2638 hibernate_restore_protect_page(handle->buffer);
2639 /* Do that only if we have loaded the image entirely */
2640 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2641 memory_bm_recycle(&orig_bm);
2642 free_highmem_data();
2643 }
2644 }
2645
2646 int snapshot_image_loaded(struct snapshot_handle *handle)
2647 {
2648 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2649 handle->cur <= nr_meta_pages + nr_copy_pages);
2650 }
2651
2652 #ifdef CONFIG_HIGHMEM
2653 /* Assumes that @buf is ready and points to a "safe" page */
2654 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2655 void *buf)
2656 {
2657 void *kaddr1, *kaddr2;
2658
2659 kaddr1 = kmap_atomic(p1);
2660 kaddr2 = kmap_atomic(p2);
2661 copy_page(buf, kaddr1);
2662 copy_page(kaddr1, kaddr2);
2663 copy_page(kaddr2, buf);
2664 kunmap_atomic(kaddr2);
2665 kunmap_atomic(kaddr1);
2666 }
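
#if 0
/*
 * Editor's sketch (not compiled): the three-copy exchange above, shown
 * standalone. A temporary "safe" buffer lets two pages trade contents
 * without either being clobbered.
 */
#include <string.h>

static void sketch_swap(void *a, void *b, void *tmp, size_t len)
{
memcpy(tmp, a, len); /* save a */
memcpy(a, b, len); /* a <- b */
memcpy(b, tmp, len); /* b <- old a */
}
#endif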
2667
2668 /**
2669 * restore_highmem - Put highmem image pages into their original locations.
2670 *
2671 * For each highmem page that was in use before hibernation and is included in
2672 * the image, and also has been allocated by the "restore" kernel, swap its
2673 * current contents with the previous (i.e. "before hibernation") ones.
2674 *
2675 * If the restore eventually fails, we can call this function once again and
2676 * restore the highmem state as seen by the restore kernel.
2677 */
2678 int restore_highmem(void)
2679 {
2680 struct highmem_pbe *pbe = highmem_pblist;
2681 void *buf;
2682
2683 if (!pbe)
2684 return 0;
2685
2686 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2687 if (!buf)
2688 return -ENOMEM;
2689
2690 while (pbe) {
2691 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2692 pbe = pbe->next;
2693 }
2694 free_image_page(buf, PG_UNSAFE_CLEAR);
2695 return 0;
2696 }
2697 #endif /* CONFIG_HIGHMEM */