/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
        hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
        hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
        hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
        reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
        image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only).
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
        if (safe_pages_list) {
                void *ret = safe_pages_list;

                safe_pages_list = safe_pages_list->next;
                memset(ret, 0, PAGE_SIZE);
                return ret;
        }
        return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}

static void recycle_safe_page(void *page_address)
{
        struct linked_page *lp = page_address;

        lp->next = safe_pages_list;
        safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
                                      int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
        struct linked_page *chain;	/* the chain */
        unsigned int used_space;	/* total size of objects allocated out
                                           of the current page */
        gfp_t gfp_mask;		/* mask for allocating pages */
        int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
                       int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
                                        get_image_page(ca->gfp_mask, PG_ANY);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
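
/*
 * Example (an illustrative sketch, not called anywhere in this file): a
 * typical chain_allocator round trip. Objects are carved out of one page
 * until it is full and everything is released in one go, which is exactly
 * how the bitmap code below uses it:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	if (node) {
 *		// use node ...
 *	}
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */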

/**
 * Data types related to memory bitmaps.
 *
 * struct memory_bitmap contains a list of zone bitmap objects, a struct
 * bm_position used for browsing the bitmap, and a pointer to the list of
 * pages used for allocating all of the zone bitmap objects and bitmap
 * block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
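
/*
 * Illustrative arithmetic (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):
 * each bitmap block is one page and covers PAGE_SIZE * 8 == 32768 PFNs,
 * so for a PFN relative to the start of its zone
 *
 *	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;	// / 32768
 *	bit_nr   = (pfn - zone->start_pfn) &  BM_BLOCK_MASK;	// % 32768
 *
 * e.g. relative PFN 40000 lands in block 1, bit 7232.
 */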

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing.
 */
struct rtree_node {
        struct list_head list;
        unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
        struct list_head list;		/* Link Zones together         */
        struct list_head nodes;		/* Radix Tree inner nodes      */
        struct list_head leaves;	/* Radix Tree leaves           */
        unsigned long start_pfn;	/* Zone start page frame       */
        unsigned long end_pfn;		/* Zone end page frame + 1     */
        struct rtree_node *rtree;	/* Radix Tree Root             */
        int levels;			/* Number of Radix Tree Levels */
        unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct mem_zone_bm_rtree *zone;
        struct rtree_node *node;
        unsigned long node_pfn;
        int node_bit;
};

struct memory_bitmap {
        struct list_head zones;
        struct linked_page *p_list;	/* list of pages used to store zone
                                           bitmap objects and bitmap block
                                           objects */
        struct bm_position cur;		/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
                                           struct chain_allocator *ca,
                                           struct list_head *list)
{
        struct rtree_node *node;

        node = chain_alloc(ca, sizeof(struct rtree_node));
        if (!node)
                return NULL;

        node->data = get_image_page(gfp_mask, safe_needed);
        if (!node->data)
                return NULL;

        list_add_tail(&node->list, list);

        return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
                           int safe_needed, struct chain_allocator *ca)
{
        struct rtree_node *node, *block, **dst;
        unsigned int levels_needed, block_nr;
        int i;

        block_nr = zone->blocks;
        levels_needed = 0;

        /* How many levels do we need for this block nr? */
        while (block_nr) {
                levels_needed += 1;
                block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }

        /* Make sure the rtree has enough levels */
        for (i = zone->levels; i < levels_needed; i++) {
                node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                        &zone->nodes);
                if (!node)
                        return -ENOMEM;

                node->data[0] = (unsigned long)zone->rtree;
                zone->rtree = node;
                zone->levels += 1;
        }

        /* Allocate new block */
        block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
        if (!block)
                return -ENOMEM;

        /* Now walk the rtree to insert the block */
        node = zone->rtree;
        dst = &zone->rtree;
        block_nr = zone->blocks;
        for (i = zone->levels; i > 0; i--) {
                int index;

                if (!node) {
                        node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                                &zone->nodes);
                        if (!node)
                                return -ENOMEM;
                        *dst = node;
                }

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                dst = (struct rtree_node **)&((*dst)->data[index]);
                node = *dst;
        }

        zone->blocks += 1;
        *dst = block;

        return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
                                                      int safe_needed,
                                                      struct chain_allocator *ca,
                                                      unsigned long start,
                                                      unsigned long end)
{
        struct mem_zone_bm_rtree *zone;
        unsigned int i, nr_blocks;
        unsigned long pages;

        pages = end - start;
        zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
        if (!zone)
                return NULL;

        INIT_LIST_HEAD(&zone->nodes);
        INIT_LIST_HEAD(&zone->leaves);
        zone->start_pfn = start;
        zone->end_pfn = end;
        nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        for (i = 0; i < nr_blocks; i++) {
                if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
                        free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
                        return NULL;
                }
        }

        return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                free_image_page(node->data, clear_nosave_free);

        list_for_each_entry(node, &zone->leaves, list)
                free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
                                  list);
        bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                  struct rtree_node, list);
        bm->cur.node_pfn = 0;
        bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
        struct list_head hook;
        unsigned long start;
        unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone_end_pfn(zone);

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}
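
/*
 * Illustrative example of the merging above (hypothetical PFN ranges):
 * zones spanning [0, 100), [80, 150) and [200, 300) collapse into the two
 * extents [0, 150) and [200, 300), because the first two ranges overlap
 * while the third is disjoint and therefore gets its own struct mem_extent.
 */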

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
                            int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->zones);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct mem_zone_bm_rtree *zone;

                zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
                                            ext->start, ext->end);
                if (!zone) {
                        error = -ENOMEM;
                        goto Error;
                }
                list_add_tail(&zone->list, &bm->zones);
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the freed pages.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct mem_zone_bm_rtree *zone;

        list_for_each_entry(zone, &bm->zones, list)
                free_zone_bm_rtree(zone, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->zones);
}
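
/*
 * Typical bitmap life cycle (a sketch of how these helpers combine; the
 * same pattern appears in hibernate_preallocate_memory() below):
 *
 *	struct memory_bitmap bm;
 *
 *	if (memory_bm_create(&bm, GFP_KERNEL, PG_ANY))
 *		return -ENOMEM;
 *	memory_bm_set_bit(&bm, pfn);		// mark a page frame
 *	if (memory_bm_test_bit(&bm, pfn))	// query it
 *		;
 *	memory_bm_free(&bm, PG_UNSAFE_CLEAR);	// release everything
 */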

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct mem_zone_bm_rtree *curr, *zone;
        struct rtree_node *node;
        int i, block_nr;

        zone = bm->cur.zone;

        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
                goto zone_found;

        zone = NULL;

        /* Find the right zone */
        list_for_each_entry(curr, &bm->zones, list) {
                if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
                        zone = curr;
                        break;
                }
        }

        if (!zone)
                return -EFAULT;

zone_found:
        /*
         * We have found the zone. Now walk the radix tree to find the leaf
         * node for our PFN.
         */
        node = bm->cur.node;
        if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
                goto node_found;

        node = zone->rtree;
        block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

        for (i = zone->levels; i > 0; i--) {
                int index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                BUG_ON(node->data[index] == 0);
                node = (struct rtree_node *)node->data[index];
        }

node_found:
        /* Update last position */
        bm->cur.zone = zone;
        bm->cur.node = node;
        bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

        /* Set return values */
        *addr = node->data;
        *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

        return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);

        return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
        int bit;

        bit = max(bm->cur.node_bit - 1, 0);
        clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
        if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
                bm->cur.node = list_entry(bm->cur.node->list.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit = 0;
                touch_softlockup_watchdog();
                return true;
        }

        /* No more nodes, goto next zone */
        if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
                bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                          struct mem_zone_bm_rtree, list);
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
                bm->cur.node_bit = 0;
                return true;
        }

        /* No more zones */
        return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        unsigned long bits, pfn, pages;
        int bit;

        do {
                pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
                bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
                bit = find_next_bit(bm->cur.node->data, bits,
                                    bm->cur.node_bit);
                if (bit < bits) {
                        pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
                        bm->cur.node_bit = bit + 1;
                        return pfn;
                }
        } while (rtree_next_node(bm));

        return BM_END_OF_MAP;
}
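
/*
 * The canonical iteration pattern over all set bits (used, for instance,
 * by clear_free_pages() and swsusp_free() below):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm)) {
 *		// pfn is the next page frame whose bit is set
 *	}
 */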

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                recycle_safe_page(node->data);

        list_for_each_entry(node, &zone->leaves, list)
                recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
        struct mem_zone_bm_rtree *zone;
        struct linked_page *p_list;

        list_for_each_entry(zone, &bm->zones, list)
                recycle_zone_bm_rtree(zone);

        p_list = bm->p_list;
        while (p_list) {
                struct linked_page *lp = p_list;

                p_list = lp->next;
                recycle_safe_page(lp);
        }
}

/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
                                     unsigned long end_pfn, int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                    struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* During init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else {
                /* This allocation cannot fail */
                region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
        }
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
               (unsigned long long) start_pfn << PAGE_SHIFT,
               ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
                         (unsigned long long) region->start_pfn << PAGE_SHIFT,
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        if (forbidden_pages_map && free_pages_map)
                return 0;
        else
                BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("PM: Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
                return;

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("PM: Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;

        if (WARN_ON(!free_pages_map))
                return;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (pfn_valid(pfn))
                        clear_highpage(pfn_to_page(pfn));

                pfn = memory_bm_next_pfn(bm);
        }
        memory_bm_position_reset(bm);
        pr_info("PM: free pages cleared after restore\n");
#endif /* CONFIG_PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        return 2 * rtree;
}
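
/*
 * Worked example (assuming 4 KiB pages and 64-bit longs): for a zone
 * spanning 4 GiB, i.e. 1048576 PFNs, there are
 * DIV_ROUND_UP(1048576, 32768) == 32 leaf blocks, plus one page for the
 * 32 struct rtree_node wrappers allocated from the chain, plus one inner
 * level of DIV_ROUND_UP(32, 512) == 1 node, giving rtree == 34 and a
 * returned estimate of 68 pages. The factor of two covers the pair of
 * bitmaps built from this estimate (orig_bm and copy_bm).
 */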

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_populated_zone(zone)
                if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(!PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
            PageReserved(page))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(zone, pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
        return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageReserved(page)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(zone, pfn))
                                n++;
        }
        return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
        if (kernel_page_present(s_page)) {
                do_copy_page(dst, page_address(s_page));
        } else {
                kernel_map_pages(s_page, 1, 1);
                do_copy_page(dst, page_address(s_page));
                kernel_map_pages(s_page, 1, 0);
        }
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page);
                dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
                kunmap_atomic(dst);
                kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /*
                         * The page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
                        kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        safe_copy_page(page_address(pfn_to_page(dst_pfn)),
                       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
                            struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_populated_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        for (;;) {
                pfn = memory_bm_next_pfn(orig_bm);
                if (unlikely(pfn == BM_END_OF_MAP))
                        break;
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        }
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
        unsigned long fb_pfn, fr_pfn;

        if (!forbidden_pages_map || !free_pages_map)
                goto out;

        memory_bm_position_reset(forbidden_pages_map);
        memory_bm_position_reset(free_pages_map);

loop:
        fr_pfn = memory_bm_next_pfn(free_pages_map);
        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

        /*
         * Find the next bit set in both bitmaps. This is guaranteed to
         * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
         */
        do {
                if (fb_pfn < fr_pfn)
                        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
                if (fr_pfn < fb_pfn)
                        fr_pfn = memory_bm_next_pfn(free_pages_map);
        } while (fb_pfn != fr_pfn);

        if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
                struct page *page = pfn_to_page(fr_pfn);

                memory_bm_clear_current(forbidden_pages_map);
                memory_bm_clear_current(free_pages_map);
                hibernate_restore_unprotect_page(page_address(page));
                __free_page(page);
                goto loop;
        }

out:
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
        hibernate_restore_protection_end();
}
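
/*
 * The loop above is a classic sorted-sequence intersection: both iterators
 * return PFNs in ascending order, so advancing whichever one is behind must
 * eventually either make them meet (a page that is both "forbidden" and
 * "free", i.e. an image page) or run both off the end. A sketch of the same
 * technique over two sorted arrays a[0..n) and b[0..m):
 *
 *	while (i < n && j < m) {
 *		if (a[i] < b[j]) {
 *			i++;
 *		} else if (b[j] < a[i]) {
 *			j++;
 *		} else {
 *			// match: a[i] == b[j]
 *			i++;
 *			j++;
 *		}
 *	}
 */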

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated.
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
        unsigned long nr_alloc = 0;

        while (nr_pages > 0) {
                struct page *page;

                page = alloc_image_page(mask);
                if (!page)
                        break;
                memory_bm_set_bit(&copy_bm, page_to_pfn(page));
                if (PageHighMem(page))
                        alloc_highmem++;
                else
                        alloc_normal++;
                nr_pages--;
                nr_alloc++;
        }

        return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
                                              unsigned long avail_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;

        alloc = avail_normal - alloc_normal;
        if (nr_pages < alloc)
                alloc = nr_pages;

        return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
        x *= multiplier;
        do_div(x, base);
        return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                  unsigned long highmem,
                                                  unsigned long total)
{
        unsigned long alloc = __fraction(nr_pages, highmem, total);

        return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                         unsigned long highmem,
                                                         unsigned long total)
{
        return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
        unsigned long save, to_free_normal, to_free_highmem, free;

        save = count_data_pages();
        if (alloc_normal >= save) {
                to_free_normal = alloc_normal - save;
                save = 0;
        } else {
                to_free_normal = 0;
                save -= alloc_normal;
        }
        save += count_highmem_pages();
        if (alloc_highmem >= save) {
                to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
                save -= alloc_highmem;
                if (to_free_normal > save)
                        to_free_normal -= save;
                else
                        to_free_normal = 0;
        }
        free = to_free_normal + to_free_highmem;

        memory_bm_position_reset(&copy_bm);

        while (to_free_normal > 0 || to_free_highmem > 0) {
                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
                struct page *page = pfn_to_page(pfn);

                if (PageHighMem(page)) {
                        if (!to_free_highmem)
                                continue;
                        to_free_highmem--;
                        alloc_highmem--;
                } else {
                        if (!to_free_normal)
                                continue;
                        to_free_normal--;
                        alloc_normal--;
                }
                memory_bm_clear_bit(&copy_bm, pfn);
                swsusp_unset_page_forbidden(page);
                swsusp_unset_page_free(page);
                __free_page(page);
        }

        return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
        unsigned long size;

        size = global_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
                + global_node_page_state(NR_INACTIVE_FILE)
                - global_node_page_state(NR_FILE_MAPPED);

        return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and by reserved_size divided by PAGE_SIZE, which is tunable
 * through /sys/power/reserved_size, respectively). To make this happen, we
 * compute the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
        struct zone *zone;
        unsigned long saveable, size, max_size, count, highmem, pages = 0;
        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
        ktime_t start, stop;
        int error;

        printk(KERN_INFO "PM: Preallocating image memory... ");
        start = ktime_get();

        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        alloc_normal = 0;
        alloc_highmem = 0;

        /* Count the number of saveable data pages. */
        save_highmem = count_highmem_pages();
        saveable = count_data_pages();

        /*
         * Compute the total number of page frames we can use (count) and the
         * number of pages needed for image metadata (size).
         */
        count = saveable;
        saveable += save_highmem;
        highmem = save_highmem;
        size = 0;
        for_each_populated_zone(zone) {
                size += snapshot_additional_pages(zone);
                if (is_highmem(zone))
                        highmem += zone_page_state(zone, NR_FREE_PAGES);
                else
                        count += zone_page_state(zone, NR_FREE_PAGES);
        }
        avail_normal = count;
        count += highmem;
        count -= totalreserve_pages;

        /* Add number of pages required for page keys (s390 only). */
        size += page_key_additional_pages(saveable);

        /* Compute the maximum number of saveable pages to leave in memory. */
        max_size = (count - (size + PAGES_FOR_IO)) / 2
                        - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
        /* Compute the desired number of image pages specified by image_size. */
        size = DIV_ROUND_UP(image_size, PAGE_SIZE);
        if (size > max_size)
                size = max_size;
        /*
         * If the desired number of image pages is at least as large as the
         * current number of saveable pages in memory, allocate page frames for
         * the image and we're done.
         */
        if (size >= saveable) {
                pages = preallocate_image_highmem(save_highmem);
                pages += preallocate_image_memory(saveable - pages, avail_normal);
                goto out;
        }

        /* Estimate the minimum size of the image. */
        pages = minimum_image_size(saveable);
        /*
         * To avoid excessive pressure on the normal zone, leave room in it to
         * accommodate an image of the minimum size (unless it's already too
         * small, in which case don't preallocate pages from it at all).
         */
        if (avail_normal > pages)
                avail_normal -= pages;
        else
                avail_normal = 0;
        if (size < pages)
                size = min_t(unsigned long, pages, max_size);

        /*
         * Let the memory management subsystem know that we're going to need a
         * large number of page frames to allocate and make it free some memory.
         * NOTE: If this is not done, performance will be hurt badly in some
         * test cases.
         */
        shrink_all_memory(saveable - size);

        /*
         * The number of saveable pages in memory was too high, so apply some
         * pressure to decrease it. First, make room for the largest possible
         * image and fail if that doesn't work. Next, try to decrease the size
         * of the image as much as indicated by 'size' using allocations from
         * highmem and non-highmem zones separately.
         */
        pages_highmem = preallocate_image_highmem(highmem / 2);
        alloc = count - max_size;
        if (alloc > pages_highmem)
                alloc -= pages_highmem;
        else
                alloc = 0;
        pages = preallocate_image_memory(alloc, avail_normal);
        if (pages < alloc) {
                /* We have exhausted non-highmem pages, try highmem. */
                alloc -= pages;
                pages += pages_highmem;
                pages_highmem = preallocate_image_highmem(alloc);
                if (pages_highmem < alloc)
                        goto err_out;
                pages += pages_highmem;
                /*
                 * size is the desired number of saveable pages to leave in
                 * memory, so try to preallocate (all memory - size) pages.
                 */
                alloc = (count - pages) - size;
                pages += preallocate_image_highmem(alloc);
        } else {
                /*
                 * There are approximately max_size saveable pages at this point
                 * and we want to reduce this number down to size.
                 */
                alloc = max_size - size;
                size = preallocate_highmem_fraction(alloc, highmem, count);
                pages_highmem += size;
                alloc -= size;
                size = preallocate_image_memory(alloc, avail_normal);
                pages_highmem += preallocate_image_highmem(alloc - size);
                pages += pages_highmem + size;
        }

        /*
         * We only need as many page frames for the image as there are saveable
         * pages in memory, but we have allocated more. Release the excessive
         * ones now.
         */
        pages -= free_unnecessary_pages();

 out:
        stop = ktime_get();
        printk(KERN_CONT "done (allocated %lu pages)\n", pages);
        swsusp_show_speed(start, stop, pages, "Allocated");

        return 0;

 err_out:
        printk(KERN_CONT "\n");
        swsusp_free();
        return -ENOMEM;
}
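
/*
 * Worked example of the max_size computation above (made-up numbers,
 * assuming 4 KiB pages, PAGES_FOR_IO == 1024 and a reserved_size of 1 MiB,
 * i.e. a DIV_ROUND_UP term of 256): with count == 1000000 usable page
 * frames and size == 2000 metadata pages,
 *
 *	max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 256
 *	         = 996976 / 2 - 512 = 497976
 *
 * so at most ~1.9 GiB of saveable pages may be left in memory; everything
 * beyond that has to be preallocated for the image.
 */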
1833
1834 #ifdef CONFIG_HIGHMEM
1835 /**
1836 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1837 *
1838 * Compute the number of non-highmem pages that will be necessary for creating
1839 * copies of highmem pages.
1840 */
1841 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1842 {
1843 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1844
1845 if (free_highmem >= nr_highmem)
1846 nr_highmem = 0;
1847 else
1848 nr_highmem -= free_highmem;
1849
1850 return nr_highmem;
1851 }
1852 #else
1853 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1854 #endif /* CONFIG_HIGHMEM */
1855
1856 /**
1857 * enough_free_mem - Check if there is enough free memory for the image.
1858 */
1859 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1860 {
1861 struct zone *zone;
1862 unsigned int free = alloc_normal;
1863
1864 for_each_populated_zone(zone)
1865 if (!is_highmem(zone))
1866 free += zone_page_state(zone, NR_FREE_PAGES);
1867
1868 nr_pages += count_pages_for_highmem(nr_highmem);
1869 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1870 nr_pages, PAGES_FOR_IO, free);
1871
1872 return free > nr_pages + PAGES_FOR_IO;
1873 }
1874
1875 #ifdef CONFIG_HIGHMEM
1876 /**
1877 * get_highmem_buffer - Allocate a buffer for highmem pages.
1878 *
1879 * If there are some highmem pages in the hibernation image, we may need a
1880 * buffer to copy them and/or load their data.
1881 */
1882 static inline int get_highmem_buffer(int safe_needed)
1883 {
1884 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1885 return buffer ? 0 : -ENOMEM;
1886 }
1887
1888 /**
1889 * alloc_highmem_image_pages - Allocate some highmem pages for the image.
1890 *
1891 * Try to allocate as many pages as needed, but if the number of free highmem
1892 * pages is less than that, allocate them all.
1893 */
1894 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1895 unsigned int nr_highmem)
1896 {
1897 unsigned int to_alloc = count_free_highmem_pages();
1898
1899 if (to_alloc > nr_highmem)
1900 to_alloc = nr_highmem;
1901
1902 nr_highmem -= to_alloc;
1903 while (to_alloc-- > 0) {
1904 struct page *page;
1905
1906 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1907 memory_bm_set_bit(bm, page_to_pfn(page));
1908 }
1909 return nr_highmem;
1910 }
1911 #else
1912 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1913
1914 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1915 unsigned int n) { return 0; }
1916 #endif /* CONFIG_HIGHMEM */
1917
1918 /**
1919 * swsusp_alloc - Allocate memory for hibernation image.
1920 *
1921 * We first try to allocate as many highmem pages as there are
1922 * saveable highmem pages in the system. If that fails, we allocate
1923 * non-highmem pages for the copies of the remaining highmem ones.
1924 *
1925 * In this approach it is likely that the copies of highmem pages will
1926 * also be located in high memory, because of the way in which
1927 * copy_data_pages() works.
1928 */
1929 static int swsusp_alloc(struct memory_bitmap *copy_bm,
1930 unsigned int nr_pages, unsigned int nr_highmem)
1932 {
1933 if (nr_highmem > 0) {
1934 if (get_highmem_buffer(PG_ANY))
1935 goto err_out;
1936 if (nr_highmem > alloc_highmem) {
1937 nr_highmem -= alloc_highmem;
1938 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1939 }
1940 }
1941 if (nr_pages > alloc_normal) {
1942 nr_pages -= alloc_normal;
1943 while (nr_pages-- > 0) {
1944 struct page *page;
1945
1946 page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1947 if (!page)
1948 goto err_out;
1949 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1950 }
1951 }
1952
1953 return 0;
1954
1955 err_out:
1956 swsusp_free();
1957 return -ENOMEM;
1958 }
1959
1960 asmlinkage __visible int swsusp_save(void)
1961 {
1962 unsigned int nr_pages, nr_highmem;
1963
1964 printk(KERN_INFO "PM: Creating hibernation image:\n");
1965
1966 drain_local_pages(NULL);
1967 nr_pages = count_data_pages();
1968 nr_highmem = count_highmem_pages();
1969 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1970
1971 if (!enough_free_mem(nr_pages, nr_highmem)) {
1972 printk(KERN_ERR "PM: Not enough free memory\n");
1973 return -ENOMEM;
1974 }
1975
1976 if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1977 printk(KERN_ERR "PM: Memory allocation failed\n");
1978 return -ENOMEM;
1979 }
1980
1981 /*
1982 * While allocating the suspend pagedir, new cold pages may appear.
1983 * Kill them.
1984 */
1985 drain_local_pages(NULL);
1986 copy_data_pages(&copy_bm, &orig_bm);
1987
1988 /*
1989 * End of critical section. From now on, we can write to memory,
1990 * but we should not touch disk. In particular, this means we must _not_
1991 * touch swap space, except of course to write out the image itself.
1992 */
1993
1994 nr_pages += nr_highmem;
1995 nr_copy_pages = nr_pages;
1996 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1997
1998 printk(KERN_INFO "PM: Hibernation image created (%u pages copied)\n",
1999 nr_pages);
2000
2001 return 0;
2002 }
2003
2004 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2005 static int init_header_complete(struct swsusp_info *info)
2006 {
2007 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2008 info->version_code = LINUX_VERSION_CODE;
2009 return 0;
2010 }
2011
2012 static char *check_image_kernel(struct swsusp_info *info)
2013 {
2014 if (info->version_code != LINUX_VERSION_CODE)
2015 return "kernel version";
2016 if (strcmp(info->uts.sysname, init_utsname()->sysname))
2017 return "system type";
2018 if (strcmp(info->uts.release, init_utsname()->release))
2019 return "kernel release";
2020 if (strcmp(info->uts.version, init_utsname()->version))
2021 return "version";
2022 if (strcmp(info->uts.machine, init_utsname()->machine))
2023 return "machine";
2024 return NULL;
2025 }
2026 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2027
2028 unsigned long snapshot_get_image_size(void)
2029 {
2030 return nr_copy_pages + nr_meta_pages + 1;
2031 }
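
/*
 * A worked example (assuming a 4 KiB PAGE_SIZE and 8-byte longs, i.e. 512
 * PFNs per meta page): for nr_copy_pages = 100000 data pages,
 * nr_meta_pages = DIV_ROUND_UP(100000 * 8, 4096) = 196, so the image is
 * 100000 + 196 + 1 = 100197 pages, the extra page being the swsusp_info
 * header.
 */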
2032
2033 static int init_header(struct swsusp_info *info)
2034 {
2035 memset(info, 0, sizeof(struct swsusp_info));
2036 info->num_physpages = get_num_physpages();
2037 info->image_pages = nr_copy_pages;
2038 info->pages = snapshot_get_image_size();
2039 info->size = info->pages;
2040 info->size <<= PAGE_SHIFT;
2041 return init_header_complete(info);
2042 }
2043
2044 /**
2045 * pack_pfns - Prepare PFNs for saving.
2046 * @buf: Memory buffer to store the PFNs in.
2047 * @bm: Memory bitmap.
2048 *
2049 * PFNs corresponding to set bits in @bm are stored in the area of memory
2050 * pointed to by @buf (1 page at a time).
2051 */
2052 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2053 {
2054 int j;
2055
2056 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2057 buf[j] = memory_bm_next_pfn(bm);
2058 if (unlikely(buf[j] == BM_END_OF_MAP))
2059 break;
2060 /* Save page key for data page (s390 only). */
2061 page_key_read(buf + j);
2062 }
2063 }
2064
2065 /**
2066 * snapshot_read_next - Get the address to read the next image page from.
2067 * @handle: Snapshot handle to be used for the reading.
2068 *
2069 * On the first call, @handle should point to a zeroed snapshot_handle
2070 * structure. The structure is then populated and a pointer to it should be
2071 * passed to this function on every subsequent call.
2072 *
2073 * On success, the function returns a positive number. Then, the caller
2074 * is allowed to read up to the returned number of bytes from the memory
2075 * location computed by the data_of() macro.
2076 *
2077 * The function returns 0 to indicate the end of the data stream and negative
2078 * numbers on errors. In the latter case, the structure
2079 * pointed to by @handle is not updated and should not be used any more.
2080 */
2081 int snapshot_read_next(struct snapshot_handle *handle)
2082 {
2083 if (handle->cur > nr_meta_pages + nr_copy_pages)
2084 return 0;
2085
2086 if (!buffer) {
2087 /* This causes the buffer to be freed by swsusp_free() */
2088 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2089 if (!buffer)
2090 return -ENOMEM;
2091 }
2092 if (!handle->cur) {
2093 int error;
2094
2095 error = init_header((struct swsusp_info *)buffer);
2096 if (error)
2097 return error;
2098 handle->buffer = buffer;
2099 memory_bm_position_reset(&orig_bm);
2100 memory_bm_position_reset(&copy_bm);
2101 } else if (handle->cur <= nr_meta_pages) {
2102 clear_page(buffer);
2103 pack_pfns(buffer, &orig_bm);
2104 } else {
2105 struct page *page;
2106
2107 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2108 if (PageHighMem(page)) {
2109 /*
2110 * Highmem pages are copied to the buffer,
2111 * because we can't return with a kmapped
2112 * highmem page (we may not be called again).
2113 */
2114 void *kaddr;
2115
2116 kaddr = kmap_atomic(page);
2117 copy_page(buffer, kaddr);
2118 kunmap_atomic(kaddr);
2119 handle->buffer = buffer;
2120 } else {
2121 handle->buffer = page_address(page);
2122 }
2123 }
2124 handle->cur++;
2125 return PAGE_SIZE;
2126 }
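
/*
 * A minimal usage sketch (not part of the original file) showing how a
 * caller is expected to drive snapshot_read_next(); the write_chunk
 * callback is a hypothetical stand-in for whatever actually stores the
 * data, e.g. the swap writer.
 */
static int __maybe_unused example_save_loop(struct snapshot_handle *handle,
int (*write_chunk)(void *buf, size_t count))
{
int ret;

for (;;) {
ret = snapshot_read_next(handle);
if (ret <= 0)
return ret; /* 0: end of image, < 0: error */
/* Up to 'ret' bytes may now be read from data_of(). */
ret = write_chunk(data_of(*handle), ret);
if (ret)
return ret;
}
}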
2127
2128 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2129 struct memory_bitmap *src)
2130 {
2131 unsigned long pfn;
2132
2133 memory_bm_position_reset(src);
2134 pfn = memory_bm_next_pfn(src);
2135 while (pfn != BM_END_OF_MAP) {
2136 memory_bm_set_bit(dst, pfn);
2137 pfn = memory_bm_next_pfn(src);
2138 }
2139 }
2140
2141 /**
2142 * mark_unsafe_pages - Mark pages that were used before hibernation.
2143 *
2144 * Mark the pages that cannot be used for storing the image during restoration,
2145 * because they conflict with the pages that had been used before hibernation.
2146 */
2147 static void mark_unsafe_pages(struct memory_bitmap *bm)
2148 {
2149 unsigned long pfn;
2150
2151 /* Clear the "free"/"unsafe" bit for all PFNs */
2152 memory_bm_position_reset(free_pages_map);
2153 pfn = memory_bm_next_pfn(free_pages_map);
2154 while (pfn != BM_END_OF_MAP) {
2155 memory_bm_clear_current(free_pages_map);
2156 pfn = memory_bm_next_pfn(free_pages_map);
2157 }
2158
2159 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2160 duplicate_memory_bitmap(free_pages_map, bm);
2161
2162 allocated_unsafe_pages = 0;
2163 }
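
/*
 * Note that from this point on, while the image is being loaded, a set bit
 * in free_pages_map means "unsafe" (the frame is needed by the image being
 * restored) rather than "free"; e.g. prepare_image() below treats pages for
 * which swsusp_page_is_free() returns false as "safe" to reuse.
 */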
2164
2165 static int check_header(struct swsusp_info *info)
2166 {
2167 char *reason;
2168
2169 reason = check_image_kernel(info);
2170 if (!reason && info->num_physpages != get_num_physpages())
2171 reason = "memory size";
2172 if (reason) {
2173 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2174 return -EPERM;
2175 }
2176 return 0;
2177 }
2178
2179 /**
2180 * load_header - Check the image header and copy the data from it.
2181 */
2182 static int load_header(struct swsusp_info *info)
2183 {
2184 int error;
2185
2186 restore_pblist = NULL;
2187 error = check_header(info);
2188 if (!error) {
2189 nr_copy_pages = info->image_pages;
2190 nr_meta_pages = info->pages - info->image_pages - 1;
2191 }
2192 return error;
2193 }
2194
2195 /**
2196 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2197 * @buf: Area of memory containing the PFNs.
2198 * @bm: Memory bitmap.
2199 *
2200 * For each element of the array pointed to by @buf (1 page at a time), set the
2201 * corresponding bit in @bm. Returns -EFAULT if an invalid PFN is encountered.
2202 */
2203 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2204 {
2205 int j;
2206
2207 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2208 if (unlikely(buf[j] == BM_END_OF_MAP))
2209 break;
2210
2211 /* Extract and buffer page key for data page (s390 only). */
2212 page_key_memorize(buf + j);
2213
2214 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2215 memory_bm_set_bit(bm, buf[j]);
2216 else
2217 return -EFAULT;
2218 }
2219
2220 return 0;
2221 }
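
/*
 * Layout sketch of one meta page as produced by pack_pfns() and consumed by
 * unpack_orig_pfns() (assuming 4 KiB pages and 64-bit longs, i.e. 512
 * entries per page):
 *
 * buf[0] .. buf[n-1]: PFNs of saveable pages, in bitmap order
 * buf[n]: BM_END_OF_MAP (stored on the last meta page only; completely
 * full meta pages carry no terminator)
 */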
2222
2223 #ifdef CONFIG_HIGHMEM
2224 /*
2225 * struct highmem_pbe is used for creating the list of highmem pages that
2226 * should be restored atomically during the resume from disk, because the page
2227 * frames they have occupied before the suspend are in use.
2228 */
2229 struct highmem_pbe {
2230 struct page *copy_page; /* data is here now */
2231 struct page *orig_page; /* data was here before the suspend */
2232 struct highmem_pbe *next;
2233 };
2234
2235 /*
2236 * List of highmem PBEs needed for restoring the highmem pages that were
2237 * allocated before the suspend and included in the suspend image, but have
2238 * also been allocated by the "resume" kernel, so their contents cannot be
2239 * written directly to their "original" page frames.
2240 */
2241 static struct highmem_pbe *highmem_pblist;
2242
2243 /**
2244 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2245 * @bm: Memory bitmap.
2246 *
2247 * The bits in @bm that correspond to image pages are assumed to be set.
2248 */
2249 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2250 {
2251 unsigned long pfn;
2252 unsigned int cnt = 0;
2253
2254 memory_bm_position_reset(bm);
2255 pfn = memory_bm_next_pfn(bm);
2256 while (pfn != BM_END_OF_MAP) {
2257 if (PageHighMem(pfn_to_page(pfn)))
2258 cnt++;
2259
2260 pfn = memory_bm_next_pfn(bm);
2261 }
2262 return cnt;
2263 }
2264
2265 static unsigned int safe_highmem_pages;
2266
2267 static struct memory_bitmap *safe_highmem_bm;
2268
2269 /**
2270 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2271 * @bm: Pointer to an uninitialized memory bitmap structure.
2272 * @nr_highmem_p: Pointer to the number of highmem image pages.
2273 *
2274 * Try to allocate as many highmem pages as there are highmem image pages
2275 * (@nr_highmem_p points to the variable containing the number of highmem image
2276 * pages). The pages that are "safe" (i.e. will not be overwritten when the
2277 * hibernation image is restored entirely) have the corresponding bits set in
2278 * @bm (it must be uninitialized).
2279 *
2280 * NOTE: This function should not be called if there are no highmem image pages.
2281 */
2282 static int prepare_highmem_image(struct memory_bitmap *bm,
2283 unsigned int *nr_highmem_p)
2284 {
2285 unsigned int to_alloc;
2286
2287 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2288 return -ENOMEM;
2289
2290 if (get_highmem_buffer(PG_SAFE))
2291 return -ENOMEM;
2292
2293 to_alloc = count_free_highmem_pages();
2294 if (to_alloc > *nr_highmem_p)
2295 to_alloc = *nr_highmem_p;
2296 else
2297 *nr_highmem_p = to_alloc;
2298
2299 safe_highmem_pages = 0;
2300 while (to_alloc-- > 0) {
2301 struct page *page;
2302
2303 page = alloc_page(__GFP_HIGHMEM);
if (!page) {
/* Adjust the plan if the page cannot be obtained after all. */
*nr_highmem_p -= to_alloc + 1;
break;
}
2304 if (!swsusp_page_is_free(page)) {
2305 /* The page is "safe", set its bit in the bitmap */
2306 memory_bm_set_bit(bm, page_to_pfn(page));
2307 safe_highmem_pages++;
2308 }
2309 /* Mark the page as allocated */
2310 swsusp_set_page_forbidden(page);
2311 swsusp_set_page_free(page);
2312 }
2313 memory_bm_position_reset(bm);
2314 safe_highmem_bm = bm;
2315 return 0;
2316 }
2317
2318 static struct page *last_highmem_page;
2319
2320 /**
2321 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2322 *
2323 * For a given highmem image page get a buffer that snapshot_write_next() should
2324 * return to its caller to write to.
2325 *
2326 * If the page is to be saved to its "original" page frame or a copy of
2327 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2328 * the copy of the page is to be made in normal memory, so the address of
2329 * the copy is returned.
2330 *
2331 * If @buffer is returned, the caller of snapshot_write_next() will write
2332 * the page's contents to @buffer, so they will have to be copied to the
2333 * right location on the next call to snapshot_write_next() and it is done
2334 * with the help of copy_last_highmem_page(). For this purpose, if
2335 * @buffer is returned, @last_highmem_page is set to the page to which
2336 * the data will have to be copied from @buffer.
2337 */
2338 static void *get_highmem_page_buffer(struct page *page,
2339 struct chain_allocator *ca)
2340 {
2341 struct highmem_pbe *pbe;
2342 void *kaddr;
2343
2344 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2345 /*
2346 * We have allocated the "original" page frame and we can
2347 * use it directly to store the loaded page.
2348 */
2349 last_highmem_page = page;
2350 return buffer;
2351 }
2352 /*
2353 * The "original" page frame has not been allocated and we have to
2354 * use a "safe" page frame to store the loaded page.
2355 */
2356 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2357 if (!pbe) {
2358 swsusp_free();
2359 return ERR_PTR(-ENOMEM);
2360 }
2361 pbe->orig_page = page;
2362 if (safe_highmem_pages > 0) {
2363 struct page *tmp;
2364
2365 /* Copy of the page will be stored in high memory */
2366 kaddr = buffer;
2367 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2368 safe_highmem_pages--;
2369 last_highmem_page = tmp;
2370 pbe->copy_page = tmp;
2371 } else {
2372 /* Copy of the page will be stored in normal memory */
2373 kaddr = safe_pages_list;
2374 safe_pages_list = safe_pages_list->next;
2375 pbe->copy_page = virt_to_page(kaddr);
2376 }
2377 pbe->next = highmem_pblist;
2378 highmem_pblist = pbe;
2379 return kaddr;
2380 }
2381
2382 /**
2383 * copy_last_highmem_page - Copy the most recent highmem image page.
2384 *
2385 * Copy the contents of a highmem image page from @buffer, where the caller of
2386 * snapshot_write_next() has stored them, to the right location represented by
2387 * @last_highmem_page.
2388 */
2389 static void copy_last_highmem_page(void)
2390 {
2391 if (last_highmem_page) {
2392 void *dst;
2393
2394 dst = kmap_atomic(last_highmem_page);
2395 copy_page(dst, buffer);
2396 kunmap_atomic(dst);
2397 last_highmem_page = NULL;
2398 }
2399 }
2400
2401 static inline int last_highmem_page_copied(void)
2402 {
2403 return !last_highmem_page;
2404 }
2405
2406 static inline void free_highmem_data(void)
2407 {
2408 if (safe_highmem_bm)
2409 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2410
2411 if (buffer)
2412 free_image_page(buffer, PG_UNSAFE_CLEAR);
2413 }
2414 #else
2415 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2416
2417 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2418 unsigned int *nr_highmem_p) { return 0; }
2419
2420 static inline void *get_highmem_page_buffer(struct page *page,
2421 struct chain_allocator *ca)
2422 {
2423 return ERR_PTR(-EINVAL);
2424 }
2425
2426 static inline void copy_last_highmem_page(void) {}
2427 static inline int last_highmem_page_copied(void) { return 1; }
2428 static inline void free_highmem_data(void) {}
2429 #endif /* CONFIG_HIGHMEM */
2430
2431 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2432
2433 /**
2434 * prepare_image - Make room for loading hibernation image.
2435 * @new_bm: Uninitialized memory bitmap structure.
2436 * @bm: Memory bitmap with unsafe pages marked.
2437 *
2438 * Use @bm to mark the pages that will be overwritten in the process of
2439 * restoring the system memory state from the suspend image ("unsafe" pages)
2440 * and allocate memory for the image.
2441 *
2442 * The idea is to allocate a new memory bitmap first and then allocate
2443 * as many pages as needed for image data, but without specifying what those
2444 * pages will be used for just yet. Instead, we mark them all as allocated and
2445 * create a list of "safe" pages to be used later. On systems with high
2446 * memory a list of "safe" highmem pages is created too.
2447 */
2448 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2449 {
2450 unsigned int nr_pages, nr_highmem;
2451 struct linked_page *lp;
2452 int error;
2453
2454 /* If there is no highmem, the buffer will not be necessary */
2455 free_image_page(buffer, PG_UNSAFE_CLEAR);
2456 buffer = NULL;
2457
2458 nr_highmem = count_highmem_image_pages(bm);
2459 mark_unsafe_pages(bm);
2460
2461 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2462 if (error)
2463 goto Free;
2464
2465 duplicate_memory_bitmap(new_bm, bm);
2466 memory_bm_free(bm, PG_UNSAFE_KEEP);
2467 if (nr_highmem > 0) {
2468 error = prepare_highmem_image(bm, &nr_highmem);
2469 if (error)
2470 goto Free;
2471 }
2472 /*
2473 * Reserve some safe pages for potential later use.
2474 *
2475 * NOTE: This way we make sure there will be enough safe pages for the
2476 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2477 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2478 *
2479 * Also note that nr_copy_pages cannot be less than allocated_unsafe_pages.
2480 */
2481 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2482 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2483 while (nr_pages > 0) {
2484 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2485 if (!lp) {
2486 error = -ENOMEM;
2487 goto Free;
2488 }
2489 lp->next = safe_pages_list;
2490 safe_pages_list = lp;
2491 nr_pages--;
2492 }
2493 /* Preallocate memory for the image */
2494 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2495 while (nr_pages > 0) {
2496 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2497 if (!lp) {
2498 error = -ENOMEM;
2499 goto Free;
2500 }
2501 if (!swsusp_page_is_free(virt_to_page(lp))) {
2502 /* The page is "safe", add it to the list */
2503 lp->next = safe_pages_list;
2504 safe_pages_list = lp;
2505 }
2506 /* Mark the page as allocated */
2507 swsusp_set_page_forbidden(virt_to_page(lp));
2508 swsusp_set_page_free(virt_to_page(lp));
2509 nr_pages--;
2510 }
2511 return 0;
2512
2513 Free:
2514 swsusp_free();
2515 return error;
2516 }
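
/*
 * Illustrative numbers for the reservation above (assuming 4 KiB pages and
 * 64-bit pointers, so sizeof(struct pbe) = 24 and PBES_PER_LINKED_PAGE =
 * (4096 - 8) / 24 = 170; the other figures are made up): with
 * nr_copy_pages = 100000, nr_highmem = 0 and allocated_unsafe_pages =
 * 30000, the first loop reserves DIV_ROUND_UP(70000, 170) = 412 safe pages
 * for PBEs and the second loop preallocates 70000 page frames for the
 * image data itself.
 */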
2517
2518 /**
2519 * get_buffer - Get the address to store the next image data page.
2520 *
2521 * Get the address that snapshot_write_next() should return to its caller to
2522 * write to.
2523 */
2524 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2525 {
2526 struct pbe *pbe;
2527 struct page *page;
2528 unsigned long pfn = memory_bm_next_pfn(bm);
2529
2530 if (pfn == BM_END_OF_MAP)
2531 return ERR_PTR(-EFAULT);
2532
2533 page = pfn_to_page(pfn);
2534 if (PageHighMem(page))
2535 return get_highmem_page_buffer(page, ca);
2536
2537 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2538 /*
2539 * We have allocated the "original" page frame and we can
2540 * use it directly to store the loaded page.
2541 */
2542 return page_address(page);
2543
2544 /*
2545 * The "original" page frame has not been allocated and we have to
2546 * use a "safe" page frame to store the loaded page.
2547 */
2548 pbe = chain_alloc(ca, sizeof(struct pbe));
2549 if (!pbe) {
2550 swsusp_free();
2551 return ERR_PTR(-ENOMEM);
2552 }
2553 pbe->orig_address = page_address(page);
2554 pbe->address = safe_pages_list;
2555 safe_pages_list = safe_pages_list->next;
2556 pbe->next = restore_pblist;
2557 restore_pblist = pbe;
2558 return pbe->address;
2559 }
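
/*
 * The restore_pblist built here is consumed after the whole image has been
 * read in: the architecture-specific resume code (swsusp_arch_resume())
 * walks the list and copies each pbe->address page over pbe->orig_address,
 * which completes the atomic restore.
 */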
2560
2561 /**
2562 * snapshot_write_next - Get the address to store the next image page.
2563 * @handle: Snapshot handle structure to guide the writing.
2564 *
2565 * On the first call, @handle should point to a zeroed snapshot_handle
2566 * structure. The structure is then populated and a pointer to it should be
2567 * passed to this function on every subsequent call.
2568 *
2569 * On success, the function returns a positive number. Then, the caller
2570 * is allowed to write up to the returned number of bytes to the memory
2571 * location computed by the data_of() macro.
2572 *
2573 * The function returns 0 to indicate the "end of file" condition. Negative
2574 * numbers are returned on errors, in which case the structure pointed to by
2575 * @handle is not updated and should not be used any more.
2576 */
2577 int snapshot_write_next(struct snapshot_handle *handle)
2578 {
2579 static struct chain_allocator ca;
2580 int error = 0;
2581
2582 /* Check if we have already loaded the entire image */
2583 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2584 return 0;
2585
2586 handle->sync_read = 1;
2587
2588 if (!handle->cur) {
2589 if (!buffer)
2590 /* This causes the buffer to be freed by swsusp_free() */
2591 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2592
2593 if (!buffer)
2594 return -ENOMEM;
2595
2596 handle->buffer = buffer;
2597 } else if (handle->cur == 1) {
2598 error = load_header(buffer);
2599 if (error)
2600 return error;
2601
2602 safe_pages_list = NULL;
2603
2604 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2605 if (error)
2606 return error;
2607
2608 /* Allocate buffer for page keys. */
2609 error = page_key_alloc(nr_copy_pages);
2610 if (error)
2611 return error;
2612
2613 hibernate_restore_protection_begin();
2614 } else if (handle->cur <= nr_meta_pages + 1) {
2615 error = unpack_orig_pfns(buffer, &copy_bm);
2616 if (error)
2617 return error;
2618
2619 if (handle->cur == nr_meta_pages + 1) {
2620 error = prepare_image(&orig_bm, &copy_bm);
2621 if (error)
2622 return error;
2623
2624 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2625 memory_bm_position_reset(&orig_bm);
2626 restore_pblist = NULL;
2627 handle->buffer = get_buffer(&orig_bm, &ca);
2628 handle->sync_read = 0;
2629 if (IS_ERR(handle->buffer))
2630 return PTR_ERR(handle->buffer);
2631 }
2632 } else {
2633 copy_last_highmem_page();
2634 /* Restore page key for data page (s390 only). */
2635 page_key_write(handle->buffer);
2636 hibernate_restore_protect_page(handle->buffer);
2637 handle->buffer = get_buffer(&orig_bm, &ca);
2638 if (IS_ERR(handle->buffer))
2639 return PTR_ERR(handle->buffer);
2640 if (handle->buffer != buffer)
2641 handle->sync_read = 0;
2642 }
2643 handle->cur++;
2644 return PAGE_SIZE;
2645 }
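
/*
 * A minimal usage sketch (not part of the original file) of the load-side
 * counterpart to the save loop above; read_chunk is a hypothetical stand-in
 * for whatever actually fetches image data, e.g. the swap reader. A real
 * caller would also check snapshot_image_loaded() before resuming.
 */
static int __maybe_unused example_load_loop(struct snapshot_handle *handle,
int (*read_chunk)(void *buf, size_t count))
{
int ret;

do {
ret = snapshot_write_next(handle);
if (ret <= 0)
break;
/* Up to 'ret' bytes may now be written to data_of(). */
ret = read_chunk(data_of(*handle), ret);
} while (!ret);

if (!ret)
snapshot_write_finalize(handle);
return ret;
}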
2646
2647 /**
2648 * snapshot_write_finalize - Complete the loading of a hibernation image.
2649 *
2650 * Must be called after the last call to snapshot_write_next() in case the last
2651 * page in the image happens to be a highmem page and its contents should be
2652 * stored in highmem. Additionally, it recycles bitmap memory that's not
2653 * necessary any more.
2654 */
2655 void snapshot_write_finalize(struct snapshot_handle *handle)
2656 {
2657 copy_last_highmem_page();
2658 /* Restore page key for data page (s390 only). */
2659 page_key_write(handle->buffer);
2660 page_key_free();
2661 hibernate_restore_protect_page(handle->buffer);
2662 /* Do that only if we have loaded the image entirely */
2663 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2664 memory_bm_recycle(&orig_bm);
2665 free_highmem_data();
2666 }
2667 }
2668
2669 int snapshot_image_loaded(struct snapshot_handle *handle)
2670 {
2671 return nr_copy_pages && last_highmem_page_copied() &&
2672 handle->cur > nr_meta_pages + nr_copy_pages;
2673 }
2674
2675 #ifdef CONFIG_HIGHMEM
2676 /* Assumes that @buf is ready and points to a "safe" page */
2677 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2678 void *buf)
2679 {
2680 void *kaddr1, *kaddr2;
2681
2682 kaddr1 = kmap_atomic(p1);
2683 kaddr2 = kmap_atomic(p2);
2684 copy_page(buf, kaddr1);
2685 copy_page(kaddr1, kaddr2);
2686 copy_page(kaddr2, buf);
2687 kunmap_atomic(kaddr2);
2688 kunmap_atomic(kaddr1);
2689 }
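
/*
 * The exchange above is the classic three-copy swap through a bounce
 * buffer:
 *
 * buf <- p1
 * p1  <- p2
 * p2  <- buf
 *
 * A separate "safe" buffer page is used so that whole-page copy_page()
 * calls can be used for the exchange.
 */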
2690
2691 /**
2692 * restore_highmem - Put highmem image pages into their original locations.
2693 *
2694 * For each highmem page that was in use before hibernation and is included in
2695 * the image, and also has been allocated by the "restore" kernel, swap its
2696 * current contents with the previous (i.e. "before hibernation") ones.
2697 *
2698 * If the restore eventually fails, we can call this function once again and
2699 * restore the highmem state as seen by the restore kernel.
2700 */
2701 int restore_highmem(void)
2702 {
2703 struct highmem_pbe *pbe = highmem_pblist;
2704 void *buf;
2705
2706 if (!pbe)
2707 return 0;
2708
2709 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2710 if (!buf)
2711 return -ENOMEM;
2712
2713 while (pbe) {
2714 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2715 pbe = pbe->next;
2716 }
2717 free_image_page(buf, PG_UNSAFE_CLEAR);
2718 return 0;
2719 }
2720 #endif /* CONFIG_HIGHMEM */