/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */
#define pr_fmt(fmt) "PM: " fmt
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}
static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}
static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}
/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};
static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
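
/*
 * Illustration only (not part of the original file): two small objects
 * carved out of the same chain page.  Both stay valid until the whole
 * chain is released at once via free_list_of_pages(ca.chain, ...):
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_SAFE);
 *	first  = chain_alloc(&ca, sizeof(struct rtree_node));
 *	second = chain_alloc(&ca, sizeof(struct rtree_node));
 */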
/*
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
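
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): one
 * bitmap block is a single page holding PAGE_SIZE * BITS_PER_BYTE ==
 * 32768 bits, one bit per page frame, so BM_BLOCK_SHIFT == 15.  For a
 * given pfn, (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT selects the
 * block and (pfn - zone->start_pfn) & BM_BLOCK_MASK selects the bit
 * within it, as done by memory_bm_find_bit() below.
 */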
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};
/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};
struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}
/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
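
/*
 * Example of the level arithmetic above (assuming 64-bit longs and
 * 4 KiB pages, so BM_RTREE_LEVEL_SHIFT == 9 and 512 entries per node):
 * a zone spanning 16 GiB covers 4194304 page frames, i.e. 128 bitmap
 * blocks.  Since 128 < 512, a single radix-tree level suffices, and
 * block number 127 is reached with index == (127 >> 0) &
 * BM_RTREE_LEVEL_MASK == 127.
 */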
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);
/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}
/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}
/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}
/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 *
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}
static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}
static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}
/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
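
/*
 * Typical traversal of a memory bitmap using the two helpers above
 * (this is the pattern used by swsusp_free() and mark_unsafe_pages()
 * later in this file):
 *
 *	memory_bm_position_reset(bm);
 *	pfn = memory_bm_next_pfn(bm);
 *	while (pfn != BM_END_OF_MAP) {
 *		... use pfn ...
 *		pfn = memory_bm_next_pfn(bm);
 *	}
 */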
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}
static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}
/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;
/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}
void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}
/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up a hibernation
 * image data structures for @zone (usually, the returned value is greater than
 * the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
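
/*
 * Rough worked example (illustrative numbers, 4 KiB pages, 64-bit
 * longs): a zone spanning 1 GiB covers 262144 page frames, i.e. 8
 * bitmap blocks.  That gives rtree = 8 block pages plus one page of
 * struct rtree_node bookkeeping, and the while loop adds a single
 * inner level (DIV_ROUND_UP(8, 512) == 1), about 10 extra pages in
 * total.  The final return doubles this, matching the two bitmaps
 * (orig_bm and copy_bm) created later.
 */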
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}
/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}
/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}

static unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}
/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}
/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}
/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 *  __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							  unsigned long highmem,
							  unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}
/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE)
		- global_node_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}
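
/*
 * Hypothetical numbers for the max_size formula above: with
 * count = 2000000 usable page frames, size = 20000 metadata pages and
 * reserved_size = 1 MiB (256 page frames at 4 KiB),
 * max_size = (2000000 - (20000 + PAGES_FOR_IO)) / 2 - 2 * 256,
 * i.e. slightly under half of usable RAM may stay in the image.
 */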
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
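
/*
 * Sizing note (assuming 64-bit longs and 4 KiB pages): each call to
 * pack_pfns() emits PAGE_SIZE / sizeof(long) == 512 PFNs, so
 * nr_meta_pages == DIV_ROUND_UP(nr_copy_pages * 8, PAGE_SIZE), as
 * computed in swsusp_save() above.
 */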
/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

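/*
 * Editorial note: the loop above allocates up to *nr_highmem_p highmem
 * pages without caring where they land.  A freshly allocated frame that
 * does not clash with the original image (its "unsafe" bit is clear, so
 * swsusp_page_is_free() returns false at the time of the test) is
 * recorded in @bm as a "safe" restore target; clashing frames are merely
 * pinned via the forbidden/free bits so they cannot be handed out again.
 */
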
static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that snapshot_write_next()
 * should return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of snapshot_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to snapshot_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

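/*
 * Protocol sketch (editorial): when @buffer is returned, storing the page
 * is a two-step operation spread over consecutive calls:
 *
 *	handle->buffer = get_highmem_page_buffer(page, ca);
 *	...caller writes PAGE_SIZE bytes to handle->buffer...
 *	copy_last_highmem_page();	// done by the next call to
 *					// snapshot_write_next() or by
 *					// snapshot_write_finalize()
 *
 * copy_last_highmem_page() then kmaps @last_highmem_page and copies the
 * staged data out of @buffer.
 */
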
/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image page from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}

#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages either.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

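/*
 * Worked example (editorial, made-up numbers): with nr_copy_pages = 1000,
 * nr_highmem = 0 and allocated_unsafe_pages = 100, the first loop above
 * reserves DIV_ROUND_UP(900, PBES_PER_LINKED_PAGE) linked pages for the
 * chain_alloc() calls in get_buffer(), and the second loop preallocates
 * 900 more pages for the image data, threading the "safe" ones onto
 * safe_pages_list.
 */
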
/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

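/*
 * Editorial note: each PBE created here defers one page move.  The image
 * data is loaded into pbe->address (a safe page) now; after the entire
 * image is in memory, the architecture's low-level resume code walks
 * restore_pblist and copies every pbe->address page back onto
 * pbe->orig_address.
 */
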
/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which cases the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

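/*
 * Usage sketch (editorial, modeled loosely on the image-load path in
 * kernel/power/swap.c; read_next_page() is a hypothetical data source):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	for (;;) {
 *		ret = snapshot_write_next(&handle);
 *		if (ret <= 0)
 *			break;
 *		ret = read_next_page(data_of(handle));
 *		if (ret)
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (!ret && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */
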
/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

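/*
 * Editorial note: this is the classic three-copy swap through a bounce
 * buffer; @buf provides the temporary storage needed to exchange the two
 * pages in place:
 *
 *	copy_page(buf, kaddr1);		// buf <- p1
 *	copy_page(kaddr1, kaddr2);	// p1  <- p2
 *	copy_page(kaddr2, buf);		// p2  <- old p1
 */
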
/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}

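/*
 * Editorial note: because each step above is a swap rather than a copy,
 * calling restore_highmem() a second time after a failed restore brings
 * back the highmem contents as the restore kernel saw them, which is what
 * the recovery described in the kernel-doc comment relies on.
 */
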
#endif /* CONFIG_HIGHMEM */