/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/*
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend. The unsafe pages have PageNosaveFree set
 * and we count them using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}
/*
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
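/*
 * Illustrative sketch (not part of the original file): a typical use of
 * the chain allocator is to carve many small objects out of whole pages
 * with no per-object freeing; everything is released at once by freeing
 * the chain. The function below is a made-up example.
 */
#if 0	/* example only */
static void chain_alloc_example(void)
{
	struct chain_allocator ca;
	unsigned long *obj;

	chain_init(&ca, GFP_KERNEL, PG_ANY);
	obj = chain_alloc(&ca, sizeof(*obj));	/* carve one object from the chain */
	if (obj)
		*obj = 42;
	/* no per-object free: release every page of the chain at once */
	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
}
#endif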
/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the PFNs that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link zones together         */
	struct list_head nodes;		/* Radix tree inner nodes      */
	struct list_head leaves;	/* Radix tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix tree root             */
	int levels;			/* Number of radix tree levels */
	unsigned int blocks;		/* Number of bitmap blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
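/*
 * Illustrative numbers (a sketch assuming a 64-bit kernel with 4 KiB
 * pages; not part of the original file):
 *
 *   BM_BITS_PER_BLOCK    = 4096 * 8 = 32768 bits, so one leaf page
 *                          covers 32768 PFNs (128 MiB of RAM);
 *   BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 child pointers per inner node;
 *   BM_RTREE_LEVEL_SHIFT = 12 - 3  = 9, i.e. each tree level consumes
 *                          9 bits of the block number.
 *
 * A zone spanning 64 GiB therefore needs 64 GiB / 128 MiB = 512 leaf
 * blocks, which still fit under a single level of inner nodes.
 */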
/*
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}
/*
 * add_rtree_block - Add a new leaf node to the radix tree
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
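/*
 * Worked example (a sketch, not part of the original file): with
 * BM_RTREE_LEVEL_SHIFT == 9, inserting block_nr == 1000 into a
 * two-level tree computes:
 *
 *   i == 2: index = (1000 >> 9) & 511 = 1    (slot in the root node)
 *   i == 1: index = (1000 >> 0) & 511 = 488  (slot in the inner node)
 *
 * so the new leaf lands at rtree->data[1]->data[488], which matches
 * how memory_bm_find_bit() below resolves the same block number.
 */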
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/*
 * create_zone_bm_rtree - create a radix tree for one zone
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
		     struct chain_allocator *ca,
		     unsigned long start, unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}
/*
 * free_zone_bm_rtree - Free the memory of the radix tree
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/*
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}
/*
 * create_mem_extents - create a list of memory extents representing
 *                      contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
/*
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/*
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}
/*
 * memory_bm_find_bit - Find the bit for pfn in the memory bitmap
 *
 * Find the bit in the bitmap @bm that corresponds to given pfn.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
 * It walks the radix tree to find the page which contains the bit for
 * pfn and returns the bit position in **addr and *bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have a zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node     = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/*
 * rtree_next_node - Jump to the next leaf node
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Returns true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	bm->cur.node = list_entry(bm->cur.node->list.next,
				  struct rtree_node, list);
	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
	if (&bm->cur.zone->list != &bm->zones) {
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/*
 * memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm
 *
 * Starting from the last returned position this function searches
 * for the next set bit in the memory bitmap and returns its
 * number. If no more bits are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the
 * first call to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits  = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit   = find_next_bit(bm->cur.node->data, bits,
				      bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
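/*
 * Illustrative sketch (not part of the original file): the canonical way
 * to walk every set bit in a bitmap, the same pattern used by
 * copy_data_pages() and swsusp_free() below.
 */
#if 0	/* example only */
static void memory_bm_walk_example(struct memory_bitmap *bm)
{
	unsigned long pfn;

	memory_bm_position_reset(bm);
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm))
		pr_debug("PM: pfn %#lx is set\n", pfn);
}
#endif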
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/*
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */
void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
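/*
 * Illustrative sketch (not part of the original file): architecture setup
 * code typically calls the register_nosave_region() wrapper from
 * <linux/suspend.h> while parsing the memory map, e.g. to keep firmware
 * ranges out of the image. The address range below is made up.
 */
#if 0	/* example only */
static void __init nosave_example(void)
{
	/* exclude the (hypothetical) legacy hole at 0x9f000-0xfffff */
	register_nosave_region(PFN_DOWN(0x9f000), PFN_DOWN(0x100000));
}
#endif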
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/*
 * mark_nosave_pages - set bits corresponding to the page frames the
 * contents of which should not be saved in a given bitmap.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

static bool is_nosave_page(unsigned long pfn)
{
	struct nosave_region *region;

	list_for_each_entry(region, &nosave_regions, list) {
		if (pfn >= region->start_pfn && pfn < region->end_pfn) {
			pr_err("PM: %#010llx in e820 nosave region: "
			       "[mem %#010llx-%#010llx]\n",
			       (unsigned long long) pfn << PAGE_SHIFT,
			       (unsigned long long) region->start_pfn
							<< PAGE_SHIFT,
			       ((unsigned long long) region->end_pfn
							<< PAGE_SHIFT) - 1);
			return true;
		}
	}

	return false;
}
/*
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames. The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both bitmaps
 * are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/*
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps(). The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}
/*
 * snapshot_additional_pages - estimate the number of additional pages
 * needed for setting up the suspend image data structures for given
 * zone (usually the returned value is greater than the exact number)
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
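/*
 * Worked estimate (a sketch assuming 64-bit, 4 KiB pages; not part of the
 * original file): for a zone spanning 4 GiB (1,048,576 pages),
 *
 *   leaf blocks:        DIV_ROUND_UP(1048576, 32768) = 32
 *   rtree_node objects: DIV_ROUND_UP(32 * 24, 4088)  = 1 extra page
 *   inner levels:       DIV_ROUND_UP(32, 512)        = 1 node, loop ends
 *
 * giving rtree = 32 + 1 + 1 = 34, so the function reports 2 * 34 = 68
 * additional pages (doubled because both orig_bm and copy_bm are built).
 */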
#ifdef CONFIG_HIGHMEM
/*
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/*
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't a part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/*
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */
/*
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't a part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/*
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}
/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/*
 * safe_copy_page - check if the page we are going to copy is marked as
 * present in the kernel page tables (this always is the case if
 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
 * kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/*
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
/*
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/*
 * __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
/*
 * free_unnecessary_pages - Release preallocated pages not needed for the image
 */
static void free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}
}
/*
 * minimum_image_size - Estimate the minimum acceptable size of an image
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}
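/*
 * Worked example (a sketch with made-up numbers, not part of the original
 * file): with 1,000,000 saveable pages and
 *
 *   reclaimable slab      100,000
 *   active anon           150,000
 *   inactive anon          50,000
 *   active file           200,000
 *   inactive file         250,000
 *   mapped file           -50,000
 *                        --------
 *   freeable in theory    700,000
 *
 * the estimated minimum image size is 1,000,000 - 700,000 = 300,000 pages;
 * had the freeable total been >= 1,000,000, the function would return 0.
 */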
/*
 * hibernate_preallocate_memory - Preallocate memory for hibernation image
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	struct timeval start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	do_gettimeofday(&start);

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	free_unnecessary_pages();

 out:
	do_gettimeofday(&stop);
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}
#ifdef CONFIG_HIGHMEM
/*
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/*
 * enough_free_mem - Make sure we have enough free memory for the
 * snapshot image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/*
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/*
 * alloc_highmem_image_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/*
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
	     unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
/*
 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 * are stored in the array @buf[] (1 page at a time)
 */
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
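/*
 * Resulting stream layout (a sketch derived from the code above; not part
 * of the original file): the image begins with one header page, then
 * nr_meta_pages pages of packed PFNs, then nr_copy_pages pages of data:
 *
 *   page 0:                    struct swsusp_info (header)
 *   pages 1 .. nr_meta_pages:  the original PFN of every image page, one
 *                              unsigned long each, BM_END_OF_MAP-terminated
 *   remaining pages:           the page contents, in the same order
 *
 * which is why snapshot_get_image_size() above returns
 * nr_copy_pages + nr_meta_pages + 1.
 */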
/*
 * snapshot_read_next - used for reading the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure. The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * On success the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and a negative number is returned on error. In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
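/*
 * Illustrative sketch (not part of the original file): how a writer such
 * as the swap or user-space interface consumes snapshot_read_next(). The
 * write_page() helper is hypothetical; see kernel/power/swap.c and
 * kernel/power/user.c for the real callers.
 */
#if 0	/* example only */
static int snapshot_read_all_example(struct snapshot_handle *handle)
{
	int ret;

	while ((ret = snapshot_read_next(handle)) > 0) {
		ret = write_page(data_of(*handle), ret);	/* hypothetical */
		if (ret)
			return ret;
	}
	return ret;	/* 0 on end of stream, negative on error */
}
#endif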
/*
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}

static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/*
 * load_header - check the image header and copy the data from it
 */
static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/*
 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 * the corresponding bit in the memory bitmap @bm
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * List of "safe" pages that may be used to store data loaded from the suspend
 * image.
 */
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/*
 * count_highmem_image_pages - compute the number of highmem pages in the
 * suspend image. The bits in the memory bitmap @bm that correspond to the
 * image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
/*
 * prepare_highmem_image - try to allocate as many highmem pages as
 * there are highmem image pages (@nr_highmem_p points to the variable
 * containing the number of highmem image pages). The pages that are
 * "safe" (ie. will not be overwritten when the suspend image is
 * restored) have the corresponding bits set in @bm (it must be
 * uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem
 * image pages.
 */
static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;

	return 0;
}
/*
 * get_highmem_page_buffer - for given highmem image page find the buffer
 * that suspend_write_next() should set for its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned. Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page(). For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/*
 * copy_last_highmem_page - copy the contents of a highmem image from
 * @buffer, where the caller of snapshot_write_next() has placed them,
 * to the right location represented by @last_highmem_page .
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}
2294 static inline int last_highmem_page_copied(void)
2296 return !last_highmem_page
;
2299 static inline void free_highmem_data(void)
2301 if (safe_highmem_bm
)
2302 memory_bm_free(safe_highmem_bm
, PG_UNSAFE_CLEAR
);
2305 free_image_page(buffer
, PG_UNSAFE_CLEAR
);
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
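/*
 * Illustrative sketch, guarded out of the build: the idiom used above.
 * Providing static inline no-op stubs in the #else branch lets every
 * caller use the functions unconditionally, with no #ifdef of its own.
 * CONFIG_EXAMPLE_FEATURE is a made-up option for illustration.
 */
#if 0
#ifdef CONFIG_EXAMPLE_FEATURE
extern int example_feature_prepare(void);
#else
static inline int example_feature_prepare(void) { return 0; }
#endif
#endif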
/**
 * prepare_image - use the memory bitmap @bm to mark the pages that will
 * be overwritten in the process of restoring the system memory state
 * from the suspend image ("unsafe" pages) and allocate memory for the
 * image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for the image data, but not to assign these
 * pages to specific tasks initially.  Instead, we just mark them as
 * allocated and create a list of "safe" pages that will be used
 * later.  On systems with high memory a list of "safe" highmem pages is
 * created too.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
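/*
 * Illustrative sketch, guarded out of the build: the reservation math
 * above, worked through with assumed sizes (a 32-bit build with 4 KiB
 * pages, where sizeof(struct pbe) == 12 and LINKED_PAGE_DATA_SIZE ==
 * PAGE_SIZE - sizeof(void *) == 4092).
 */
#if 0
static void reservation_example(void)
{
	unsigned int pbes_per_page = 4092 / 12;	/* 341 PBEs per linked page */
	unsigned int nr_pages = 100000;		/* image pages to relocate */
	unsigned int reserve = DIV_ROUND_UP(nr_pages, pbes_per_page);

	/* reserve == 294: one extra safe page per 341 chain_alloc()ed PBEs */
}
#endif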
/**
 * get_buffer - compute the address that snapshot_write_next() should
 * set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
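/*
 * Illustrative sketch, guarded out of the build: the ERR_PTR() protocol
 * that get_buffer() and its callers rely on.  An errno value is encoded
 * in the pointer itself, so a single return slot carries both the
 * success and the failure case.
 */
#if 0
static void err_ptr_example(void)
{
	void *p = ERR_PTR(-ENOMEM);	/* encode the error in the pointer */

	if (IS_ERR(p))			/* true only for error-range values */
		pr_info("error code: %ld\n", PTR_ERR(p));	/* -12 */
}
#endif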
/**
 * snapshot_write_next - used for writing the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * before the next call to this function.
 */

int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
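/*
 * Illustrative sketch, guarded out of the build: how a reader of the
 * image stream drives snapshot_write_next() (cf. load_image() in
 * kernel/power/swap.c).  read_next_image_page() is a hypothetical
 * stand-in for the real I/O path.
 */
#if 0
static int load_image_sketch(struct snapshot_handle *snapshot)
{
	int ret = snapshot_write_next(snapshot);

	while (ret > 0) {
		/* fill the buffer computed for us with the next page */
		ret = read_next_image_page(data_of(*snapshot), PAGE_SIZE);
		if (ret)
			break;
		ret = snapshot_write_next(snapshot);
	}
	snapshot_write_finalize(snapshot);
	if (!ret && !snapshot_image_loaded(snapshot))
		ret = -ENODATA;
	return ret;
}
#endif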
/**
 * snapshot_write_finalize - must be called after the last call to
 * snapshot_write_next() in case the last page in the image happens
 * to be a highmem page and its contents should be stored in the
 * highmem.  Additionally, it releases the memory that will not be
 * used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	/* Free only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}
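/*
 * Illustrative sketch, guarded out of the build: the same three-copy
 * swap through one bounce buffer, without the highmem mappings.  Note
 * that the kernel version above unmaps in reverse order because
 * kmap_atomic() slots are stack-like.
 */
#if 0
static void swap_buffers(void *a, void *b, void *tmp, size_t size)
{
	memcpy(tmp, a, size);	/* save a */
	memcpy(a, b, size);	/* a <- b */
	memcpy(b, tmp, size);	/* b <- old a */
}
#endif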
/**
 * restore_highmem - for each highmem page that was allocated before
 * the suspend and included in the suspend image, and also has been
 * allocated by the "resume" kernel, swap its current (i.e. "before
 * resume") contents with the previous (i.e. "before suspend") one.
 *
 * If the resume eventually fails, we can call this function once
 * again and restore the "before resume" highmem state.
 */

int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */