/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
        reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
        image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
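
/*
 * Worked example (illustrative, assuming 4 KiB pages): with 8 GiB of RAM,
 * totalram_pages is about 2097152, so the default image_size becomes
 * (2097152 * 2 / 5) * 4096 bytes, i.e. roughly 3.2 GiB, two fifths of RAM.
 */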

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend. The unsafe pages have PageNosaveFree set
 * and we count them using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY          0
#define PG_SAFE         1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP  0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}

/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __packed;

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}

/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                         * of the current page
                                         */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = get_image_page(ca->gfp_mask, ca->safe_needed);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}

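/*
 * Usage sketch (illustrative only, not part of the original file): the
 * allocator hands out bump-pointer slices of one page at a time, so the
 * typical pattern is init, a series of small allocations, then a single
 * bulk free of the whole chain:
 *
 *      struct chain_allocator ca;
 *      struct rtree_node *node;
 *
 *      chain_init(&ca, GFP_KERNEL, PG_ANY);
 *      node = chain_alloc(&ca, sizeof(struct rtree_node));
 *      if (!node)
 *              return -ENOMEM;
 *      ...
 *      free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 */
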
/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct mem_zone_bm_rtree
 * and each of them corresponds to one populated memory zone. For each
 * zone there is a list of objects of type struct rtree_node that
 * represent the blocks of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP   (~0UL)

#define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
        struct list_head list;
        unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
        struct list_head list;          /* Link Zones together         */
        struct list_head nodes;         /* Radix Tree inner nodes      */
        struct list_head leaves;        /* Radix Tree leaves           */
        unsigned long start_pfn;        /* Zone start page frame       */
        unsigned long end_pfn;          /* Zone end page frame + 1     */
        struct rtree_node *rtree;       /* Radix Tree Root             */
        int levels;                     /* Number of Radix Tree Levels */
        unsigned int blocks;            /* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct mem_zone_bm_rtree *zone;
        struct rtree_node *node;
        unsigned long node_pfn;
        int node_bit;
};

struct memory_bitmap {
        struct list_head zones;
        struct linked_page *p_list;     /* list of pages used to store zone
                                         * bitmap objects and bitmap block
                                         * objects
                                         */
        struct bm_position cur; /* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

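/*
 * Worked example (illustrative, assuming 4 KiB pages on a 64-bit machine):
 * PAGE_SHIFT is 12, so BM_BLOCK_SHIFT is 15 and each leaf page covers
 * BM_BITS_PER_BLOCK = 32768 pfns, i.e. 128 MiB of memory per bitmap page.
 * BM_RTREE_LEVEL_SHIFT is 9, so each inner node holds
 * BM_ENTRIES_PER_LEVEL = 512 child pointers and a single inner level
 * already addresses 512 * 128 MiB = 64 GiB per zone.
 */
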
/*
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
                                           struct chain_allocator *ca,
                                           struct list_head *list)
{
        struct rtree_node *node;

        node = chain_alloc(ca, sizeof(struct rtree_node));
        if (!node)
                return NULL;

        node->data = get_image_page(gfp_mask, safe_needed);
        if (!node->data)
                return NULL;

        list_add_tail(&node->list, list);

        return node;
}

/*
 * add_rtree_block - Add a new leaf node to the radix tree
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
                           int safe_needed, struct chain_allocator *ca)
{
        struct rtree_node *node, *block, **dst;
        unsigned int levels_needed, block_nr;
        int i;

        block_nr = zone->blocks;
        levels_needed = 0;

        /* How many levels do we need for this block nr? */
        while (block_nr) {
                levels_needed += 1;
                block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }

        /* Make sure the rtree has enough levels */
        for (i = zone->levels; i < levels_needed; i++) {
                node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                        &zone->nodes);
                if (!node)
                        return -ENOMEM;

                node->data[0] = (unsigned long)zone->rtree;
                zone->rtree = node;
                zone->levels += 1;
        }

        /* Allocate new block */
        block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
        if (!block)
                return -ENOMEM;

        /* Now walk the rtree to insert the block */
        node = zone->rtree;
        dst = &zone->rtree;
        block_nr = zone->blocks;
        for (i = zone->levels; i > 0; i--) {
                int index;

                if (!node) {
                        node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                                &zone->nodes);
                        if (!node)
                                return -ENOMEM;
                        *dst = node;
                }

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                dst = (struct rtree_node **)&((*dst)->data[index]);
                node = *dst;
        }

        zone->blocks += 1;
        *dst = block;

        return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free);

/*
 * create_zone_bm_rtree - create a radix tree for one zone
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
                     struct chain_allocator *ca,
                     unsigned long start, unsigned long end)
{
        struct mem_zone_bm_rtree *zone;
        unsigned int i, nr_blocks;
        unsigned long pages;

        pages = end - start;
        zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
        if (!zone)
                return NULL;

        INIT_LIST_HEAD(&zone->nodes);
        INIT_LIST_HEAD(&zone->leaves);
        zone->start_pfn = start;
        zone->end_pfn = end;
        nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        for (i = 0; i < nr_blocks; i++) {
                if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
                        free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
                        return NULL;
                }
        }

        return zone;
}

/*
 * free_zone_bm_rtree - Free the memory of the radix tree
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                free_image_page(node->data, clear_nosave_free);

        list_for_each_entry(node, &zone->leaves, list)
                free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
                                  list);
        bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                  struct rtree_node, list);
        bm->cur.node_pfn = 0;
        bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
        struct list_head hook;
        unsigned long start;
        unsigned long end;
};

/**
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}

/**
 * create_mem_extents - create a list of memory extents representing
 *                      contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone_end_pfn(zone);

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}

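/*
 * Example (illustrative pfn values only): given populated zones spanning
 * pfns [0, 4096), [2048, 8192) and [16384, 20480), the loop above merges
 * the first two overlapping ranges, so the resulting list holds the two
 * extents [0, 8192) and [16384, 20480); memory_bm_create() then builds
 * one radix tree per extent.
 */
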
/**
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->zones);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct mem_zone_bm_rtree *zone;

                zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
                                            ext->start, ext->end);
                if (!zone) {
                        error = -ENOMEM;
                        goto Error;
                }
                list_add_tail(&zone->list, &bm->zones);
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}

/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct mem_zone_bm_rtree *zone;

        list_for_each_entry(zone, &bm->zones, list)
                free_zone_bm_rtree(zone, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for pfn in the memory
 *                      bitmap
 *
 * Find the bit in the bitmap @bm that corresponds to given pfn.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are
 * updated.
 * It walks the radix tree to find the page which contains the bit for
 * pfn and returns the bitmap page address in *addr and the bit index
 * within that page in *bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct mem_zone_bm_rtree *curr, *zone;
        struct rtree_node *node;
        int i, block_nr;

        zone = bm->cur.zone;

        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
                goto zone_found;

        zone = NULL;

        /* Find the right zone */
        list_for_each_entry(curr, &bm->zones, list) {
                if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
                        zone = curr;
                        break;
                }
        }

        if (!zone)
                return -EFAULT;

zone_found:
        /*
         * We have a zone. Now walk the radix tree to find the leaf
         * node for our pfn.
         */

        node = bm->cur.node;
        if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
                goto node_found;

        node = zone->rtree;
        block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

        for (i = zone->levels; i > 0; i--) {
                int index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                BUG_ON(node->data[index] == 0);
                node = (struct rtree_node *)node->data[index];
        }

node_found:
        /* Update last position */
        bm->cur.zone = zone;
        bm->cur.node = node;
        bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

        /* Set return values */
        *addr = node->data;
        *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

        return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);

        return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
        int bit;

        bit = max(bm->cur.node_bit - 1, 0);
        clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node
 *
 * Sets the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Returns true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
        bm->cur.node = list_entry(bm->cur.node->list.next,
                                  struct rtree_node, list);
        if (&bm->cur.node->list != &bm->cur.zone->leaves) {
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit = 0;
                touch_softlockup_watchdog();
                return true;
        }

        /* No more nodes, goto next zone */
        bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                  struct mem_zone_bm_rtree, list);
        if (&bm->cur.zone->list != &bm->zones) {
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
                bm->cur.node_bit = 0;
                return true;
        }

        /* No more zones */
        return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
 *
 * Starting from the last returned position this function searches
 * for the next set bit in the memory bitmap and returns its
 * number. If no more bits are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the
 * first call to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        unsigned long bits, pfn, pages;
        int bit;

        do {
                pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
                bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
                bit = find_next_bit(bm->cur.node->data, bits,
                                    bm->cur.node_bit);
                if (bit < bits) {
                        pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
                        bm->cur.node_bit = bit + 1;
                        return pfn;
                }
        } while (rtree_next_node(bm));

        return BM_END_OF_MAP;
}

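/*
 * Usage sketch (illustrative only): the iterator above is always used in
 * a reset-then-loop pattern, as in copy_data_pages() and swsusp_free()
 * below:
 *
 *      unsigned long pfn;
 *
 *      memory_bm_position_reset(bm);
 *      for (;;) {
 *              pfn = memory_bm_next_pfn(bm);
 *              if (pfn == BM_END_OF_MAP)
 *                      break;
 *              ...     act on the page frame at pfn
 *      }
 */
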
/**
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */

struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
                         int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                    struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* during init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else
                /* This allocation cannot fail */
                region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
                (unsigned long long) start_pfn << PAGE_SHIFT,
                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

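/*
 * Caller sketch (illustrative only; the real call sites live in
 * architecture setup code): early platform code typically goes through
 * the register_nosave_region() wrapper while walking the firmware memory
 * map, e.g.
 *
 *      register_nosave_region(PFN_DOWN(fw_start), PFN_UP(fw_end));
 *
 * so that page frames holding firmware data are neither saved nor
 * restored; fw_start and fw_end are hypothetical byte addresses here.
 */
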
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - set bits corresponding to the page frames the
 * contents of which should not be saved in a given bitmap.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
                         (unsigned long long) region->start_pfn << PAGE_SHIFT,
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}

static bool is_nosave_page(unsigned long pfn)
{
        struct nosave_region *region;

        list_for_each_entry(region, &nosave_regions, list) {
                if (pfn >= region->start_pfn && pfn < region->end_pfn) {
                        pr_err("PM: %#010llx in e820 nosave region: "
                               "[mem %#010llx-%#010llx]\n",
                               (unsigned long long) pfn << PAGE_SHIFT,
                               (unsigned long long) region->start_pfn << PAGE_SHIFT,
                               ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                        - 1);
                        return true;
                }
        }

        return false;
}

/**
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames. The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both bitmaps
 * are set up.
 */

int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        if (forbidden_pages_map && free_pages_map)
                return 0;
        else
                BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("PM: Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps(). The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */

void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
                return;

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 * snapshot_additional_pages - estimate the number of additional pages
 * that will be needed for setting up the suspend image data structures
 * for a given zone (usually the returned value is greater than the exact
 * number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        return 2 * rtree;
}

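/*
 * Worked example (illustrative, assuming 4 KiB pages on a 64-bit machine):
 * for a zone spanning 262144 pages (1 GiB), the bitmap needs
 * DIV_ROUND_UP(262144, 32768) = 8 leaf pages, one extra page for the
 * eight struct rtree_node wrappers (8 * 24 bytes fits into a single
 * linked page), and one inner node page, giving rtree = 10; the function
 * returns 2 * 10 = 20 because two such bitmaps (orig_bm and copy_bm) are
 * created.
 */
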
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_populated_zone(zone)
                if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}

/**
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't a part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(!PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
            PageReserved(page))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */

static unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(zone, pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
        return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't a part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageReserved(page)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */

static unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(zone, pfn))
                                n++;
        }
        return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}


/**
 * safe_copy_page - check if the page we are going to copy is marked as
 * present in the kernel page tables (this always is the case if
 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
 * kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
        if (kernel_page_present(s_page)) {
                do_copy_page(dst, page_address(s_page));
        } else {
                kernel_map_pages(s_page, 1, 1);
                do_copy_page(dst, page_address(s_page));
                kernel_map_pages(s_page, 1, 0);
        }
}


#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page);
                dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
                kunmap_atomic(dst);
                kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /* Page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
                        kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        safe_copy_page(page_address(pfn_to_page(dst_pfn)),
                       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_populated_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        for (;;) {
                pfn = memory_bm_next_pfn(orig_bm);
                if (unlikely(pfn == BM_END_OF_MAP))
                        break;
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        }
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */

void swsusp_free(void)
{
        unsigned long fb_pfn, fr_pfn;

        memory_bm_position_reset(forbidden_pages_map);
        memory_bm_position_reset(free_pages_map);

loop:
        fr_pfn = memory_bm_next_pfn(free_pages_map);
        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

        /*
         * Find the next bit set in both bitmaps. This is guaranteed to
         * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
         */
        do {
                if (fb_pfn < fr_pfn)
                        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
                if (fr_pfn < fb_pfn)
                        fr_pfn = memory_bm_next_pfn(free_pages_map);
        } while (fb_pfn != fr_pfn);

        if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
                struct page *page = pfn_to_page(fr_pfn);

                memory_bm_clear_current(forbidden_pages_map);
                memory_bm_clear_current(free_pages_map);
                __free_page(page);
                goto loop;
        }

        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
        unsigned long nr_alloc = 0;

        while (nr_pages > 0) {
                struct page *page;

                page = alloc_image_page(mask);
                if (!page)
                        break;
                memory_bm_set_bit(&copy_bm, page_to_pfn(page));
                if (PageHighMem(page))
                        alloc_highmem++;
                else
                        alloc_normal++;
                nr_pages--;
                nr_alloc++;
        }

        return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
                                              unsigned long avail_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;

        alloc = avail_normal - alloc_normal;
        if (nr_pages < alloc)
                alloc = nr_pages;

        return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
        x *= multiplier;
        do_div(x, base);
        return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                  unsigned long highmem,
                                                  unsigned long total)
{
        unsigned long alloc = __fraction(nr_pages, highmem, total);

        return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                         unsigned long highmem,
                                                         unsigned long total)
{
        return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image
 */
static void free_unnecessary_pages(void)
{
        unsigned long save, to_free_normal, to_free_highmem;

        save = count_data_pages();
        if (alloc_normal >= save) {
                to_free_normal = alloc_normal - save;
                save = 0;
        } else {
                to_free_normal = 0;
                save -= alloc_normal;
        }
        save += count_highmem_pages();
        if (alloc_highmem >= save) {
                to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
                save -= alloc_highmem;
                if (to_free_normal > save)
                        to_free_normal -= save;
                else
                        to_free_normal = 0;
        }

        memory_bm_position_reset(&copy_bm);

        while (to_free_normal > 0 || to_free_highmem > 0) {
                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
                struct page *page = pfn_to_page(pfn);

                if (PageHighMem(page)) {
                        if (!to_free_highmem)
                                continue;
                        to_free_highmem--;
                        alloc_highmem--;
                } else {
                        if (!to_free_normal)
                                continue;
                        to_free_normal--;
                        alloc_normal--;
                }
                memory_bm_clear_bit(&copy_bm, pfn);
                swsusp_unset_page_forbidden(page);
                swsusp_unset_page_free(page);
                __free_page(page);
        }
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
        unsigned long size;

        size = global_page_state(NR_SLAB_RECLAIMABLE)
                + global_page_state(NR_ACTIVE_ANON)
                + global_page_state(NR_INACTIVE_ANON)
                + global_page_state(NR_ACTIVE_FILE)
                + global_page_state(NR_INACTIVE_FILE)
                - global_page_state(NR_FILE_MAPPED);

        return saveable <= size ? 0 : saveable - size;
}

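/*
 * Worked example (illustrative numbers only): with 500000 saveable pages
 * and 350000 theoretically freeable pages (reclaimable slab plus anonymous
 * and file pages, minus mapped file pages), the minimum acceptable image
 * size is 500000 - 350000 = 150000 pages; preallocation will not try to
 * shrink the image below that.
 */
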
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
        struct zone *zone;
        unsigned long saveable, size, max_size, count, highmem, pages = 0;
        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
        struct timeval start, stop;
        int error;

        printk(KERN_INFO "PM: Preallocating image memory... ");
        do_gettimeofday(&start);

        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        alloc_normal = 0;
        alloc_highmem = 0;

        /* Count the number of saveable data pages. */
        save_highmem = count_highmem_pages();
        saveable = count_data_pages();

        /*
         * Compute the total number of page frames we can use (count) and the
         * number of pages needed for image metadata (size).
         */
        count = saveable;
        saveable += save_highmem;
        highmem = save_highmem;
        size = 0;
        for_each_populated_zone(zone) {
                size += snapshot_additional_pages(zone);
                if (is_highmem(zone))
                        highmem += zone_page_state(zone, NR_FREE_PAGES);
                else
                        count += zone_page_state(zone, NR_FREE_PAGES);
        }
        avail_normal = count;
        count += highmem;
        count -= totalreserve_pages;

        /* Add number of pages required for page keys (s390 only). */
        size += page_key_additional_pages(saveable);

        /* Compute the maximum number of saveable pages to leave in memory. */
        max_size = (count - (size + PAGES_FOR_IO)) / 2
                        - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
        /* Compute the desired number of image pages specified by image_size. */
        size = DIV_ROUND_UP(image_size, PAGE_SIZE);
        if (size > max_size)
                size = max_size;
        /*
         * If the desired number of image pages is at least as large as the
         * current number of saveable pages in memory, allocate page frames for
         * the image and we're done.
         */
        if (size >= saveable) {
                pages = preallocate_image_highmem(save_highmem);
                pages += preallocate_image_memory(saveable - pages, avail_normal);
                goto out;
        }

        /* Estimate the minimum size of the image. */
        pages = minimum_image_size(saveable);
        /*
         * To avoid excessive pressure on the normal zone, leave room in it to
         * accommodate an image of the minimum size (unless it's already too
         * small, in which case don't preallocate pages from it at all).
         */
        if (avail_normal > pages)
                avail_normal -= pages;
        else
                avail_normal = 0;
        if (size < pages)
                size = min_t(unsigned long, pages, max_size);

        /*
         * Let the memory management subsystem know that we're going to need a
         * large number of page frames to allocate and make it free some memory.
         * NOTE: If this is not done, performance will be hurt badly in some
         * test cases.
         */
        shrink_all_memory(saveable - size);

        /*
         * The number of saveable pages in memory was too high, so apply some
         * pressure to decrease it. First, make room for the largest possible
         * image and fail if that doesn't work. Next, try to decrease the size
         * of the image as much as indicated by 'size' using allocations from
         * highmem and non-highmem zones separately.
         */
        pages_highmem = preallocate_image_highmem(highmem / 2);
        alloc = count - max_size;
        if (alloc > pages_highmem)
                alloc -= pages_highmem;
        else
                alloc = 0;
        pages = preallocate_image_memory(alloc, avail_normal);
        if (pages < alloc) {
                /* We have exhausted non-highmem pages, try highmem. */
                alloc -= pages;
                pages += pages_highmem;
                pages_highmem = preallocate_image_highmem(alloc);
                if (pages_highmem < alloc)
                        goto err_out;
                pages += pages_highmem;
                /*
                 * size is the desired number of saveable pages to leave in
                 * memory, so try to preallocate (all memory - size) pages.
                 */
                alloc = (count - pages) - size;
                pages += preallocate_image_highmem(alloc);
        } else {
                /*
                 * There are approximately max_size saveable pages at this point
                 * and we want to reduce this number down to size.
                 */
                alloc = max_size - size;
                size = preallocate_highmem_fraction(alloc, highmem, count);
                pages_highmem += size;
                alloc -= size;
                size = preallocate_image_memory(alloc, avail_normal);
                pages_highmem += preallocate_image_highmem(alloc - size);
                pages += pages_highmem + size;
        }

        /*
         * We only need as many page frames for the image as there are saveable
         * pages in memory, but we have allocated more. Release the excessive
         * ones now.
         */
        free_unnecessary_pages();

 out:
        do_gettimeofday(&stop);
        printk(KERN_CONT "done (allocated %lu pages)\n", pages);
        swsusp_show_speed(&start, &stop, pages, "Allocated");

        return 0;

 err_out:
        printk(KERN_CONT "\n");
        swsusp_free();
        return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
        unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

        if (free_highmem >= nr_highmem)
                nr_highmem = 0;
        else
                nr_highmem -= free_highmem;

        return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Make sure we have enough free memory for the
 * snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
        struct zone *zone;
        unsigned int free = alloc_normal;

        for_each_populated_zone(zone)
                if (!is_highmem(zone))
                        free += zone_page_state(zone, NR_FREE_PAGES);

        nr_pages += count_pages_for_highmem(nr_highmem);
        pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
                 nr_pages, PAGES_FOR_IO, free);

        return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
        buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
        return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
        unsigned int to_alloc = count_free_highmem_pages();

        if (to_alloc > nr_highmem)
                to_alloc = nr_highmem;

        nr_highmem -= to_alloc;
        while (to_alloc-- > 0) {
                struct page *page;

                page = alloc_image_page(__GFP_HIGHMEM);
                memory_bm_set_bit(bm, page_to_pfn(page));
        }
        return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
             unsigned int nr_pages, unsigned int nr_highmem)
{
        if (nr_highmem > 0) {
                if (get_highmem_buffer(PG_ANY))
                        goto err_out;
                if (nr_highmem > alloc_highmem) {
                        nr_highmem -= alloc_highmem;
                        nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
                }
        }
        if (nr_pages > alloc_normal) {
                nr_pages -= alloc_normal;
                while (nr_pages-- > 0) {
                        struct page *page;

                        page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
                        if (!page)
                                goto err_out;
                        memory_bm_set_bit(copy_bm, page_to_pfn(page));
                }
        }

        return 0;

 err_out:
        swsusp_free();
        return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
        unsigned int nr_pages, nr_highmem;

        printk(KERN_INFO "PM: Creating hibernation image:\n");

        drain_local_pages(NULL);
        nr_pages = count_data_pages();
        nr_highmem = count_highmem_pages();
        printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

        if (!enough_free_mem(nr_pages, nr_highmem)) {
                printk(KERN_ERR "PM: Not enough free memory\n");
                return -ENOMEM;
        }

        if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
                printk(KERN_ERR "PM: Memory allocation failed\n");
                return -ENOMEM;
        }

        /* While allocating the suspend pagedir, new cold pages may appear.
         * Kill them.
         */
        drain_local_pages(NULL);
        copy_data_pages(&copy_bm, &orig_bm);

        /*
         * End of critical section. From now on, we can write to memory,
         * but we should not touch disk. This specially means we must _not_
         * touch swap space! Except we must write out our image of course.
         */

        nr_pages += nr_highmem;
        nr_copy_pages = nr_pages;
        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

        printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
                nr_pages);

        return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
        memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
        info->version_code = LINUX_VERSION_CODE;
        return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
        if (info->version_code != LINUX_VERSION_CODE)
                return "kernel version";
        if (strcmp(info->uts.sysname, init_utsname()->sysname))
                return "system type";
        if (strcmp(info->uts.release, init_utsname()->release))
                return "kernel release";
        if (strcmp(info->uts.version, init_utsname()->version))
                return "version";
        if (strcmp(info->uts.machine, init_utsname()->machine))
                return "machine";
        return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
        return nr_copy_pages + nr_meta_pages + 1;
}

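/*
 * Worked example (illustrative, assuming 4 KiB pages on a 64-bit machine):
 * an image with nr_copy_pages = 200000 data pages needs
 * nr_meta_pages = DIV_ROUND_UP(200000 * 8, 4096) = 391 pages of packed
 * pfns, so snapshot_get_image_size() reports 200000 + 391 + 1 = 200392
 * pages, the extra page being the swsusp_info header.
 */
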
1920 static int init_header(struct swsusp_info *info)
1921 {
1922 memset(info, 0, sizeof(struct swsusp_info));
1923 info->num_physpages = get_num_physpages();
1924 info->image_pages = nr_copy_pages;
1925 info->pages = snapshot_get_image_size();
1926 info->size = info->pages;
1927 info->size <<= PAGE_SHIFT;
1928 return init_header_complete(info);
1929 }
1930
1931 /**
1932 * pack_pfns - store, in the array @buf[], the pfns corresponding to
1933 * the set bits in the bitmap @bm (one page worth of pfns at a time)
1934 */
1935
1936 static inline void
1937 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1938 {
1939 int j;
1940
1941 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1942 buf[j] = memory_bm_next_pfn(bm);
1943 if (unlikely(buf[j] == BM_END_OF_MAP))
1944 break;
1945 /* Save page key for data page (s390 only). */
1946 page_key_read(buf + j);
1947 }
1948 }
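/*
 * Sketch of one meta page as filled by pack_pfns() (pfn values made up
 * for illustration): consecutive slots hold the pfns of the saved
 * pages; if the bitmap is exhausted mid-page, the next slot holds a
 * BM_END_OF_MAP marker:
 *
 *	buf[0] = 4096, buf[1] = 4097, buf[2] = 5120, ...
 *	buf[k] = BM_END_OF_MAP
 */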
1949
1950 /**
1951 * snapshot_read_next - used for reading the system memory snapshot.
1952 *
1953 * On the first call, @handle should point to a zeroed
1954 * snapshot_handle structure. The structure gets updated and a pointer
1955 * to it should be passed to this function on every subsequent call.
1956 *
1957 * On success the function returns a positive number. Then, the caller
1958 * is allowed to read up to the returned number of bytes from the memory
1959 * location computed by the data_of() macro.
1960 *
1961 * The function returns 0 to indicate the end of data stream condition,
1962 * and a negative number is returned on error. In such cases the
1963 * structure pointed to by @handle is not updated and should not be used
1964 * any more.
1965 */
1966
1967 int snapshot_read_next(struct snapshot_handle *handle)
1968 {
1969 if (handle->cur > nr_meta_pages + nr_copy_pages)
1970 return 0;
1971
1972 if (!buffer) {
1973 /* This makes the buffer be freed by swsusp_free() */
1974 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1975 if (!buffer)
1976 return -ENOMEM;
1977 }
1978 if (!handle->cur) {
1979 int error;
1980
1981 error = init_header((struct swsusp_info *)buffer);
1982 if (error)
1983 return error;
1984 handle->buffer = buffer;
1985 memory_bm_position_reset(&orig_bm);
1986 memory_bm_position_reset(&copy_bm);
1987 } else if (handle->cur <= nr_meta_pages) {
1988 clear_page(buffer);
1989 pack_pfns(buffer, &orig_bm);
1990 } else {
1991 struct page *page;
1992
1993 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1994 if (PageHighMem(page)) {
1995 /* Highmem pages are copied to the buffer,
1996 * because we can't return with a kmapped
1997 * highmem page (we may not be called again).
1998 */
1999 void *kaddr;
2000
2001 kaddr = kmap_atomic(page);
2002 copy_page(buffer, kaddr);
2003 kunmap_atomic(kaddr);
2004 handle->buffer = buffer;
2005 } else {
2006 handle->buffer = page_address(page);
2007 }
2008 }
2009 handle->cur++;
2010 return PAGE_SIZE;
2011 }
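/*
 * Minimal sketch of the read-side protocol described above; the sink
 * function write_chunk() is a hypothetical placeholder, not part of
 * this file:
 *
 *	struct snapshot_handle handle = {};
 *	int n;
 *
 *	while ((n = snapshot_read_next(&handle)) > 0)
 *		write_chunk(data_of(handle), n);	(up to n bytes valid)
 *	if (n < 0)
 *		error = n;	(@handle must not be used any more)
 */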
2012
2013 /**
2014 * mark_unsafe_pages - mark the pages that cannot be used for storing
2015 * the image during resume, because they conflict with the pages that
2016 * were in use before the suspend
2017 */
2018
2019 static int mark_unsafe_pages(struct memory_bitmap *bm)
2020 {
2021 struct zone *zone;
2022 unsigned long pfn, max_zone_pfn;
2023
2024 /* Clear page flags */
2025 for_each_populated_zone(zone) {
2026 max_zone_pfn = zone_end_pfn(zone);
2027 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2028 if (pfn_valid(pfn))
2029 swsusp_unset_page_free(pfn_to_page(pfn));
2030 }
2031
2032 /* Mark pages that correspond to the "original" pfns as "unsafe" */
2033 memory_bm_position_reset(bm);
2034 do {
2035 pfn = memory_bm_next_pfn(bm);
2036 if (likely(pfn != BM_END_OF_MAP)) {
2037 if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
2038 swsusp_set_page_free(pfn_to_page(pfn));
2039 else
2040 return -EFAULT;
2041 }
2042 } while (pfn != BM_END_OF_MAP);
2043
2044 allocated_unsafe_pages = 0;
2045
2046 return 0;
2047 }
2048
2049 static void
2050 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
2051 {
2052 unsigned long pfn;
2053
2054 memory_bm_position_reset(src);
2055 pfn = memory_bm_next_pfn(src);
2056 while (pfn != BM_END_OF_MAP) {
2057 memory_bm_set_bit(dst, pfn);
2058 pfn = memory_bm_next_pfn(src);
2059 }
2060 }
2061
2062 static int check_header(struct swsusp_info *info)
2063 {
2064 char *reason;
2065
2066 reason = check_image_kernel(info);
2067 if (!reason && info->num_physpages != get_num_physpages())
2068 reason = "memory size";
2069 if (reason) {
2070 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2071 return -EPERM;
2072 }
2073 return 0;
2074 }
2075
2076 /**
2077 * load_header - check the image header and copy the data from it
2078 */
2079
2080 static int
2081 load_header(struct swsusp_info *info)
2082 {
2083 int error;
2084
2085 restore_pblist = NULL;
2086 error = check_header(info);
2087 if (!error) {
2088 nr_copy_pages = info->image_pages;
2089 nr_meta_pages = info->pages - info->image_pages - 1;
2090 }
2091 return error;
2092 }
2093
2094 /**
2095 * unpack_orig_pfns - for each element of @buf[] (one page worth of
2096 * pfns at a time), set the corresponding bit in the memory bitmap @bm
2097 */
2098 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2099 {
2100 int j;
2101
2102 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2103 if (unlikely(buf[j] == BM_END_OF_MAP))
2104 break;
2105
2106 /* Extract and buffer page key for data page (s390 only). */
2107 page_key_memorize(buf + j);
2108
2109 if (memory_bm_pfn_present(bm, buf[j]))
2110 memory_bm_set_bit(bm, buf[j]);
2111 else
2112 return -EFAULT;
2113 }
2114
2115 return 0;
2116 }
2117
2118 /* List of "safe" pages that may be used to store data loaded from the suspend
2119 * image
2120 */
2121 static struct linked_page *safe_pages_list;
2122
2123 #ifdef CONFIG_HIGHMEM
2124 /* struct highmem_pbe is used for creating the list of highmem pages that
2125 * should be restored atomically during the resume from disk, because the page
2126 * frames they have occupied before the suspend are in use.
2127 */
2128 struct highmem_pbe {
2129 struct page *copy_page; /* data is here now */
2130 struct page *orig_page; /* data was here before the suspend */
2131 struct highmem_pbe *next;
2132 };
2133
2134 /* List of highmem PBEs needed for restoring the highmem pages that were
2135 * allocated before the suspend and included in the suspend image, but have
2136 * also been allocated by the "resume" kernel, so their contents cannot be
2137 * written directly to their "original" page frames.
2138 */
2139 static struct highmem_pbe *highmem_pblist;
2140
2141 /**
2142 * count_highmem_image_pages - compute the number of highmem pages in the
2143 * suspend image. The bits in the memory bitmap @bm that correspond to the
2144 * image pages are assumed to be set.
2145 */
2146
2147 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2148 {
2149 unsigned long pfn;
2150 unsigned int cnt = 0;
2151
2152 memory_bm_position_reset(bm);
2153 pfn = memory_bm_next_pfn(bm);
2154 while (pfn != BM_END_OF_MAP) {
2155 if (PageHighMem(pfn_to_page(pfn)))
2156 cnt++;
2157
2158 pfn = memory_bm_next_pfn(bm);
2159 }
2160 return cnt;
2161 }
2162
2163 /**
2164 * prepare_highmem_image - try to allocate as many highmem pages as
2165 * there are highmem image pages (@nr_highmem_p points to the variable
2166 * containing the number of highmem image pages). The pages that are
2167 * "safe" (ie. will not be overwritten when the suspend image is
2168 * restored) have the corresponding bits set in @bm (it must be
2169 * unitialized).
2170 *
2171 * NOTE: This function should not be called if there are no highmem
2172 * image pages.
2173 */
2174
2175 static unsigned int safe_highmem_pages;
2176
2177 static struct memory_bitmap *safe_highmem_bm;
2178
2179 static int
2180 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2181 {
2182 unsigned int to_alloc;
2183
2184 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2185 return -ENOMEM;
2186
2187 if (get_highmem_buffer(PG_SAFE))
2188 return -ENOMEM;
2189
2190 to_alloc = count_free_highmem_pages();
2191 if (to_alloc > *nr_highmem_p)
2192 to_alloc = *nr_highmem_p;
2193 else
2194 *nr_highmem_p = to_alloc;
2195
2196 safe_highmem_pages = 0;
2197 while (to_alloc-- > 0) {
2198 struct page *page;
2199
2200 page = alloc_page(__GFP_HIGHMEM);
2201 if (!swsusp_page_is_free(page)) {
2202 /* The page is "safe", set its bit in the bitmap */
2203 memory_bm_set_bit(bm, page_to_pfn(page));
2204 safe_highmem_pages++;
2205 }
2206 /* Mark the page as allocated */
2207 swsusp_set_page_forbidden(page);
2208 swsusp_set_page_free(page);
2209 }
2210 memory_bm_position_reset(bm);
2211 safe_highmem_bm = bm;
2212 return 0;
2213 }
2214
2215 /**
2216 * get_highmem_page_buffer - for a given highmem image page, find the
2217 * buffer that snapshot_write_next() should set for its caller to write to.
2218 *
2219 * If the page is to be saved to its "original" page frame or a copy of
2220 * the page is to be made in highmem, @buffer is returned. Otherwise,
2221 * the copy of the page is to be made in normal memory, so the address of
2222 * the copy is returned.
2223 *
2224 * If @buffer is returned, the caller of snapshot_write_next() will write
2225 * the page's contents to @buffer, so they will have to be copied to the
2226 * right location on the next call to snapshot_write_next(); this is done
2227 * with the help of copy_last_highmem_page(). For this purpose, if
2228 * @buffer is returned, @last_highmem_page is set to the page to which
2229 * the data will have to be copied from @buffer.
2230 */
2231
2232 static struct page *last_highmem_page;
2233
2234 static void *
2235 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2236 {
2237 struct highmem_pbe *pbe;
2238 void *kaddr;
2239
2240 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2241 /* We have allocated the "original" page frame and we can
2242 * use it directly to store the loaded page.
2243 */
2244 last_highmem_page = page;
2245 return buffer;
2246 }
2247 /* The "original" page frame has not been allocated and we have to
2248 * use a "safe" page frame to store the loaded page.
2249 */
2250 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2251 if (!pbe) {
2252 swsusp_free();
2253 return ERR_PTR(-ENOMEM);
2254 }
2255 pbe->orig_page = page;
2256 if (safe_highmem_pages > 0) {
2257 struct page *tmp;
2258
2259 /* Copy of the page will be stored in high memory */
2260 kaddr = buffer;
2261 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2262 safe_highmem_pages--;
2263 last_highmem_page = tmp;
2264 pbe->copy_page = tmp;
2265 } else {
2266 /* Copy of the page will be stored in normal memory */
2267 kaddr = safe_pages_list;
2268 safe_pages_list = safe_pages_list->next;
2269 pbe->copy_page = virt_to_page(kaddr);
2270 }
2271 pbe->next = highmem_pblist;
2272 highmem_pblist = pbe;
2273 return kaddr;
2274 }
2275
2276 /**
2277 * copy_last_highmem_page - copy the contents of a highmem image page
2278 * from @buffer, where the caller of snapshot_write_next() has placed
2279 * them, to the right location represented by @last_highmem_page.
2280 */
2281
2282 static void copy_last_highmem_page(void)
2283 {
2284 if (last_highmem_page) {
2285 void *dst;
2286
2287 dst = kmap_atomic(last_highmem_page);
2288 copy_page(dst, buffer);
2289 kunmap_atomic(dst);
2290 last_highmem_page = NULL;
2291 }
2292 }
2293
2294 static inline int last_highmem_page_copied(void)
2295 {
2296 return !last_highmem_page;
2297 }
2298
2299 static inline void free_highmem_data(void)
2300 {
2301 if (safe_highmem_bm)
2302 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2303
2304 if (buffer)
2305 free_image_page(buffer, PG_UNSAFE_CLEAR);
2306 }
2307 #else
2308 static inline int get_safe_write_buffer(void) { return 0; }
2309
2310 static unsigned int
2311 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2312
2313 static inline int
2314 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2315 {
2316 return 0;
2317 }
2318
2319 static inline void *
2320 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2321 {
2322 return ERR_PTR(-EINVAL);
2323 }
2324
2325 static inline void copy_last_highmem_page(void) {}
2326 static inline int last_highmem_page_copied(void) { return 1; }
2327 static inline void free_highmem_data(void) {}
2328 #endif /* CONFIG_HIGHMEM */
2329
2330 /**
2331 * prepare_image - use the memory bitmap @bm to mark the pages that will
2332 * be overwritten in the process of restoring the system memory state
2333 * from the suspend image ("unsafe" pages) and allocate memory for the
2334 * image.
2335 *
2336 * The idea is to allocate a new memory bitmap first and then allocate
2337 * as many pages as needed for the image data, but not to assign these
2338 * pages to specific tasks initially. Instead, we just mark them as
2339 * allocated and create a list of "safe" pages that will be used
2340 * later. On systems with high memory a list of "safe" highmem pages is
2341 * also created.
2342 */
2343
2344 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2345
2346 static int
2347 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2348 {
2349 unsigned int nr_pages, nr_highmem;
2350 struct linked_page *sp_list, *lp;
2351 int error;
2352
2353 /* If there is no highmem, the buffer will not be necessary */
2354 free_image_page(buffer, PG_UNSAFE_CLEAR);
2355 buffer = NULL;
2356
2357 nr_highmem = count_highmem_image_pages(bm);
2358 error = mark_unsafe_pages(bm);
2359 if (error)
2360 goto Free;
2361
2362 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2363 if (error)
2364 goto Free;
2365
2366 duplicate_memory_bitmap(new_bm, bm);
2367 memory_bm_free(bm, PG_UNSAFE_KEEP);
2368 if (nr_highmem > 0) {
2369 error = prepare_highmem_image(bm, &nr_highmem);
2370 if (error)
2371 goto Free;
2372 }
2373 /* Reserve some safe pages for potential later use.
2374 *
2375 * NOTE: This way we make sure there will be enough safe pages for the
2376 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2377 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2378 */
2379 sp_list = NULL;
2380 /* nr_copy_pages cannot be less than allocated_unsafe_pages */
2381 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2382 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2383 while (nr_pages > 0) {
2384 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2385 if (!lp) {
2386 error = -ENOMEM;
2387 goto Free;
2388 }
2389 lp->next = sp_list;
2390 sp_list = lp;
2391 nr_pages--;
2392 }
2393 /* Preallocate memory for the image */
2394 safe_pages_list = NULL;
2395 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2396 while (nr_pages > 0) {
2397 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2398 if (!lp) {
2399 error = -ENOMEM;
2400 goto Free;
2401 }
2402 if (!swsusp_page_is_free(virt_to_page(lp))) {
2403 /* The page is "safe", add it to the list */
2404 lp->next = safe_pages_list;
2405 safe_pages_list = lp;
2406 }
2407 /* Mark the page as allocated */
2408 swsusp_set_page_forbidden(virt_to_page(lp));
2409 swsusp_set_page_free(virt_to_page(lp));
2410 nr_pages--;
2411 }
2412 /* Free the reserved safe pages so that chain_alloc() can use them */
2413 while (sp_list) {
2414 lp = sp_list->next;
2415 free_image_page(sp_list, PG_UNSAFE_CLEAR);
2416 sp_list = lp;
2417 }
2418 return 0;
2419
2420 Free:
2421 swsusp_free();
2422 return error;
2423 }
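/*
 * Worked example for the reservation above (illustrative; assumes
 * 4 KiB pages and a 64-bit build where sizeof(struct pbe) == 24):
 *
 *	PBES_PER_LINKED_PAGE = LINKED_PAGE_DATA_SIZE / 24 = 4088 / 24 = 170
 *
 * so restoring 10000 conflicting pages reserves
 * DIV_ROUND_UP(10000, 170) = 59 linked pages for the PBE chain.
 */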
2424
2425 /**
2426 * get_buffer - compute the address that snapshot_write_next() should
2427 * set for its caller to write to.
2428 */
2429
2430 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2431 {
2432 struct pbe *pbe;
2433 struct page *page;
2434 unsigned long pfn = memory_bm_next_pfn(bm);
2435
2436 if (pfn == BM_END_OF_MAP)
2437 return ERR_PTR(-EFAULT);
2438
2439 page = pfn_to_page(pfn);
2440 if (PageHighMem(page))
2441 return get_highmem_page_buffer(page, ca);
2442
2443 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2444 /* We have allocated the "original" page frame and we can
2445 * use it directly to store the loaded page.
2446 */
2447 return page_address(page);
2448
2449 /* The "original" page frame has not been allocated and we have to
2450 * use a "safe" page frame to store the loaded page.
2451 */
2452 pbe = chain_alloc(ca, sizeof(struct pbe));
2453 if (!pbe) {
2454 swsusp_free();
2455 return ERR_PTR(-ENOMEM);
2456 }
2457 pbe->orig_address = page_address(page);
2458 pbe->address = safe_pages_list;
2459 safe_pages_list = safe_pages_list->next;
2460 pbe->next = restore_pblist;
2461 restore_pblist = pbe;
2462 return pbe->address;
2463 }
2464
2465 /**
2466 * snapshot_write_next - used for writing the system memory snapshot.
2467 *
2468 * On the first call, @handle should point to a zeroed
2469 * snapshot_handle structure. The structure gets updated and a pointer
2470 * to it should be passed to this function on every subsequent call.
2471 *
2472 * On success the function returns a positive number. Then, the caller
2473 * is allowed to write up to the returned number of bytes to the memory
2474 * location computed by the data_of() macro.
2475 *
2476 * The function returns 0 to indicate the "end of file" condition,
2477 * and a negative number is returned on error. In such cases the
2478 * structure pointed to by @handle is not updated and should not be used
2479 * any more.
2480 */
2481
2482 int snapshot_write_next(struct snapshot_handle *handle)
2483 {
2484 static struct chain_allocator ca;
2485 int error = 0;
2486
2487 /* Check if we have already loaded the entire image */
2488 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2489 return 0;
2490
2491 handle->sync_read = 1;
2492
2493 if (!handle->cur) {
2494 if (!buffer)
2495 /* This makes the buffer be freed by swsusp_free() */
2496 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2497
2498 if (!buffer)
2499 return -ENOMEM;
2500
2501 handle->buffer = buffer;
2502 } else if (handle->cur == 1) {
2503 error = load_header(buffer);
2504 if (error)
2505 return error;
2506
2507 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2508 if (error)
2509 return error;
2510
2511 /* Allocate buffer for page keys. */
2512 error = page_key_alloc(nr_copy_pages);
2513 if (error)
2514 return error;
2515
2516 } else if (handle->cur <= nr_meta_pages + 1) {
2517 error = unpack_orig_pfns(buffer, &copy_bm);
2518 if (error)
2519 return error;
2520
2521 if (handle->cur == nr_meta_pages + 1) {
2522 error = prepare_image(&orig_bm, &copy_bm);
2523 if (error)
2524 return error;
2525
2526 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2527 memory_bm_position_reset(&orig_bm);
2528 restore_pblist = NULL;
2529 handle->buffer = get_buffer(&orig_bm, &ca);
2530 handle->sync_read = 0;
2531 if (IS_ERR(handle->buffer))
2532 return PTR_ERR(handle->buffer);
2533 }
2534 } else {
2535 copy_last_highmem_page();
2536 /* Restore page key for data page (s390 only). */
2537 page_key_write(handle->buffer);
2538 handle->buffer = get_buffer(&orig_bm, &ca);
2539 if (IS_ERR(handle->buffer))
2540 return PTR_ERR(handle->buffer);
2541 if (handle->buffer != buffer)
2542 handle->sync_read = 0;
2543 }
2544 handle->cur++;
2545 return PAGE_SIZE;
2546 }
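/*
 * Minimal sketch of the write-side protocol, mirroring the read loop;
 * read_chunk() is a hypothetical data source, not part of this file:
 *
 *	struct snapshot_handle handle = {};
 *	int n, error = 0;
 *
 *	while ((n = snapshot_write_next(&handle)) > 0) {
 *		if (read_chunk(data_of(handle), n) < n)
 *			break;	(short read: image truncated)
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (n >= 0 && !snapshot_image_loaded(&handle))
 *		error = -ENODATA;
 */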
2547
2548 /**
2549 * snapshot_write_finalize - must be called after the last call to
2550 * snapshot_write_next() in case the last page in the image happens
2551 * to be a highmem page and its contents should be stored in the
2552 * highmem. Additionally, it releases the memory that will not be
2553 * used any more.
2554 */
2555
2556 void snapshot_write_finalize(struct snapshot_handle *handle)
2557 {
2558 copy_last_highmem_page();
2559 /* Restore page key for data page (s390 only). */
2560 page_key_write(handle->buffer);
2561 page_key_free();
2562 /* Free only if we have loaded the image entirely */
2563 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2564 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2565 free_highmem_data();
2566 }
2567 }
2568
2569 int snapshot_image_loaded(struct snapshot_handle *handle)
2570 {
2571 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2572 handle->cur <= nr_meta_pages + nr_copy_pages);
2573 }
2574
2575 #ifdef CONFIG_HIGHMEM
2576 /* Assumes that @buf is ready and points to a "safe" page */
2577 static inline void
2578 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2579 {
2580 void *kaddr1, *kaddr2;
2581
2582 kaddr1 = kmap_atomic(p1);
2583 kaddr2 = kmap_atomic(p2);
2584 copy_page(buf, kaddr1);
2585 copy_page(kaddr1, kaddr2);
2586 copy_page(kaddr2, buf);
2587 kunmap_atomic(kaddr2);
2588 kunmap_atomic(kaddr1);
2589 }
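/*
 * The helper above is the classic three-copy swap through a bounce
 * buffer: for highmem pages A and B and a "safe" page T it performs
 *
 *	T <- A, A <- B, B <- T
 *
 * keeping both kmap_atomic() windows mapped only for the duration of
 * the copies.
 */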
2590
2591 /**
2592 * restore_highmem - for each highmem page that was allocated before
2593 * the suspend and included in the suspend image, and has also been
2594 * allocated by the "resume" kernel, swap its current (i.e. "before
2595 * resume") contents with the previous (i.e. "before suspend") ones.
2596 *
2597 * If the resume eventually fails, we can call this function once
2598 * again and restore the "before resume" highmem state.
2599 */
2600
2601 int restore_highmem(void)
2602 {
2603 struct highmem_pbe *pbe = highmem_pblist;
2604 void *buf;
2605
2606 if (!pbe)
2607 return 0;
2608
2609 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2610 if (!buf)
2611 return -ENOMEM;
2612
2613 while (pbe) {
2614 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2615 pbe = pbe->next;
2616 }
2617 free_image_page(buf, PG_UNSAFE_CLEAR);
2618 return 0;
2619 }
2620 #endif /* CONFIG_HIGHMEM */