]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/s390/kernel/suspend.c
[S390] sparse: fix sparse ANSI-C warnings
[mirror_ubuntu-artful-kernel.git] / arch / s390 / kernel / suspend.c
CommitLineData
/*
 * Suspend support specific for s390.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
 */

#include <linux/pfn.h>
#include <linux/mm.h>
#include <asm/system.h>

/*
 * References to section boundaries
 */
extern const void __nosave_begin, __nosave_end;
/*
 * Restoring the pages of a hibernation image sets the change and
 * referenced bits in each page's storage key. Extra referenced bits
 * after a hibernation cycle are harmless, but extra change bits are
 * not. The ARCH_SAVE_PAGE_KEYS hooks are therefore used to save the
 * storage key of every page into the most significant byte of the
 * page frame number stored in the hibernation image.
 */

/*
 * Storage keys are buffered in a singly linked list of whole pages.
 * Each node carries a next pointer followed by a flexible byte array
 * filling the rest of the page, i.e. (PAGE_SIZE - sizeof(long)) keys.
 */
struct page_key_data {
	struct page_key_data *next;
	unsigned char data[];
};

#define PAGE_KEY_DATA_SIZE	(PAGE_SIZE - sizeof(struct page_key_data *))

/* List head plus independent read/write cursors (node, index). */
static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
43
44/*
45 * For each page in the hibernation image one additional byte is
46 * stored in the most significant byte of the page frame number.
47 * On suspend no additional memory is required but on resume the
48 * keys need to be memorized until the page data has been restored.
49 * Only then can the storage keys be set to their old state.
50 */
51unsigned long page_key_additional_pages(unsigned long pages)
52{
53 return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
54}
55
56/*
57 * Free page_key_data list of arrays.
58 */
59void page_key_free(void)
60{
61 struct page_key_data *pkd;
62
63 while (page_key_data) {
64 pkd = page_key_data;
65 page_key_data = pkd->next;
66 free_page((unsigned long) pkd);
67 }
68}
69
70/*
71 * Allocate page_key_data list of arrays with enough room to store
72 * one byte for each page in the hibernation image.
73 */
74int page_key_alloc(unsigned long pages)
75{
76 struct page_key_data *pk;
77 unsigned long size;
78
79 size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
80 while (size--) {
81 pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
82 if (!pk) {
83 page_key_free();
84 return -ENOMEM;
85 }
86 pk->next = page_key_data;
87 page_key_data = pk;
88 }
89 page_key_rp = page_key_wp = page_key_data;
90 page_key_rx = page_key_wx = 0;
91 return 0;
92}
93
/*
 * Fetch the storage key of the page *@pfn refers to and stash it in
 * the upper 8 bits (the first byte, on big-endian s390) of the page
 * frame number itself.
 */
void page_key_read(unsigned long *pfn)
{
	unsigned long addr = (unsigned long) page_address(pfn_to_page(*pfn));

	*(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
}
104
105/*
106 * Extract the storage key from the upper 8 bits of the page frame number
107 * and store it in the page_key_data list of arrays.
108 */
109void page_key_memorize(unsigned long *pfn)
110{
111 page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
112 *(unsigned char *) pfn = 0;
113 if (++page_key_wx < PAGE_KEY_DATA_SIZE)
114 return;
115 page_key_wp = page_key_wp->next;
116 page_key_wx = 0;
117}
118
119/*
120 * Get the next key from the page_key_data list of arrays and set the
121 * storage key of the page referred by @address. If @address refers to
122 * a "safe" page the swsusp_arch_resume code will transfer the storage
123 * key from the buffer page to the original page.
124 */
125void page_key_write(void *address)
126{
127 page_set_storage_key((unsigned long) address,
128 page_key_rp->data[page_key_rx], 0);
129 if (++page_key_rx >= PAGE_KEY_DATA_SIZE)
130 return;
131 page_key_rp = page_key_rp->next;
132 page_key_rx = 0;
133}
134
c48ff644
HC
135int pfn_is_nosave(unsigned long pfn)
136{
2573a575
HC
137 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
138 unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
c48ff644 139
2573a575
HC
140 /* Always save lowcore pages (LC protection might be enabled). */
141 if (pfn <= LC_PAGES)
142 return 0;
c48ff644
HC
143 if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
144 return 1;
2573a575
HC
145 /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
146 if (tprot(PFN_PHYS(pfn)))
c48ff644
HC
147 return 1;
148 return 0;
149}
155af2f9 150
155af2f9
HJP
151void save_processor_state(void)
152{
c63b196a
HC
153 /* swsusp_arch_suspend() actually saves all cpu register contents.
154 * Machine checks must be disabled since swsusp_arch_suspend() stores
155 * register contents to their lowcore save areas. That's the same
156 * place where register contents on machine checks would be saved.
157 * To avoid register corruption disable machine checks.
158 * We must also disable machine checks in the new psw mask for
159 * program checks, since swsusp_arch_suspend() may generate program
160 * checks. Disabling machine checks for all other new psw masks is
161 * just paranoia.
155af2f9 162 */
c63b196a
HC
163 local_mcck_disable();
164 /* Disable lowcore protection */
165 __ctl_clear_bit(0,28);
166 S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
167 S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
168 S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
169 S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
155af2f9
HJP
170}
171
155af2f9
HJP
172void restore_processor_state(void)
173{
c63b196a
HC
174 S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
175 S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
176 S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
177 S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
178 /* Enable lowcore protection */
179 __ctl_set_bit(0,28);
180 local_mcck_enable();
155af2f9 181}