s390/pci: update function handle after resume from hibernate
[deliverable/linux.git] / arch / s390 / kernel / suspend.c
1 /*
2 * Suspend support specific for s390.
3 *
4 * Copyright IBM Corp. 2009
5 *
6 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
7 */
8
9 #include <linux/pfn.h>
10 #include <linux/suspend.h>
11 #include <linux/mm.h>
12 #include <asm/ctl_reg.h>
13 #include <asm/ipl.h>
14 #include <asm/cio.h>
15 #include <asm/pci.h>
16
17 /*
18 * References to section boundaries
19 */
20 extern const void __nosave_begin, __nosave_end;
21
22 /*
23 * The restore of the saved pages in an hibernation image will set
24 * the change and referenced bits in the storage key for each page.
25 * Overindication of the referenced bits after an hibernation cycle
26 * does not cause any harm but the overindication of the change bits
27 * would cause trouble.
28 * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
29 * page to the most significant byte of the associated page frame
30 * number in the hibernation image.
31 */
32
/*
 * Key storage is allocated as a linked list of pages.
 * The size of the keys array is (PAGE_SIZE - sizeof(long))
 */
struct page_key_data {
	struct page_key_data *next;	/* next array page in the list */
	unsigned char data[];		/* flexible array: one key byte per image page */
};

/* Number of storage key bytes that fit in one list page. */
#define PAGE_KEY_DATA_SIZE	(PAGE_SIZE - sizeof(struct page_key_data *))

static struct page_key_data *page_key_data;	/* head of the key array list */
/* Read/write cursors: current array page and index within it. */
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
/* Zeroed buffer (LC_ORDER pages) used while lowcore is restored on resume. */
unsigned long suspend_zero_pages;
48
49 /*
50 * For each page in the hibernation image one additional byte is
51 * stored in the most significant byte of the page frame number.
52 * On suspend no additional memory is required but on resume the
53 * keys need to be memorized until the page data has been restored.
54 * Only then can the storage keys be set to their old state.
55 */
56 unsigned long page_key_additional_pages(unsigned long pages)
57 {
58 return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
59 }
60
61 /*
62 * Free page_key_data list of arrays.
63 */
64 void page_key_free(void)
65 {
66 struct page_key_data *pkd;
67
68 while (page_key_data) {
69 pkd = page_key_data;
70 page_key_data = pkd->next;
71 free_page((unsigned long) pkd);
72 }
73 }
74
75 /*
76 * Allocate page_key_data list of arrays with enough room to store
77 * one byte for each page in the hibernation image.
78 */
79 int page_key_alloc(unsigned long pages)
80 {
81 struct page_key_data *pk;
82 unsigned long size;
83
84 size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
85 while (size--) {
86 pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
87 if (!pk) {
88 page_key_free();
89 return -ENOMEM;
90 }
91 pk->next = page_key_data;
92 page_key_data = pk;
93 }
94 page_key_rp = page_key_wp = page_key_data;
95 page_key_rx = page_key_wx = 0;
96 return 0;
97 }
98
/*
 * Save the storage key into the upper 8 bits of the page frame number.
 */
void page_key_read(unsigned long *pfn)
{
	struct page *page = pfn_to_page(*pfn);
	unsigned long addr = (unsigned long) page_address(page);

	/* Stash the key in the most significant byte of *pfn. */
	*(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
}
109
110 /*
111 * Extract the storage key from the upper 8 bits of the page frame number
112 * and store it in the page_key_data list of arrays.
113 */
114 void page_key_memorize(unsigned long *pfn)
115 {
116 page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
117 *(unsigned char *) pfn = 0;
118 if (++page_key_wx < PAGE_KEY_DATA_SIZE)
119 return;
120 page_key_wp = page_key_wp->next;
121 page_key_wx = 0;
122 }
123
124 /*
125 * Get the next key from the page_key_data list of arrays and set the
126 * storage key of the page referred by @address. If @address refers to
127 * a "safe" page the swsusp_arch_resume code will transfer the storage
128 * key from the buffer page to the original page.
129 */
130 void page_key_write(void *address)
131 {
132 page_set_storage_key((unsigned long) address,
133 page_key_rp->data[page_key_rx], 0);
134 if (++page_key_rx >= PAGE_KEY_DATA_SIZE)
135 return;
136 page_key_rp = page_key_rp->next;
137 page_key_rx = 0;
138 }
139
140 int pfn_is_nosave(unsigned long pfn)
141 {
142 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
143 unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
144
145 /* Always save lowcore pages (LC protection might be enabled). */
146 if (pfn <= LC_PAGES)
147 return 0;
148 if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
149 return 1;
150 /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
151 if (tprot(PFN_PHYS(pfn)))
152 return 1;
153 return 0;
154 }
155
156 /*
157 * PM notifier callback for suspend
158 */
159 static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
160 void *ptr)
161 {
162 switch (action) {
163 case PM_SUSPEND_PREPARE:
164 case PM_HIBERNATION_PREPARE:
165 suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
166 if (!suspend_zero_pages)
167 return NOTIFY_BAD;
168 break;
169 case PM_POST_SUSPEND:
170 case PM_POST_HIBERNATION:
171 free_pages(suspend_zero_pages, LC_ORDER);
172 break;
173 default:
174 return NOTIFY_DONE;
175 }
176 return NOTIFY_OK;
177 }
178
/* Register the suspend/hibernate PM notifier during boot. */
static int __init suspend_pm_init(void)
{
	pm_notifier(suspend_pm_cb, 0);
	return 0;
}
arch_initcall(suspend_pm_init);
185
void save_processor_state(void)
{
	/* swsusp_arch_suspend() actually saves all cpu register contents.
	 * Machine checks must be disabled since swsusp_arch_suspend() stores
	 * register contents to their lowcore save areas. That's the same
	 * place where register contents on machine checks would be saved.
	 * To avoid register corruption disable machine checks.
	 * We must also disable machine checks in the new psw mask for
	 * program checks, since swsusp_arch_suspend() may generate program
	 * checks. Disabling machine checks for all other new psw masks is
	 * just paranoia.
	 */
	local_mcck_disable();
	/* Disable lowcore protection (control register 0, bit 28). */
	__ctl_clear_bit(0,28);
	/* Strip the machine-check mask bit from every new-PSW template. */
	S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
206
/* Undo save_processor_state() in reverse order: re-arm machine checks
 * in the new-PSW templates, re-enable lowcore protection, then allow
 * machine checks again.
 */
void restore_processor_state(void)
{
	S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
	/* Enable lowcore protection (control register 0, bit 28). */
	__ctl_set_bit(0,28);
	local_mcck_enable();
}
217
/* Called at the end of swsusp_arch_resume */
void s390_early_resume(void)
{
	lgr_info_log();
	/* Reinitialize the channel subsystem after the image restore. */
	channel_subsystem_reinit();
	/* Rescan PCI functions so stale function handles are refreshed
	 * (see the commit subject: handles change across hibernation). */
	zpci_rescan();
}
This page took 0.048344 seconds and 5 git commands to generate.