[S390] New header file ipl.h
arch/s390/kernel/early.c
/*
 * arch/s390/kernel/early.c
 *
 * Copyright IBM Corp. 2007
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>

/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
#define DEFSYS_CMD_SIZE		96
#define SAVESYS_CMD_SIZE	32

char kernel_nss_name[NSS_NAME_SIZE + 1];

#ifdef CONFIG_SHARED_KERNEL
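/*
 * The NSS is created by issuing two CP commands through __cpcmd(): DEFSYS to
 * define the saved system segments and SAVESYS to save and re-IPL it.
 * Illustrative example with made-up page frame numbers: booting with
 * "savesys=LINUX", _stext at pfn 0x10, _eshared at pfn 0x300 and _end at
 * pfn 0x500 would build roughly
 *
 *	DEFSYS LINUX 00000-0000F EW 00010-002FF SR 00300-00500 EW MINSIZE=0005120K
 *	SAVESYS LINUX
 *	IPL LINUX
 *
 * i.e. the range from _stext to _eshared is defined shared read-only (SR),
 * everything else exclusive-write (EW).
 */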
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	char *savesys_ptr;
	char upper_command_line[COMMAND_LINE_SIZE];
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(COMMAND_LINE); i++)
		upper_command_line[i] = toupper(COMMAND_LINE[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");

	if (!savesys_ptr)
		return;

	savesys_ptr += 8;	/* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;

	sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
		kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
		eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
			sinitrd_pfn, einitrd_pfn);
	}
#endif

	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
		kernel_nss_name, kernel_nss_name);

	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0)
		return;

	__cpcmd(savesys_cmd, NULL, 0, &response);

	if (response != strlen(savesys_cmd))
		return;

	ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */

/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, _end - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
}

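/*
 * Note: the literal flag values below (1 and 4) correspond to the
 * MACHINE_IS_VM and MACHINE_IS_P390 tests on machine_flags used elsewhere
 * in the architecture code; a CPU version code of 0xff identifies a z/VM
 * guest.
 */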
static noinline __init void detect_machine_type(void)
{
	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;

	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);

	/* Running under z/VM ? */
	if (cpuinfo->cpu_id.version == 0xff)
		machine_flags |= 1;

	/* Running on a P/390 ? */
	if (cpuinfo->cpu_id.machine == 0x7490)
		machine_flags |= 4;
}

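/*
 * Try to determine the memory size with diagnose 0x260, subcode 0xc. If the
 * diagnose is not available, the resulting program check is caught via the
 * exception table entry and -ENOSYS is returned, so the caller falls back to
 * the TPROT based scan in find_memory_chunks(). The fast path is also skipped
 * when a kernel NSS has just been created (IPL_NSS_VALID).
 */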
static noinline __init int memory_fast_detect(void)
{
	unsigned long val0 = 0;
	unsigned long val1 = 0xc;
	int ret = -ENOSYS;

	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	asm volatile(
		"	diag	%1,%2,0x260\n"
		"0:	lhi	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");

	if (ret || val0 != val1)
		return -ENOSYS;

	memory_chunk[0].size = val0;
	return 0;
}

#define ADDR2G	(1UL << 31)

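/*
 * Calculate the memory size from the SCLP read-info response: the size of a
 * single storage increment (rnsize or rnsize2, in MB) multiplied by the
 * number of increments (rnmax or rnmax2). A response code other than 0x10
 * means the earlier sclp_readinfo_early() call failed and 0 is returned.
 */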
static noinline __init unsigned long sclp_memory_detect(void)
{
	struct sclp_readinfo_sccb *sccb;
	unsigned long long memsize;

	sccb = &s390_readinfo_sccb;

	if (sccb->header.response_code != 0x10)
		return 0;

	if (sccb->rnsize)
		memsize = sccb->rnsize << 20;
	else
		memsize = sccb->rnsize2 << 20;
	if (sccb->rnmax)
		memsize *= sccb->rnmax;
	else
		memsize *= sccb->rnmax2;
#ifndef CONFIG_64BIT
	/*
	 * Can't deal with more than 2G in 31 bit addressing mode, so
	 * limit the value in order to avoid strange side effects.
	 */
	if (memsize > ADDR2G)
		memsize = ADDR2G;
#endif
	return (unsigned long) memsize;
}

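/*
 * Probe a single address with TEST PROTECTION. The condition code is
 * returned: 0 for read/write storage, 1 for read-only storage, other values
 * for protected or unavailable storage. If the access raises an addressing
 * exception (no storage at this address), the exception table fixup skips
 * the ipm/srl sequence and the initial value -1 is returned instead.
 */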
static inline __init unsigned long __tprot(unsigned long addr)
{
	int cc = -1;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (addr) : "cc");
	return (unsigned long)cc;
}

/* Checking memory in 128KB increments. */
#define CHUNK_INCR	(1UL << 17)

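/*
 * Scan storage in CHUNK_INCR steps. Whenever the TPROT condition code
 * changes, the preceding run of identical results is recorded as one entry
 * in memory_chunk[]; only read/write and read-only areas are kept. The scan
 * normally ends at the first hole. If the SCLP-reported size (memsize) is
 * known, holes below that size (e.g. the hsa) are skipped and scanning
 * continues up to memsize.
 */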
static noinline __init void find_memory_chunks(unsigned long memsize)
{
	unsigned long addr = 0, old_addr = 0;
	unsigned long old_cc = CHUNK_READ_WRITE;
	unsigned long cc;
	int chunk = 0;

	while (chunk < MEMORY_CHUNKS) {
		cc = __tprot(addr);
		while (cc == old_cc) {
			addr += CHUNK_INCR;
			cc = __tprot(addr);
#ifndef CONFIG_64BIT
			if (addr == ADDR2G)
				break;
#endif
		}

		if (old_addr != addr &&
		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
			memory_chunk[chunk].addr = old_addr;
			memory_chunk[chunk].size = addr - old_addr;
			memory_chunk[chunk].type = old_cc;
			chunk++;
		}

		old_addr = addr;
		old_cc = cc;

#ifndef CONFIG_64BIT
		if (addr == ADDR2G)
			break;
#endif
		/*
		 * Finish memory detection at the first hole, unless
		 * - we reached the hsa -> skip it.
		 * - we know there must be more.
		 */
		if (cc == -1UL && !memsize && old_addr != ADDR2G)
			break;
		if (memsize && addr >= memsize)
			break;
	}
}

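/*
 * Minimal program check handler for the memory detection above: look up the
 * faulting address in the kernel exception table and continue at the fixup
 * address. Without a matching fixup entry the machine is stopped with a
 * disabled wait PSW.
 */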
static __init void early_pgm_check_handler(void)
{
	unsigned long addr;
	const struct exception_table_entry *fixup;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);
	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}

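/*
 * Install temporary external interrupt and program check new PSWs that point
 * to the minimal base handlers, and route program checks to
 * early_pgm_check_handler() so that the probing code above can rely on
 * exception table fixups before the real lowcore is set up.
 */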
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
}

/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	unsigned long memsize;

	ipl_save_parameters();
	clear_bss_section();
	init_kernel_storage_key();
	lockdep_init();
	lockdep_off();
	detect_machine_type();
	create_kernel_nss();
	sort_main_extable();
	setup_lowcore_early();
	sclp_readinfo_early();
	memsize = sclp_memory_detect();
	if (memory_fast_detect() < 0)
		find_memory_chunks(memsize);
	lockdep_on();
}