[S390] __syscall_return error check.
deliverable/linux.git: arch/s390/kernel/setup.c
/*
 * arch/s390/kernel/setup.c
 *
 * S390 version
 *   Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Hartmut Penner (hp@de.ibm.com),
 *              Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "arch/i386/kernel/setup.c"
 *   Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
unsigned int console_devno = -1;
unsigned int console_irq = -1;
unsigned long memory_size = 0;
unsigned long machine_flags = 0;
struct {
        unsigned long addr, size, type;
} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end;

/*
 * This is set up by the setup-routine at boot-time
 * for S390 need to find out, what we have to setup
 * using address 0x10400 ...
 */

#include <asm/setup.h>

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name  = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init (void)
{
        int addr = hard_smp_processor_id();

        /*
         * Store processor id in lowcore (used e.g. in timer_interrupt)
         */
        asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
        S390_lowcore.cpu_data.cpu_addr = addr;

        /*
         * Force FPU initialization:
         */
        clear_thread_flag(TIF_USEDFPU);
        clear_used_math();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
        enter_lazy_tlb(&init_mm, current);
}

/*
 * VM halt and poweroff setup routines
 */
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";

static inline void strncpy_skip_quote(char *dst, char *src, int n)
{
        int sx, dx;

        dx = 0;
        for (sx = 0; src[sx] != 0; sx++) {
                if (src[sx] == '"') continue;
                dst[dx++] = src[sx];
                if (dx >= n) break;
        }
}
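
/*
 * Note: strncpy_skip_quote() copies at most n bytes, drops double quotes,
 * and, unlike strncpy(), does not NUL-terminate the destination. The
 * vmhalt=/vmpoff= handlers below pass n = 127 and terminate the 128-byte
 * buffers themselves.
 */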

static int __init vmhalt_setup(char *str)
{
        strncpy_skip_quote(vmhalt_cmd, str, 127);
        vmhalt_cmd[127] = 0;
        return 1;
}

__setup("vmhalt=", vmhalt_setup);

static int __init vmpoff_setup(char *str)
{
        strncpy_skip_quote(vmpoff_cmd, str, 127);
        vmpoff_cmd[127] = 0;
        return 1;
}

__setup("vmpoff=", vmpoff_setup);
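
/*
 * Illustrative usage (the CP command is an example, not a default): booting
 * a z/VM guest with
 *
 *      vmhalt="LOGOFF"
 *
 * makes the halt path issue that CP command via cpcmd() before stopping the
 * CPU; vmpoff= works the same way for power-off. Surrounding double quotes
 * are stripped by strncpy_skip_quote().
 */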

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
        int vdev;

        vdev = simple_strtoul(str, &str, 0);
        if (vdev >= 0 && vdev < 65536) {
                console_devno = vdev;
                console_irq = -1;
        }
        return 1;
}

__setup("condev=", condev_setup);

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
        if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
                SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
        if (strncmp(str, "3215", 5) == 0)
                SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
        if (strncmp(str, "3270", 5) == 0)
                SET_CONSOLE_3270;
#endif
        return 1;
}

__setup("conmode=", conmode_setup);
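
/*
 * Illustrative usage: "conmode=sclp" (or the older "hwc") selects the SCLP
 * console, "conmode=3215"/"conmode=3270" the corresponding terminal driver,
 * and "condev=0x001f" (any value in 0..0xffff) picks the console device
 * number explicitly. The device number shown is an example, not a default.
 */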

static void __init conmode_default(void)
{
        char query_buffer[1024];
        char *ptr;

        if (MACHINE_IS_VM) {
                __cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
                console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
                ptr = strstr(query_buffer, "SUBCHANNEL =");
                console_irq = simple_strtoul(ptr + 13, NULL, 16);
                __cpcmd("QUERY TERM", query_buffer, 1024, NULL);
                ptr = strstr(query_buffer, "CONMODE");
                /*
                 * Set the conmode to 3215 so that the device recognition
                 * will set the cu_type of the console to 3215. If the
                 * conmode is 3270 and we don't set it back then both
                 * 3215 and the 3270 driver will try to access the console
                 * device (3215 as console and 3270 as normal tty).
                 */
                __cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
                if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                        return;
                }
                if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                } else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                }
        } else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
                SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                SET_CONSOLE_3270;
#endif
        } else {
#if defined(CONFIG_SCLP_CONSOLE)
                SET_CONSOLE_SCLP;
#endif
        }
}

#ifdef CONFIG_SMP
extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
extern void reipl(unsigned long devno);
extern void reipl_diag(void);
static void do_machine_restart_nonsmp(char * __unused)
{
        reipl_diag();

        if (MACHINE_IS_VM)
                cpcmd ("IPL", NULL, 0, NULL);
        else
                reipl (0x10000 | S390_lowcore.ipl_device);
}

static void do_machine_halt_nonsmp(void)
{
        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                cpcmd(vmhalt_cmd, NULL, 0, NULL);
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

static void do_machine_power_off_nonsmp(void)
{
        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                cpcmd(vmpoff_cmd, NULL, 0, NULL);
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
        console_unblank();
        _machine_restart(command);
}

void machine_halt(void)
{
        console_unblank();
        _machine_halt();
}

void machine_power_off(void)
{
        console_unblank();
        _machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;

static void __init
add_memory_hole(unsigned long start, unsigned long end)
{
        unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;

        if (end <= dma_pfn)
                zholes_size[ZONE_DMA] += end - start + 1;
        else if (start > dma_pfn)
                zholes_size[ZONE_NORMAL] += end - start + 1;
        else {
                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
                zholes_size[ZONE_NORMAL] += end - dma_pfn;
        }
}
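
/*
 * Accounting example, derived from the checks above: with dma_pfn = D, a
 * hole entirely at or below D is charged to ZONE_DMA, one entirely above D
 * to ZONE_NORMAL, and a hole straddling D is split (D - start + 1 pages to
 * ZONE_DMA, end - D pages to ZONE_NORMAL).
 */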

static int __init early_parse_mem(char *p)
{
        memory_end = memparse(p, &p);
        return 0;
}
early_param("mem", early_parse_mem);
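
/*
 * Example: booting with "mem=512M" caps memory_end at 512MB; memparse()
 * accepts the usual K/M/G suffixes. The value is masked and clamped again
 * in setup_arch() below.
 */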

/*
 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
 */
static int __init early_parse_ipldelay(char *p)
{
        unsigned long delay = 0;

        delay = simple_strtoul(p, &p, 0);

        switch (*p) {
        case 's':
        case 'S':
                delay *= 1000000;
                break;
        case 'm':
        case 'M':
                delay *= 60 * 1000000;
        }

        /* now wait for the requested amount of time */
        udelay(delay);

        return 0;
}
early_param("ipldelay", early_parse_ipldelay);
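
/*
 * Example: "ipldelay=30s" spins for roughly 30 seconds and "ipldelay=2m"
 * for roughly two minutes before early setup continues; the wait is a
 * udelay() busy loop on the boot CPU.
 */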

static void __init
setup_lowcore(void)
{
        struct _lowcore *lc;
        int lc_pages;

        /*
         * Setup lowcore for boot cpu
         */
        lc_pages = sizeof(void *) == 8 ? 2 : 1;
        lc = (struct _lowcore *)
                __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
        memset(lc, 0, lc_pages * PAGE_SIZE);
        lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = PSW_KERNEL_BITS;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
        lc->program_new_psw.mask = PSW_KERNEL_BITS;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
        lc->mcck_new_psw.mask =
                PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = PSW_KERNEL_BITS;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->ipl_device = S390_lowcore.ipl_device;
        lc->jiffy_timer = -1LL;
        lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
        lc->async_stack = (unsigned long)
                __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
        lc->panic_stack = (unsigned long)
                __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
        lc->current_task = (unsigned long) init_thread_union.thread_info.task;
        lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                lc->extended_save_area_addr = (__u32)
                        __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
                /* enable extended save area */
                ctl_set_bit(14, 29);
        }
#endif
        set_prefix((u32)(unsigned long) lc);
}
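
/*
 * Note: the structure filled in above is the boot CPU's lowcore (prefix
 * area). The new PSWs installed here are what the hardware loads on
 * external, svc, program, machine-check and I/O interruptions, and
 * set_prefix() points the CPU's prefix register at this area.
 */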

static void __init
setup_resources(void)
{
        struct resource *res;
        int i;

        code_resource.start = (unsigned long) &_text;
        code_resource.end = (unsigned long) &_etext - 1;
        data_resource.start = (unsigned long) &_etext;
        data_resource.end = (unsigned long) &_edata - 1;

        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                res = alloc_bootmem_low(sizeof(struct resource));
                res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
                switch (memory_chunk[i].type) {
                case CHUNK_READ_WRITE:
                        res->name = "System RAM";
                        break;
                case CHUNK_READ_ONLY:
                        res->name = "System ROM";
                        res->flags |= IORESOURCE_READONLY;
                        break;
                default:
                        res->name = "reserved";
                }
                res->start = memory_chunk[i].addr;
                res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
                request_resource(&iomem_resource, res);
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
}
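
/*
 * The resources registered above are what /proc/iomem later shows, roughly
 * (addresses are illustrative only):
 *
 *      00000000-1fffffff : System RAM
 *        00010000-002fffff : Kernel code
 *        00300000-003fffff : Kernel data
 */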

static void __init
setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long start_pfn, end_pfn, init_pfn;
        unsigned long last_rw_end;
        int i;

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        end_pfn = max_pfn = memory_end >> PAGE_SHIFT;

        /* Initialize storage key for kernel pages */
        for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
                page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = init_bootmem(start_pfn, end_pfn);

        /*
         * Register RAM areas with the bootmem allocator.
         */
        last_rw_end = start_pfn;

        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                unsigned long start_chunk, end_chunk;

                if (memory_chunk[i].type != CHUNK_READ_WRITE)
                        continue;
                start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
                start_chunk >>= PAGE_SHIFT;
                end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
                end_chunk >>= PAGE_SHIFT;
                if (start_chunk < start_pfn)
                        start_chunk = start_pfn;
                if (end_chunk > end_pfn)
                        end_chunk = end_pfn;
                if (start_chunk < end_chunk) {
                        /* Initialize storage key for RAM pages */
                        for (init_pfn = start_chunk ; init_pfn < end_chunk;
                             init_pfn++)
                                page_set_storage_key(init_pfn << PAGE_SHIFT,
                                                     PAGE_DEFAULT_KEY);
                        free_bootmem(start_chunk << PAGE_SHIFT,
                                     (end_chunk - start_chunk) << PAGE_SHIFT);
                        if (last_rw_end < start_chunk)
                                add_memory_hole(last_rw_end, start_chunk - 1);
                        last_rw_end = end_chunk;
                }
        }

        psw_set_key(PAGE_DEFAULT_KEY);

        if (last_rw_end < end_pfn - 1)
                add_memory_hole(last_rw_end, end_pfn - 1);

        /*
         * Reserve the bootmem bitmap itself as well. We do this in two
         * steps (first step was init_bootmem()) because this catches
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
        if (INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= memory_end) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE);
                        initrd_start = INITRD_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               initrd_start + INITRD_SIZE, memory_end);
                        initrd_start = initrd_end = 0;
                }
        }
#endif
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init
setup_arch(char **cmdline_p)
{
        /*
         * print what head.S has found out about the machine
         */
#ifndef CONFIG_64BIT
        printk((MACHINE_IS_VM) ?
               "We are running under VM (31 bit mode)\n" :
               "We are running native (31 bit mode)\n");
        printk((MACHINE_HAS_IEEE) ?
               "This machine has an IEEE fpu\n" :
               "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
        printk((MACHINE_IS_VM) ?
               "We are running under VM (64 bit mode)\n" :
               "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

        /* Save unparsed command line copy for /proc/cmdline */
        strlcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);

        *cmdline_p = COMMAND_LINE;
        *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

        ROOT_DEV = Root_RAM0;

        init_mm.start_code = PAGE_OFFSET;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        memory_end = memory_size;

        parse_early_param();

#ifndef CONFIG_64BIT
        memory_end &= ~0x400000UL;

        /*
         * We need some free virtual space to be able to do vmalloc.
         * On a machine with 2GB memory we make sure that we have at
         * least 128 MB free space for vmalloc.
         */
        if (memory_end > 1920*1024*1024)
                memory_end = 1920*1024*1024;
#else /* CONFIG_64BIT */
        memory_end &= ~0x200000UL;
#endif /* CONFIG_64BIT */

        setup_memory();
        setup_resources();
        setup_lowcore();

        cpu_init();
        __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
        smp_setup_cpu_possible_map();

        /*
         * Create kernel page tables and switch to virtual addressing.
         */
        paging_init();

        /* Setup default console */
        conmode_default();
}

void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
        printk("cpu %d "
#ifdef CONFIG_SMP
               "phys_idx=%d "
#endif
               "vers=%02X ident=%06X machine=%04X unused=%04X\n",
               cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
               cpuinfo->cpu_addr,
#endif
               cpuinfo->cpu_id.version,
               cpuinfo->cpu_id.ident,
               cpuinfo->cpu_id.machine,
               cpuinfo->cpu_id.unused);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_S390 *cpuinfo;
        unsigned long n = (unsigned long) v - 1;

        preempt_disable();
        if (!n) {
                seq_printf(m, "vendor_id       : IBM/S390\n"
                           "# processors    : %i\n"
                           "bogomips per cpu: %lu.%02lu\n",
                           num_online_cpus(), loops_per_jiffy/(500000/HZ),
                           (loops_per_jiffy/(5000/HZ))%100);
        }
        if (cpu_online(n)) {
#ifdef CONFIG_SMP
                if (smp_processor_id() == n)
                        cpuinfo = &S390_lowcore.cpu_data;
                else
                        cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
                cpuinfo = &S390_lowcore.cpu_data;
#endif
                seq_printf(m, "processor %li: "
                           "version = %02X, "
                           "identification = %06X, "
                           "machine = %04X\n",
                           n, cpuinfo->cpu_id.version,
                           cpuinfo->cpu_id.ident,
                           cpuinfo->cpu_id.machine);
        }
        preempt_enable();
        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = show_cpuinfo,
};
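
/*
 * Example /proc/cpuinfo output produced by the code above (values are
 * illustrative):
 *
 *      vendor_id       : IBM/S390
 *      # processors    : 2
 *      bogomips per cpu: 1234.56
 *      processor 0: version = FF, identification = 012345, machine = 2064
 *      processor 1: version = FF, identification = 112345, machine = 2064
 */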

#define DEFINE_IPL_ATTR(_name, _format, _value)                 \
static ssize_t ipl_##_name##_show(struct subsystem *subsys,    \
                char *page)                                     \
{                                                               \
        return sprintf(page, _format, _value);                  \
}                                                               \
static struct subsys_attribute ipl_##_name##_attr =             \
        __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL);

DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long)
                IPL_PARMBLOCK_START->fcp.wwpn);
DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long)
                IPL_PARMBLOCK_START->fcp.lun);
DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long)
                IPL_PARMBLOCK_START->fcp.bootprog);
DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long)
                IPL_PARMBLOCK_START->fcp.br_lba);
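
/*
 * For reference, DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", ...) above expands to
 * roughly the following:
 *
 *      static ssize_t ipl_wwpn_show(struct subsystem *subsys, char *page)
 *      {
 *              return sprintf(page, "0x%016llx\n", (unsigned long long)
 *                             IPL_PARMBLOCK_START->fcp.wwpn);
 *      }
 *      static struct subsys_attribute ipl_wwpn_attr =
 *              __ATTR(wwpn, S_IRUGO, ipl_wwpn_show, NULL);
 */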

enum ipl_type_type {
        ipl_type_unknown,
        ipl_type_ccw,
        ipl_type_fcp,
};

static enum ipl_type_type
get_ipl_type(void)
{
        struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;

        if (!IPL_DEVNO_VALID)
                return ipl_type_unknown;
        if (!IPL_PARMBLOCK_VALID)
                return ipl_type_ccw;
        if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION)
                return ipl_type_unknown;
        if (ipl->fcp.pbt != IPL_TYPE_FCP)
                return ipl_type_unknown;
        return ipl_type_fcp;
}
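
/*
 * In short: no valid IPL device number -> unknown; a valid devno without a
 * parameter block -> plain CCW IPL; a parameter block with a supported
 * version and an FCP program block type -> FCP IPL; anything else is
 * reported as unknown.
 */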

static ssize_t
ipl_type_show(struct subsystem *subsys, char *page)
{
        switch (get_ipl_type()) {
        case ipl_type_ccw:
                return sprintf(page, "ccw\n");
        case ipl_type_fcp:
                return sprintf(page, "fcp\n");
        default:
                return sprintf(page, "unknown\n");
        }
}

static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type);

static ssize_t
ipl_device_show(struct subsystem *subsys, char *page)
{
        struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;

        switch (get_ipl_type()) {
        case ipl_type_ccw:
                return sprintf(page, "0.0.%04x\n", ipl_devno);
        case ipl_type_fcp:
                return sprintf(page, "0.0.%04x\n", ipl->fcp.devno);
        default:
                return 0;
        }
}

static struct subsys_attribute ipl_device_attr =
        __ATTR(device, S_IRUGO, ipl_device_show, NULL);

static struct attribute *ipl_fcp_attrs[] = {
        &ipl_type_attr.attr,
        &ipl_device_attr.attr,
        &ipl_wwpn_attr.attr,
        &ipl_lun_attr.attr,
        &ipl_bootprog_attr.attr,
        &ipl_br_lba_attr.attr,
        NULL,
};

static struct attribute_group ipl_fcp_attr_group = {
        .attrs = ipl_fcp_attrs,
};

static struct attribute *ipl_ccw_attrs[] = {
        &ipl_type_attr.attr,
        &ipl_device_attr.attr,
        NULL,
};

static struct attribute_group ipl_ccw_attr_group = {
        .attrs = ipl_ccw_attrs,
};

static struct attribute *ipl_unknown_attrs[] = {
        &ipl_type_attr.attr,
        NULL,
};

static struct attribute_group ipl_unknown_attr_group = {
        .attrs = ipl_unknown_attrs,
};

static ssize_t
ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
        unsigned int size = IPL_PARMBLOCK_SIZE;

        if (off > size)
                return 0;
        if (off + count > size)
                count = size - off;

        memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count);
        return count;
}

static struct bin_attribute ipl_parameter_attr = {
        .attr = {
                .name = "binary_parameter",
                .mode = S_IRUGO,
                .owner = THIS_MODULE,
        },
        .size = PAGE_SIZE,
        .read = &ipl_parameter_read,
};

static ssize_t
ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
        unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len;
        void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data;

        if (off > size)
                return 0;
        if (off + count > size)
                count = size - off;

        memcpy(buf, scp_data + off, count);
        return count;
}

static struct bin_attribute ipl_scp_data_attr = {
        .attr = {
                .name = "scp_data",
                .mode = S_IRUGO,
                .owner = THIS_MODULE,
        },
        .size = PAGE_SIZE,
        .read = &ipl_scp_data_read,
};

static decl_subsys(ipl, NULL, NULL);

static int __init
ipl_device_sysfs_register(void) {
        int rc;

        rc = firmware_register(&ipl_subsys);
        if (rc)
                return rc;

        switch (get_ipl_type()) {
        case ipl_type_ccw:
                sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group);
                break;
        case ipl_type_fcp:
                sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
                sysfs_create_bin_file(&ipl_subsys.kset.kobj,
                                      &ipl_parameter_attr);
                sysfs_create_bin_file(&ipl_subsys.kset.kobj,
                                      &ipl_scp_data_attr);
                break;
        default:
                sysfs_create_group(&ipl_subsys.kset.kobj,
                                   &ipl_unknown_attr_group);
                break;
        }
        return 0;
}

__initcall(ipl_device_sysfs_register);
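
/*
 * Sketch of the resulting sysfs layout (firmware_register() is assumed to
 * place the subsystem under /sys/firmware). After an FCP IPL:
 *
 *      /sys/firmware/ipl/ipl_type              "fcp"
 *      /sys/firmware/ipl/device                "0.0.xxxx"
 *      /sys/firmware/ipl/wwpn, lun, bootprog, br_lba
 *      /sys/firmware/ipl/binary_parameter      (raw IPL parameter block)
 *      /sys/firmware/ipl/scp_data
 *
 * A CCW IPL exposes only ipl_type and device; an unrecognized IPL exposes
 * ipl_type alone.
 */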