Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/s390/kernel/entry.S | |
3 | * S390 low-level entry points. | |
4 | * | |
54dfe5dd | 5 | * Copyright (C) IBM Corp. 1999,2006 |
1da177e4 | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
25d83cbf HC |
7 | * Hartmut Penner (hp@de.ibm.com), |
8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | |
77fa2245 | 9 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
1da177e4 LT |
10 | */ |
11 | ||
12 | #include <linux/sys.h> | |
13 | #include <linux/linkage.h> | |
2bc89b5e | 14 | #include <linux/init.h> |
1da177e4 LT |
15 | #include <asm/cache.h> |
16 | #include <asm/lowcore.h> | |
17 | #include <asm/errno.h> | |
18 | #include <asm/ptrace.h> | |
19 | #include <asm/thread_info.h> | |
0013a854 | 20 | #include <asm/asm-offsets.h> |
1da177e4 LT |
21 | #include <asm/unistd.h> |
22 | #include <asm/page.h> | |
23 | ||
24 | /* | |
25 | * Stack layout for the system_call stack entry. | |
26 | * The first few entries are identical to the user_regs_struct. | |
27 | */ | |
25d83cbf HC |
28 | SP_PTREGS = STACK_FRAME_OVERHEAD |
29 | SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS | |
30 | SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW | |
31 | SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS | |
32 | SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4 | |
33 | SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 | |
34 | SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12 | |
35 | SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 | |
36 | SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20 | |
37 | SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 | |
38 | SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28 | |
39 | SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 | |
40 | SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36 | |
41 | SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 | |
42 | SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44 | |
43 | SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 | |
44 | SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | |
45 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | |
46 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | |
47 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | |
48 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | |
49 | SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP | |
50 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | |
1da177e4 | 51 | |
753c4dd6 | 52 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
54dfe5dd | 53 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) |
753c4dd6 | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
54dfe5dd | 55 | _TIF_MCCK_PENDING) |
1da177e4 LT |
56 | |
57 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | |
58 | STACK_SIZE = 1 << STACK_SHIFT | |
59 | ||
60 | #define BASED(name) name-system_call(%r13) | |
61 | ||
1f194a4c HC |
62 | #ifdef CONFIG_TRACE_IRQFLAGS |
63 | .macro TRACE_IRQS_ON | |
50bec4ce HC |
64 | basr %r2,%r0 |
65 | l %r1,BASED(.Ltrace_irq_on_caller) | |
1f194a4c HC |
66 | basr %r14,%r1 |
67 | .endm | |
68 | ||
69 | .macro TRACE_IRQS_OFF | |
50bec4ce HC |
70 | basr %r2,%r0 |
71 | l %r1,BASED(.Ltrace_irq_off_caller) | |
1f194a4c HC |
72 | basr %r14,%r1 |
73 | .endm | |
523b44cf | 74 | |
411788ea | 75 | .macro TRACE_IRQS_CHECK |
50bec4ce | 76 | basr %r2,%r0 |
411788ea HC |
77 | tm SP_PSW(%r15),0x03 # irqs enabled? |
78 | jz 0f | |
50bec4ce | 79 | l %r1,BASED(.Ltrace_irq_on_caller) |
523b44cf | 80 | basr %r14,%r1 |
411788ea | 81 | j 1f |
50bec4ce | 82 | 0: l %r1,BASED(.Ltrace_irq_off_caller) |
411788ea HC |
83 | basr %r14,%r1 |
84 | 1: | |
523b44cf | 85 | .endm |
1f194a4c HC |
86 | #else |
87 | #define TRACE_IRQS_ON | |
88 | #define TRACE_IRQS_OFF | |
411788ea HC |
89 | #define TRACE_IRQS_CHECK |
90 | #endif | |
91 | ||
92 | #ifdef CONFIG_LOCKDEP | |
93 | .macro LOCKDEP_SYS_EXIT | |
94 | tm SP_PSW+1(%r15),0x01 # returning to user ? | |
95 | jz 0f | |
96 | l %r1,BASED(.Llockdep_sys_exit) | |
97 | basr %r14,%r1 | |
98 | 0: | |
99 | .endm | |
100 | #else | |
523b44cf | 101 | #define LOCKDEP_SYS_EXIT |
1f194a4c HC |
102 | #endif |
103 | ||
1da177e4 LT |
104 | /* |
105 | * Register usage in interrupt handlers: | |
106 | * R9 - pointer to thread_info of current task |
107 | * R13 - pointer to literal pool | |
108 | * R14 - return register for function calls | |
109 | * R15 - kernel stack pointer | |
110 | */ | |
111 | ||
25d83cbf | 112 | .macro STORE_TIMER lc_offset |
1da177e4 LT |
113 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
114 | stpt \lc_offset | |
115 | #endif | |
116 | .endm | |
117 | ||
118 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
25d83cbf | 119 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum |
1da177e4 LT |
120 | lm %r10,%r11,\lc_from |
121 | sl %r10,\lc_to | |
122 | sl %r11,\lc_to+4 | |
123 | bc 3,BASED(0f) | |
124 | sl %r10,BASED(.Lc_1) | |
125 | 0: al %r10,\lc_sum | |
126 | al %r11,\lc_sum+4 | |
127 | bc 12,BASED(1f) | |
128 | al %r10,BASED(.Lc_1) | |
129 | 1: stm %r10,%r11,\lc_sum | |
130 | .endm | |
131 | #endif | |
132 | ||
133 | .macro SAVE_ALL_BASE savearea | |
134 | stm %r12,%r15,\savearea | |
135 | l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 | |
136 | .endm | |
137 | ||
987ad70a MS |
138 | .macro SAVE_ALL_SVC psworg,savearea |
139 | la %r12,\psworg | |
140 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | |
141 | .endm | |
142 | ||
63b12246 | 143 | .macro SAVE_ALL_SYNC psworg,savearea |
1da177e4 | 144 | la %r12,\psworg |
1da177e4 LT |
145 | tm \psworg+1,0x01 # test problem state bit |
146 | bz BASED(2f) # skip stack setup save | |
147 | l %r15,__LC_KERNEL_STACK # problem state -> load ksp | |
63b12246 MS |
148 | #ifdef CONFIG_CHECK_STACK |
149 | b BASED(3f) | |
150 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | |
151 | bz BASED(stack_overflow) | |
152 | 3: | |
153 | #endif | |
154 | 2: | |
155 | .endm | |
156 | ||
157 | .macro SAVE_ALL_ASYNC psworg,savearea | |
158 | la %r12,\psworg | |
1da177e4 LT |
159 | tm \psworg+1,0x01 # test problem state bit |
160 | bnz BASED(1f) # from user -> load async stack | |
161 | clc \psworg+4(4),BASED(.Lcritical_end) | |
162 | bhe BASED(0f) | |
163 | clc \psworg+4(4),BASED(.Lcritical_start) | |
164 | bl BASED(0f) | |
165 | l %r14,BASED(.Lcleanup_critical) | |
166 | basr %r14,%r14 | |
6add9f7f | 167 | tm 1(%r12),0x01 # retest problem state after cleanup |
1da177e4 LT |
168 | bnz BASED(1f) |
169 | 0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ? | |
170 | slr %r14,%r15 | |
171 | sra %r14,STACK_SHIFT | |
172 | be BASED(2f) | |
173 | 1: l %r15,__LC_ASYNC_STACK | |
1da177e4 LT |
174 | #ifdef CONFIG_CHECK_STACK |
175 | b BASED(3f) | |
176 | 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD | |
177 | bz BASED(stack_overflow) | |
178 | 3: | |
179 | #endif | |
77fa2245 HC |
180 | 2: |
181 | .endm | |
182 | ||
25d83cbf | 183 | .macro CREATE_STACK_FRAME psworg,savearea |
77fa2245 | 184 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
1da177e4 LT |
185 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack |
186 | la %r12,\psworg | |
187 | st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 | |
188 | icm %r12,12,__LC_SVC_ILC | |
189 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | |
190 | st %r12,SP_ILC(%r15) | |
191 | mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack | |
192 | la %r12,0 | |
193 | st %r12,__SF_BACKCHAIN(%r15) # clear back chain | |
194 | .endm | |
195 | ||
25d83cbf | 196 | .macro RESTORE_ALL psworg,sync |
ae6aa2ea | 197 | mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore |
1da177e4 | 198 | .if !\sync |
ae6aa2ea | 199 | ni \psworg+1,0xfd # clear wait state bit |
1da177e4 LT |
200 | .endif |
201 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | |
202 | STORE_TIMER __LC_EXIT_TIMER | |
ae6aa2ea | 203 | lpsw \psworg # back to caller |
1da177e4 LT |
204 | .endm |
205 | ||
206 | /* | |
207 | * Scheduler resume function, called by switch_to | |
208 | * gpr2 = (task_struct *) prev | |
209 | * gpr3 = (task_struct *) next | |
210 | * Returns: | |
211 | * gpr2 = prev | |
212 | */ | |
25d83cbf | 213 | .globl __switch_to |
1da177e4 | 214 | __switch_to: |
25d83cbf | 215 | basr %r1,0 |
1da177e4 LT |
216 | __switch_to_base: |
217 | tm __THREAD_per(%r3),0xe8 # new process is using per ? | |
218 | bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine | |
25d83cbf HC |
219 | stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff |
220 | clc __THREAD_per(12,%r3),__SF_EMPTY(%r15) | |
221 | be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's | |
222 | lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't | |
1da177e4 | 223 | __switch_to_noper: |
77fa2245 HC |
224 | l %r4,__THREAD_info(%r2) # get thread_info of prev |
225 | tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? | |
226 | bz __switch_to_no_mcck-__switch_to_base(%r1) | |
227 | ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev | |
228 | l %r4,__THREAD_info(%r3) # get thread_info of next | |
229 | oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next | |
230 | __switch_to_no_mcck: | |
25d83cbf | 231 | stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task |
1da177e4 LT |
232 | st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp |
233 | l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp | |
234 | lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task | |
235 | st %r3,__LC_CURRENT # __LC_CURRENT = current task struct | |
236 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | |
25d83cbf | 237 | l %r3,__THREAD_info(%r3) # load thread_info from task struct |
1da177e4 LT |
238 | st %r3,__LC_THREAD_INFO |
239 | ahi %r3,STACK_SIZE | |
240 | st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack | |
241 | br %r14 | |
242 | ||
243 | __critical_start: | |
244 | /* | |
245 | * SVC interrupt handler routine. System calls are synchronous events and | |
246 | * are executed with interrupts enabled. | |
247 | */ | |
248 | ||
25d83cbf | 249 | .globl system_call |
1da177e4 LT |
250 | system_call: |
251 | STORE_TIMER __LC_SYNC_ENTER_TIMER | |
252 | sysc_saveall: | |
253 | SAVE_ALL_BASE __LC_SAVE_AREA | |
987ad70a | 254 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 255 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
1da177e4 LT |
256 | lh %r7,0x8a # get svc number from lowcore |
257 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
258 | sysc_vtime: | |
1da177e4 LT |
259 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
260 | sysc_stime: | |
261 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
262 | sysc_update: | |
263 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
264 | #endif | |
265 | sysc_do_svc: | |
266 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
25d83cbf HC |
267 | sla %r7,2 # *4 and test for svc 0 |
268 | bnz BASED(sysc_nr_ok) # svc number > 0 | |
1da177e4 LT |
269 | # svc 0: system call number in %r1 |
270 | cl %r1,BASED(.Lnr_syscalls) | |
271 | bnl BASED(sysc_nr_ok) | |
25d83cbf HC |
272 | lr %r7,%r1 # copy svc number to %r7 |
273 | sla %r7,2 # *4 | |
1da177e4 LT |
274 | sysc_nr_ok: |
275 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | |
276 | sysc_do_restart: | |
d882b172 | 277 | l %r8,BASED(.Lsysc_table) |
1da177e4 | 278 | tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) |
d882b172 | 279 | l %r8,0(%r7,%r8) # get system call addr. |
25d83cbf HC |
280 | bnz BASED(sysc_tracesys) |
281 | basr %r14,%r8 # call sys_xxxx | |
282 | st %r2,SP_R2(%r15) # store return value (change R2 on stack) | |
1da177e4 LT |
283 | |
284 | sysc_return: | |
1da177e4 LT |
285 | tm __TI_flags+3(%r9),_TIF_WORK_SVC |
286 | bnz BASED(sysc_work) # there is work to do (signals etc.) | |
411788ea HC |
287 | sysc_restore: |
288 | #ifdef CONFIG_TRACE_IRQFLAGS | |
289 | la %r1,BASED(sysc_restore_trace_psw) | |
290 | lpsw 0(%r1) | |
291 | sysc_restore_trace: | |
292 | TRACE_IRQS_CHECK | |
523b44cf | 293 | LOCKDEP_SYS_EXIT |
411788ea | 294 | #endif |
1da177e4 | 295 | sysc_leave: |
25d83cbf | 296 | RESTORE_ALL __LC_RETURN_PSW,1 |
411788ea HC |
297 | sysc_done: |
298 | ||
299 | #ifdef CONFIG_TRACE_IRQFLAGS | |
300 | .align 8 | |
301 | .globl sysc_restore_trace_psw | |
302 | sysc_restore_trace_psw: | |
303 | .long 0, sysc_restore_trace + 0x80000000 | |
304 | #endif | |
1da177e4 LT |
305 | |
306 | # | |
307 | # recheck if there is more work to do | |
308 | # | |
309 | sysc_work_loop: | |
310 | tm __TI_flags+3(%r9),_TIF_WORK_SVC | |
411788ea | 311 | bz BASED(sysc_restore) # there is no work to do |
1da177e4 LT |
312 | # |
313 | # One of the work bits is on. Find out which one. | |
314 | # | |
315 | sysc_work: | |
2688905e MS |
316 | tm SP_PSW+1(%r15),0x01 # returning to user ? |
317 | bno BASED(sysc_restore) | |
77fa2245 HC |
318 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING |
319 | bo BASED(sysc_mcck_pending) | |
1da177e4 LT |
320 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED |
321 | bo BASED(sysc_reschedule) | |
02a029b3 | 322 | tm __TI_flags+3(%r9),_TIF_SIGPENDING |
54dfe5dd | 323 | bnz BASED(sysc_sigpending) |
753c4dd6 MS |
324 | tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME |
325 | bnz BASED(sysc_notify_resume) | |
1da177e4 LT |
326 | tm __TI_flags+3(%r9),_TIF_RESTART_SVC |
327 | bo BASED(sysc_restart) | |
328 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP | |
329 | bo BASED(sysc_singlestep) | |
411788ea HC |
330 | b BASED(sysc_restore) |
331 | sysc_work_done: | |
1da177e4 LT |
332 | |
333 | # | |
334 | # _TIF_NEED_RESCHED is set, call schedule | |
25d83cbf HC |
335 | # |
336 | sysc_reschedule: | |
337 | l %r1,BASED(.Lschedule) | |
338 | la %r14,BASED(sysc_work_loop) | |
339 | br %r1 # call scheduler | |
1da177e4 | 340 | |
77fa2245 HC |
341 | # |
342 | # _TIF_MCCK_PENDING is set, call handler | |
343 | # | |
344 | sysc_mcck_pending: | |
345 | l %r1,BASED(.Ls390_handle_mcck) | |
346 | la %r14,BASED(sysc_work_loop) | |
347 | br %r1 # TIF bit will be cleared by handler | |
348 | ||
1da177e4 | 349 | # |
02a029b3 | 350 | # _TIF_SIGPENDING is set, call do_signal |
1da177e4 | 351 | # |
25d83cbf | 352 | sysc_sigpending: |
1da177e4 | 353 | ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP |
25d83cbf HC |
354 | la %r2,SP_PTREGS(%r15) # load pt_regs |
355 | l %r1,BASED(.Ldo_signal) | |
356 | basr %r14,%r1 # call do_signal | |
1da177e4 LT |
357 | tm __TI_flags+3(%r9),_TIF_RESTART_SVC |
358 | bo BASED(sysc_restart) | |
359 | tm __TI_flags+3(%r9),_TIF_SINGLE_STEP | |
360 | bo BASED(sysc_singlestep) | |
e1c3ad96 | 361 | b BASED(sysc_work_loop) |
1da177e4 | 362 | |
753c4dd6 MS |
363 | # |
364 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | |
365 | # | |
366 | sysc_notify_resume: | |
367 | la %r2,SP_PTREGS(%r15) # load pt_regs | |
368 | l %r1,BASED(.Ldo_notify_resume) | |
369 | la %r14,BASED(sysc_work_loop) | |
370 | br %r1 # call do_notify_resume | |
371 | ||
372 | ||
1da177e4 LT |
373 | # |
374 | # _TIF_RESTART_SVC is set, set up registers and restart svc | |
375 | # | |
376 | sysc_restart: | |
377 | ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | |
25d83cbf | 378 | l %r7,SP_R2(%r15) # load new svc number |
1da177e4 LT |
379 | sla %r7,2 |
380 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument | |
25d83cbf HC |
381 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
382 | b BASED(sysc_do_restart) # restart svc | |
1da177e4 LT |
383 | |
384 | # | |
385 | # _TIF_SINGLE_STEP is set, call do_single_step | |
386 | # | |
387 | sysc_singlestep: | |
388 | ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP | |
389 | mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check | |
390 | la %r2,SP_PTREGS(%r15) # address of register-save area | |
391 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | |
392 | la %r14,BASED(sysc_return) # load adr. of system return | |
393 | br %r1 # branch to do_single_step | |
394 | ||
1da177e4 | 395 | # |
753c4dd6 MS |
396 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
397 | # and after the system call | |
1da177e4 LT |
398 | # |
399 | sysc_tracesys: | |
753c4dd6 | 400 | l %r1,BASED(.Ltrace_entry) |
25d83cbf | 401 | la %r2,SP_PTREGS(%r15) # load pt_regs |
1da177e4 LT |
402 | la %r3,0 |
403 | srl %r7,2 | |
404 | st %r7,SP_R2(%r15) | |
405 | basr %r14,%r1 | |
753c4dd6 | 406 | cl %r2,BASED(.Lnr_syscalls) |
1da177e4 | 407 | bnl BASED(sysc_tracenogo) |
d882b172 | 408 | l %r8,BASED(.Lsysc_table) |
753c4dd6 MS |
409 | lr %r7,%r2 |
410 | sll %r7,2 # *4 | |
d882b172 | 411 | l %r8,0(%r7,%r8) |
1da177e4 LT |
412 | sysc_tracego: |
413 | lm %r3,%r6,SP_R3(%r15) | |
414 | l %r2,SP_ORIG_R2(%r15) | |
25d83cbf HC |
415 | basr %r14,%r8 # call sys_xxx |
416 | st %r2,SP_R2(%r15) # store return value | |
1da177e4 LT |
417 | sysc_tracenogo: |
418 | tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | |
25d83cbf | 419 | bz BASED(sysc_return) |
753c4dd6 | 420 | l %r1,BASED(.Ltrace_exit) |
25d83cbf | 421 | la %r2,SP_PTREGS(%r15) # load pt_regs |
1da177e4 LT |
422 | la %r14,BASED(sysc_return) |
423 | br %r1 | |
424 | ||
425 | # | |
426 | # a new process exits the kernel with ret_from_fork | |
427 | # | |
25d83cbf | 428 | .globl ret_from_fork |
1da177e4 LT |
429 | ret_from_fork: |
430 | l %r13,__LC_SVC_NEW_PSW+4 | |
431 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
432 | tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? | |
433 | bo BASED(0f) | |
434 | st %r15,SP_R15(%r15) # store stack pointer for new kthread | |
25d83cbf HC |
435 | 0: l %r1,BASED(.Lschedtail) |
436 | basr %r14,%r1 | |
1f194a4c | 437 | TRACE_IRQS_ON |
25d83cbf | 438 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
1da177e4 LT |
439 | b BASED(sysc_return) |
440 | ||
441 | # | |
03ff9a23 MS |
442 | # kernel_execve function needs to deal with pt_regs that is not |
443 | # at the usual place | |
1da177e4 | 444 | # |
03ff9a23 MS |
445 | .globl kernel_execve |
446 | kernel_execve: | |
447 | stm %r12,%r15,48(%r15) | |
448 | lr %r14,%r15 | |
449 | l %r13,__LC_SVC_NEW_PSW+4 | |
450 | s %r15,BASED(.Lc_spsize) | |
451 | st %r14,__SF_BACKCHAIN(%r15) | |
452 | la %r12,SP_PTREGS(%r15) | |
453 | xc 0(__PT_SIZE,%r12),0(%r12) | |
454 | l %r1,BASED(.Ldo_execve) | |
455 | lr %r5,%r12 | |
456 | basr %r14,%r1 | |
457 | ltr %r2,%r2 | |
458 | be BASED(0f) | |
459 | a %r15,BASED(.Lc_spsize) | |
460 | lm %r12,%r15,48(%r15) | |
461 | br %r14 | |
462 | # execve succeeded. | |
463 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | |
464 | l %r15,__LC_KERNEL_STACK # load ksp | |
465 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | |
466 | l %r9,__LC_THREAD_INFO | |
467 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | |
468 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | |
469 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | |
470 | l %r1,BASED(.Lexecve_tail) | |
471 | basr %r14,%r1 | |
472 | b BASED(sysc_return) | |
1da177e4 LT |
473 | |
474 | /* | |
475 | * Program check handler routine | |
476 | */ | |
477 | ||
25d83cbf | 478 | .globl pgm_check_handler |
1da177e4 LT |
479 | pgm_check_handler: |
480 | /* | |
481 | * First we need to check for a special case: | |
482 | * Single stepping an instruction that disables the PER event mask will | |
483 | * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. | |
484 | * For a single stepped SVC the program check handler gets control after | |
485 | * the SVC new PSW has been loaded. But we want to execute the SVC first and | |
486 | * then handle the PER event. Therefore we update the SVC old PSW to point | |
487 | * to the pgm_check_handler and branch to the SVC handler after we checked | |
488 | * if we have to load the kernel stack register. | |
489 | * For every other possible cause for PER event without the PER mask set | |
490 | * we just ignore the PER event (FIXME: is there anything we have to do | |
491 | * for LPSW?). | |
492 | */ | |
493 | STORE_TIMER __LC_SYNC_ENTER_TIMER | |
494 | SAVE_ALL_BASE __LC_SAVE_AREA | |
25d83cbf HC |
495 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception |
496 | bnz BASED(pgm_per) # got per exception -> special case | |
63b12246 | 497 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 498 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
1da177e4 LT |
499 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
500 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | |
501 | bz BASED(pgm_no_vtime) | |
502 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | |
503 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
504 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
505 | pgm_no_vtime: | |
506 | #endif | |
507 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
411788ea | 508 | TRACE_IRQS_OFF |
25d83cbf | 509 | l %r3,__LC_PGM_ILC # load program interruption code |
1da177e4 LT |
510 | la %r8,0x7f |
511 | nr %r8,%r3 | |
512 | pgm_do_call: | |
25d83cbf HC |
513 | l %r7,BASED(.Ljump_table) |
514 | sll %r8,2 | |
515 | l %r7,0(%r8,%r7) # load address of handler routine | |
516 | la %r2,SP_PTREGS(%r15) # address of register-save area | |
517 | la %r14,BASED(sysc_return) | |
518 | br %r7 # branch to interrupt-handler | |
1da177e4 LT |
519 | |
520 | # | |
521 | # handle per exception | |
522 | # | |
523 | pgm_per: | |
25d83cbf HC |
524 | tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on |
525 | bnz BASED(pgm_per_std) # ok, normal per event from user space | |
1da177e4 | 526 | # ok its one of the special cases, now we need to find out which one |
25d83cbf HC |
527 | clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW |
528 | be BASED(pgm_svcper) | |
1da177e4 | 529 | # no interesting special case, ignore PER event |
25d83cbf HC |
530 | lm %r12,%r15,__LC_SAVE_AREA |
531 | lpsw 0x28 | |
1da177e4 LT |
532 | |
533 | # | |
534 | # Normal per exception | |
535 | # | |
536 | pgm_per_std: | |
63b12246 | 537 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 538 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
1da177e4 LT |
539 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
540 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | |
541 | bz BASED(pgm_no_vtime2) | |
542 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | |
543 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
544 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
545 | pgm_no_vtime2: | |
546 | #endif | |
547 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
411788ea | 548 | TRACE_IRQS_OFF |
1da177e4 LT |
549 | l %r1,__TI_task(%r9) |
550 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | |
551 | mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS | |
552 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | |
553 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | |
4ba069b8 MG |
554 | tm SP_PSW+1(%r15),0x01 # kernel per event ? |
555 | bz BASED(kernel_per) | |
25d83cbf | 556 | l %r3,__LC_PGM_ILC # load program interruption code |
1da177e4 | 557 | la %r8,0x7f |
25d83cbf HC |
558 | nr %r8,%r3 # clear per-event-bit and ilc |
559 | be BASED(sysc_return) # only per or per+check ? | |
1da177e4 LT |
560 | b BASED(pgm_do_call) |
561 | ||
562 | # | |
563 | # it was a single stepped SVC that is causing all the trouble | |
564 | # | |
565 | pgm_svcper: | |
63b12246 | 566 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 567 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
1da177e4 | 568 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1da177e4 LT |
569 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
570 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
571 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
1da177e4 LT |
572 | #endif |
573 | lh %r7,0x8a # get svc number from lowcore | |
574 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
411788ea | 575 | TRACE_IRQS_OFF |
1da177e4 LT |
576 | l %r1,__TI_task(%r9) |
577 | mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID | |
578 | mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS | |
579 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | |
580 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | |
1f194a4c | 581 | TRACE_IRQS_ON |
1da177e4 LT |
582 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
583 | b BASED(sysc_do_svc) | |
584 | ||
4ba069b8 MG |
585 | # |
586 | # per was called from kernel, must be kprobes | |
587 | # | |
588 | kernel_per: | |
589 | mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check | |
590 | la %r2,SP_PTREGS(%r15) # address of register-save area | |
591 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | |
411788ea | 592 | la %r14,BASED(sysc_restore)# load adr. of system return |
4ba069b8 MG |
593 | br %r1 # branch to do_single_step |
594 | ||
1da177e4 LT |
595 | /* |
596 | * IO interrupt handler routine | |
597 | */ | |
598 | ||
25d83cbf | 599 | .globl io_int_handler |
1da177e4 LT |
600 | io_int_handler: |
601 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | |
602 | stck __LC_INT_CLOCK | |
603 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | |
63b12246 | 604 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 |
77fa2245 | 605 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 |
1da177e4 LT |
606 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
607 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | |
608 | bz BASED(io_no_vtime) | |
609 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | |
610 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
611 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
612 | io_no_vtime: | |
613 | #endif | |
614 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
1f194a4c | 615 | TRACE_IRQS_OFF |
25d83cbf HC |
616 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ |
617 | la %r2,SP_PTREGS(%r15) # address of register-save area | |
618 | basr %r14,%r1 # branch to standard irq handler | |
1da177e4 | 619 | io_return: |
1da177e4 | 620 | tm __TI_flags+3(%r9),_TIF_WORK_INT |
25d83cbf | 621 | bnz BASED(io_work) # there is work to do (signals etc.) |
411788ea HC |
622 | io_restore: |
623 | #ifdef CONFIG_TRACE_IRQFLAGS | |
624 | la %r1,BASED(io_restore_trace_psw) | |
625 | lpsw 0(%r1) | |
626 | io_restore_trace: | |
627 | TRACE_IRQS_CHECK | |
523b44cf | 628 | LOCKDEP_SYS_EXIT |
411788ea | 629 | #endif |
1da177e4 | 630 | io_leave: |
25d83cbf | 631 | RESTORE_ALL __LC_RETURN_PSW,0 |
ae6aa2ea | 632 | io_done: |
1da177e4 | 633 | |
411788ea HC |
634 | #ifdef CONFIG_TRACE_IRQFLAGS |
635 | .align 8 | |
636 | .globl io_restore_trace_psw | |
637 | io_restore_trace_psw: | |
638 | .long 0, io_restore_trace + 0x80000000 | |
639 | #endif | |
640 | ||
2688905e MS |
641 | # |
642 | # switch to kernel stack, then check the TIF bits | |
643 | # | |
644 | io_work: | |
645 | tm SP_PSW+1(%r15),0x01 # returning to user ? | |
646 | #ifndef CONFIG_PREEMPT | |
647 | bno BASED(io_restore) # no-> skip resched & signal | |
648 | #else | |
649 | bnz BASED(io_work_user) # no -> check for preemptive scheduling | |
650 | # check for preemptive scheduling | |
1da177e4 | 651 | icm %r0,15,__TI_precount(%r9) |
2688905e | 652 | bnz BASED(io_restore) # preemption disabled |
1da177e4 LT |
653 | l %r1,SP_R15(%r15) |
654 | s %r1,BASED(.Lc_spsize) | |
655 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | |
25d83cbf | 656 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
1da177e4 LT |
657 | lr %r15,%r1 |
658 | io_resume_loop: | |
659 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED | |
411788ea | 660 | bno BASED(io_restore) |
b8e7a54c HC |
661 | l %r1,BASED(.Lpreempt_schedule_irq) |
662 | la %r14,BASED(io_resume_loop) | |
663 | br %r1 # call schedule | |
1da177e4 LT |
664 | #endif |
665 | ||
2688905e | 666 | io_work_user: |
1da177e4 LT |
667 | l %r1,__LC_KERNEL_STACK |
668 | s %r1,BASED(.Lc_spsize) | |
669 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | |
25d83cbf | 670 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
1da177e4 LT |
671 | lr %r15,%r1 |
672 | # | |
673 | # One of the work bits is on. Find out which one. | |
02a029b3 | 674 | # Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED, |
25d83cbf | 675 | # _TIF_NOTIFY_RESUME and _TIF_MCCK_PENDING |
1da177e4 LT |
676 | # |
677 | io_work_loop: | |
77fa2245 | 678 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING |
25d83cbf | 679 | bo BASED(io_mcck_pending) |
1da177e4 LT |
680 | tm __TI_flags+3(%r9),_TIF_NEED_RESCHED |
681 | bo BASED(io_reschedule) | |
02a029b3 | 682 | tm __TI_flags+3(%r9),_TIF_SIGPENDING |
54dfe5dd | 683 | bnz BASED(io_sigpending) |
753c4dd6 MS |
684 | tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME |
685 | bnz BASED(io_notify_resume) | |
411788ea HC |
686 | b BASED(io_restore) |
687 | io_work_done: | |
1da177e4 | 688 | |
77fa2245 HC |
689 | # |
690 | # _TIF_MCCK_PENDING is set, call handler | |
691 | # | |
692 | io_mcck_pending: | |
693 | l %r1,BASED(.Ls390_handle_mcck) | |
b771aeac | 694 | basr %r14,%r1 # TIF bit will be cleared by handler |
b771aeac | 695 | b BASED(io_work_loop) |
77fa2245 | 696 | |
1da177e4 LT |
697 | # |
698 | # _TIF_NEED_RESCHED is set, call schedule | |
25d83cbf HC |
699 | # |
700 | io_reschedule: | |
411788ea | 701 | TRACE_IRQS_ON |
25d83cbf HC |
702 | l %r1,BASED(.Lschedule) |
703 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | |
704 | basr %r14,%r1 # call scheduler | |
705 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | |
411788ea | 706 | TRACE_IRQS_OFF |
1da177e4 | 707 | tm __TI_flags+3(%r9),_TIF_WORK_INT |
411788ea | 708 | bz BASED(io_restore) # there is no work to do |
1da177e4 LT |
709 | b BASED(io_work_loop) |
710 | ||
711 | # | |
02a029b3 | 712 | # _TIF_SIGPENDING is set, call do_signal |
1da177e4 | 713 | # |
25d83cbf | 714 | io_sigpending: |
411788ea | 715 | TRACE_IRQS_ON |
25d83cbf HC |
716 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
717 | la %r2,SP_PTREGS(%r15) # load pt_regs | |
718 | l %r1,BASED(.Ldo_signal) | |
719 | basr %r14,%r1 # call do_signal | |
720 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | |
411788ea | 721 | TRACE_IRQS_OFF |
e1c3ad96 | 722 | b BASED(io_work_loop) |
1da177e4 | 723 | |
753c4dd6 MS |
724 | # |
725 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
726 | # | |
727 | io_notify_resume: | |
728 | TRACE_IRQS_ON | |
729 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | |
730 | la %r2,SP_PTREGS(%r15) # load pt_regs | |
731 | l %r1,BASED(.Ldo_notify_resume) | |
732 | basr %r14,%r1 # call do_notify_resume |
733 | stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts | |
734 | TRACE_IRQS_OFF | |
735 | b BASED(io_work_loop) | |
736 | ||
1da177e4 LT |
737 | /* |
738 | * External interrupt handler routine | |
739 | */ | |
740 | ||
25d83cbf | 741 | .globl ext_int_handler |
1da177e4 LT |
742 | ext_int_handler: |
743 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | |
744 | stck __LC_INT_CLOCK | |
745 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | |
63b12246 | 746 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 |
77fa2245 | 747 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 |
1da177e4 LT |
748 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
749 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | |
750 | bz BASED(ext_no_vtime) | |
751 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | |
752 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
753 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
754 | ext_no_vtime: | |
755 | #endif | |
756 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | |
1f194a4c | 757 | TRACE_IRQS_OFF |
25d83cbf HC |
758 | la %r2,SP_PTREGS(%r15) # address of register-save area |
759 | lh %r3,__LC_EXT_INT_CODE # get interruption code | |
1da177e4 LT |
760 | l %r1,BASED(.Ldo_extint) |
761 | basr %r14,%r1 | |
762 | b BASED(io_return) | |
763 | ||
ae6aa2ea MS |
764 | __critical_end: |
765 | ||
1da177e4 LT |
766 | /* |
767 | * Machine check handler routines | |
768 | */ | |
769 | ||
# Machine-check entry. Registers and the cpu timer may have been
# clobbered by the check itself, so they are revalidated from the
# hardware save areas in lowcore before anything else. __LC_MCCK_CODE
# validity bits decide how much of the saved state can be trusted.
25d83cbf | 770 | .globl mcck_int_handler |
1da177e4 | 771 | mcck_int_handler: |
77fa2245 HC |
772 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer |
773 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs | |
1da177e4 | 774 | SAVE_ALL_BASE __LC_SAVE_AREA+32 |
77fa2245 | 775 | la %r12,__LC_MCK_OLD_PSW |
25d83cbf | 776 | tm __LC_MCCK_CODE,0x80 # system damage? |
77fa2245 | 777 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid |
1da177e4 | 778 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
# The interrupted async-enter timer value may be invalid; pick the most
# recent (largest) of the known-good enter/exit/update timestamps and
# reload the cpu timer from it.
63b12246 MS |
779 | mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER |
780 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA | |
781 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | |
782 | bo BASED(1f) | |
783 | la %r14,__LC_SYNC_ENTER_TIMER | |
784 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER | |
785 | bl BASED(0f) | |
786 | la %r14,__LC_ASYNC_ENTER_TIMER | |
787 | 0: clc 0(8,%r14),__LC_EXIT_TIMER | |
788 | bl BASED(0f) | |
789 | la %r14,__LC_EXIT_TIMER | |
790 | 0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER | |
791 | bl BASED(0f) | |
792 | la %r14,__LC_LAST_UPDATE_TIMER | |
793 | 0: spt 0(%r14) | |
794 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | |
795 | 1: | |
1da177e4 | 796 | #endif |
63b12246 | 797 | tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
77fa2245 HC |
798 | bno BASED(mcck_int_main) # no -> skip cleanup critical |
799 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | |
800 | bnz BASED(mcck_int_main) # from user -> load async stack | |
# If the check hit kernel code inside [__critical_start,__critical_end),
# run cleanup_critical so the interrupted entry/exit sequence is in a
# consistent state before we build our own frame.
801 | clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end) | |
802 | bhe BASED(mcck_int_main) | |
803 | clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start) | |
804 | bl BASED(mcck_int_main) | |
805 | l %r14,BASED(.Lcleanup_critical) | |
806 | basr %r14,%r14 | |
807 | mcck_int_main: | |
808 | l %r14,__LC_PANIC_STACK # are we already on the panic stack? | |
809 | slr %r14,%r15 | |
810 | sra %r14,PAGE_SHIFT | |
811 | be BASED(0f) | |
812 | l %r15,__LC_PANIC_STACK # load panic stack | |
813 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 | |
ae6aa2ea | 814 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
25d83cbf | 815 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? |
ae6aa2ea | 816 | bno BASED(mcck_no_vtime) # no -> skip vtime accounting |
63b12246 | 817 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
ae6aa2ea MS |
818 | bz BASED(mcck_no_vtime) |
819 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | |
820 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
821 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
822 | mcck_no_vtime: | |
823 | #endif | |
77fa2245 HC |
824 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
825 | la %r2,SP_PTREGS(%r15) # load pt_regs | |
25d83cbf HC |
826 | l %r1,BASED(.Ls390_mcck) |
827 | basr %r14,%r1 # call s390_do_machine_check | |
828 | tm SP_PSW+1(%r15),0x01 # returning to user ? | |
77fa2245 | 829 | bno BASED(mcck_return) |
# Returning to user: move the frame over to the kernel stack, turn DAT
# back on, and run the deferred mcck work if _TIF_MCCK_PENDING is set.
25d83cbf | 830 | l %r1,__LC_KERNEL_STACK # switch to kernel stack |
77fa2245 HC |
831 | s %r1,BASED(.Lc_spsize) |
832 | mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) | |
25d83cbf | 833 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain |
77fa2245 HC |
834 | lr %r15,%r1 |
835 | stosm __SF_EMPTY(%r15),0x04 # turn dat on | |
836 | tm __TI_flags+3(%r9),_TIF_MCCK_PENDING | |
837 | bno BASED(mcck_return) | |
1f194a4c | 838 | TRACE_IRQS_OFF |
77fa2245 HC |
839 | l %r1,BASED(.Ls390_handle_mcck) |
840 | basr %r14,%r1 # call s390_handle_mcck | |
1f194a4c | 841 | TRACE_IRQS_ON |
1da177e4 | 842 | mcck_return: |
63b12246 MS |
843 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW |
844 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | |
845 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
846 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 | |
847 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | |
848 | bno BASED(0f) | |
849 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | |
850 | stpt __LC_EXIT_TIMER | |
851 | lpsw __LC_RETURN_MCCK_PSW # back to caller | |
852 | 0: | |
853 | #endif | |
854 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | |
855 | lpsw __LC_RETURN_MCCK_PSW # back to caller | |
856 | ||
# NOTE(review): this RESTORE_ALL follows an unconditional lpsw and
# appears unreachable — confirm against the macro definitions and the
# blame history before removing.
25d83cbf | 857 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 |
1da177e4 | 858 | |
1da177e4 LT |
859 | /* |
860 | * Restart interruption handler, kick starter for additional CPUs | |
861 | */ | |
84b36a8e | 862 | #ifdef CONFIG_SMP |
2bc89b5e | 863 | __CPUINIT |
25d83cbf | 864 | .globl restart_int_handler |
1da177e4 | 865 | restart_int_handler: |
# Bring up a secondary CPU: load its kernel stack pointer and saved
# control/access registers from lowcore, restore the clone registers,
# enable DAT, then branch to start_secondary.
25d83cbf HC |
866 | l %r15,__LC_SAVE_AREA+60 # load ksp |
867 | lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs | |
868 | lam %a0,%a15,__LC_AREGS_SAVE_AREA | |
869 | lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone | |
870 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | |
871 | basr %r14,0 # %r14 = current address (base) | |
872 | l %r14,restart_addr-.(%r14) # PC-relative load of start_secondary | |
873 | br %r14 # branch to start_secondary | |
1da177e4 | 874 | restart_addr: |
25d83cbf | 875 | .long start_secondary |
84b36a8e | 876 | .previous |
1da177e4 LT |
877 | #else |
878 | /* | |
879 | * If we do not run with SMP enabled, let the new CPU crash ... | |
880 | */ | |
25d83cbf | 881 | .globl restart_int_handler |
1da177e4 | 882 | restart_int_handler: |
25d83cbf | 883 | basr %r1,0 |
1da177e4 | 884 | restart_base: |
# Load a disabled-wait PSW (0x000a0000 = wait bit set, all interrupts
# masked off) so the stray CPU stops dead.
25d83cbf HC |
885 | lpsw restart_crash-restart_base(%r1) |
886 | .align 8 | |
1da177e4 | 887 | restart_crash: |
25d83cbf | 888 | .long 0x000a0000,0x00000000 |
1da177e4 LT |
889 | restart_go: |
890 | #endif | |
891 | ||
892 | #ifdef CONFIG_CHECK_STACK | |
893 | /* | |
894 | * The synchronous or the asynchronous stack overflowed. We are dead. | |
895 | * No need to properly save the registers, we are going to panic anyway. | |
896 | * Setup a pt_regs so that show_trace can provide a good call trace. | |
897 | */ | |
898 | stack_overflow: | |
# On entry %r12 holds the address of the old PSW for the interrupt that
# detected the overflow; 0x020/0x028 are the lowcore offsets of the svc
# and program-check old PSWs, which select the matching save area below.
899 | l %r15,__LC_PANIC_STACK # change to panic stack | |
900 | sl %r15,BASED(.Lc_spsize) | |
901 | mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack | |
902 | stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack | |
903 | la %r1,__LC_SAVE_AREA | |
904 | ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ? | |
905 | be BASED(0f) | |
906 | ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ? | |
907 | be BASED(0f) | |
908 | la %r1,__LC_SAVE_AREA+16 | |
909 | 0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack | |
25d83cbf | 910 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain |
1da177e4 | 911 | l %r1,BASED(1f) # branch to kernel_stack_overflow |
25d83cbf | 912 | la %r2,SP_PTREGS(%r15) # load pt_regs |
1da177e4 | 913 | br %r1 |
25d83cbf | 914 | 1: .long kernel_stack_overflow |
1da177e4 LT |
915 | #endif |
916 | ||
# Critical-section fixup tables: each entry is a [start, end) pair of
# instruction addresses (with the 31-bit addressing bit 0x80000000 set).
# cleanup_critical below checks the interrupted address against each
# range and branches to the matching fixup routine.
917 | cleanup_table_system_call: | |
918 | .long system_call + 0x80000000, sysc_do_svc + 0x80000000 | |
919 | cleanup_table_sysc_return: | |
920 | .long sysc_return + 0x80000000, sysc_leave + 0x80000000 | |
921 | cleanup_table_sysc_leave: | |
411788ea | 922 | .long sysc_leave + 0x80000000, sysc_done + 0x80000000 |
1da177e4 | 923 | cleanup_table_sysc_work_loop: |
411788ea | 924 | .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000 |
63b12246 MS |
925 | cleanup_table_io_return: |
926 | .long io_return + 0x80000000, io_leave + 0x80000000 | |
ae6aa2ea MS |
927 | cleanup_table_io_leave: |
928 | .long io_leave + 0x80000000, io_done + 0x80000000 | |
929 | cleanup_table_io_work_loop: | |
411788ea | 930 | .long io_work_loop + 0x80000000, io_work_done + 0x80000000 |
1da177e4 LT |
931 | |
# On entry %r12 points to the old PSW of the interrupt; word 4(%r12) is
# the interrupted instruction address. Returns via %r14 with %r12
# updated by the fixup routine when one matched.
932 | cleanup_critical: | |
933 | clc 4(4,%r12),BASED(cleanup_table_system_call) | |
934 | bl BASED(0f) | |
935 | clc 4(4,%r12),BASED(cleanup_table_system_call+4) | |
936 | bl BASED(cleanup_system_call) | |
937 | 0: | |
938 | clc 4(4,%r12),BASED(cleanup_table_sysc_return) | |
939 | bl BASED(0f) | |
940 | clc 4(4,%r12),BASED(cleanup_table_sysc_return+4) | |
941 | bl BASED(cleanup_sysc_return) | |
942 | 0: | |
943 | clc 4(4,%r12),BASED(cleanup_table_sysc_leave) | |
944 | bl BASED(0f) | |
945 | clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4) | |
946 | bl BASED(cleanup_sysc_leave) | |
947 | 0: | |
948 | clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) | |
949 | bl BASED(0f) | |
950 | clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) | |
77fa2245 | 951 | bl BASED(cleanup_sysc_return) |
63b12246 MS |
952 | 0: |
953 | clc 4(4,%r12),BASED(cleanup_table_io_return) | |
954 | bl BASED(0f) | |
955 | clc 4(4,%r12),BASED(cleanup_table_io_return+4) | |
956 | bl BASED(cleanup_io_return) | |
ae6aa2ea MS |
957 | 0: |
958 | clc 4(4,%r12),BASED(cleanup_table_io_leave) | |
959 | bl BASED(0f) | |
960 | clc 4(4,%r12),BASED(cleanup_table_io_leave+4) | |
961 | bl BASED(cleanup_io_leave) | |
962 | 0: | |
963 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop) | |
964 | bl BASED(0f) | |
965 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) | |
966 | bl BASED(cleanup_io_return) | |
1da177e4 LT |
967 | 0: |
968 | br %r14 | |
969 | ||
# Interrupted inside the system_call entry sequence: redo whichever part
# of the entry work (register save, stack frame, vtime accounting) had
# not completed yet — decided by comparing the interrupted address
# against the checkpoints in cleanup_system_call_insn — then point the
# return PSW at sysc_do_svc so the entry resumes past the saved state.
970 | cleanup_system_call: | |
971 | mvc __LC_RETURN_PSW(8),0(%r12) | |
ae6aa2ea MS |
972 | c %r12,BASED(.Lmck_old_psw) |
# Select the save area matching the interrupt source: +32 for a machine
# check, +16 otherwise.
973 | be BASED(0f) | |
974 | la %r12,__LC_SAVE_AREA+16 | |
975 | b BASED(1f) | |
976 | 0: la %r12,__LC_SAVE_AREA+32 | |
977 | 1: | |
1da177e4 LT |
978 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
979 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | |
980 | bh BASED(0f) | |
981 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
982 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | |
983 | bhe BASED(cleanup_vtime) | |
984 | #endif | |
985 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | |
986 | bh BASED(0f) | |
ae6aa2ea MS |
987 | mvc __LC_SAVE_AREA(16),0(%r12) |
988 | 0: st %r13,4(%r12) | |
989 | st %r12,__LC_SAVE_AREA+48 # stash %r12; SAVE_ALL_SYNC clobbers it | |
63b12246 | 990 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
77fa2245 | 991 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
ae6aa2ea MS |
992 | l %r12,__LC_SAVE_AREA+48 # restore stashed %r12 |
993 | st %r15,12(%r12) | |
1da177e4 LT |
994 | lh %r7,0x8a |
995 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
996 | cleanup_vtime: | |
997 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | |
998 | bhe BASED(cleanup_stime) | |
1da177e4 LT |
999 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
1000 | cleanup_stime: | |
1001 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16) | |
1002 | bh BASED(cleanup_update) | |
1003 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | |
1004 | cleanup_update: | |
1005 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | |
1da177e4 LT |
1006 | #endif |
1007 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) | |
1008 | la %r12,__LC_RETURN_PSW | |
1009 | br %r14 | |
# Checkpoint addresses within the system_call entry path, in the order
# tested above.
1010 | cleanup_system_call_insn: | |
1011 | .long sysc_saveall + 0x80000000 | |
1012 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
25d83cbf HC |
1013 | .long system_call + 0x80000000 |
1014 | .long sysc_vtime + 0x80000000 | |
1015 | .long sysc_stime + 0x80000000 | |
1016 | .long sysc_update + 0x80000000 | |
1da177e4 LT |
1017 | #endif |
1018 | ||
# Interrupted inside sysc_return (or the sysc work loop): simply restart
# it — keep the old PSW flags but point the return address at
# sysc_return (first word of cleanup_table_sysc_return).
1019 | cleanup_sysc_return: | |
1020 | mvc __LC_RETURN_PSW(4),0(%r12) | |
1021 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return) | |
1022 | la %r12,__LC_RETURN_PSW | |
1023 | br %r14 | |
1024 | ||
# Interrupted inside sysc_leave. If it stopped exactly at one of the
# final instructions (sysc_done-4 / sysc_done-8, see table below), the
# exit already completed — leave state alone. Otherwise redo the
# register restore so lowcore save area and stack are consistent again.
1025 | cleanup_sysc_leave: | |
1026 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) | |
ae6aa2ea | 1027 | be BASED(2f) |
1da177e4 LT |
1028 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1029 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
1030 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) | |
ae6aa2ea | 1031 | be BASED(2f) |
1da177e4 LT |
1032 | #endif |
1033 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | |
ae6aa2ea MS |
1034 | c %r12,BASED(.Lmck_old_psw) |
# Machine-check interrupts use save area +32, all others +16.
1035 | bne BASED(0f) | |
1036 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) | |
1037 | b BASED(1f) | |
1038 | 0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) | |
1039 | 1: lm %r0,%r11,SP_R0(%r15) | |
1da177e4 | 1040 | l %r15,SP_R15(%r15) |
ae6aa2ea | 1041 | 2: la %r12,__LC_RETURN_PSW |
1da177e4 LT |
1042 | br %r14 |
1043 | cleanup_sysc_leave_insn: | |
411788ea | 1044 | .long sysc_done - 4 + 0x80000000 |
1da177e4 | 1045 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
411788ea | 1046 | .long sysc_done - 8 + 0x80000000 |
1da177e4 | 1047 | #endif |
1da177e4 | 1048 | |
ae6aa2ea MS |
# Interrupted inside io_return: restart from io_work_loop (first word of
# cleanup_table_io_work_loop), keeping the old PSW flags.
1049 | cleanup_io_return: |
1050 | mvc __LC_RETURN_PSW(4),0(%r12) | |
1051 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) | |
1052 | la %r12,__LC_RETURN_PSW | |
1053 | br %r14 | |
1054 | ||
# Interrupted inside io_leave — mirror of cleanup_sysc_leave: if the
# exit already reached its final instructions (io_done-4 / io_done-8),
# leave state alone; otherwise redo the register restore.
1055 | cleanup_io_leave: | |
1056 | clc 4(4,%r12),BASED(cleanup_io_leave_insn) | |
1057 | be BASED(2f) | |
1058 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
1059 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | |
1060 | clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) | |
1061 | be BASED(2f) | |
1062 | #endif | |
1063 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | |
1064 | c %r12,BASED(.Lmck_old_psw) | |
1065 | bne BASED(0f) | |
1066 | mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) | |
1067 | b BASED(1f) | |
1068 | 0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) | |
1069 | 1: lm %r0,%r11,SP_R0(%r15) | |
1070 | l %r15,SP_R15(%r15) | |
1071 | 2: la %r12,__LC_RETURN_PSW | |
1072 | br %r14 | |
1073 | cleanup_io_leave_insn: | |
411788ea | 1074 | .long io_done - 4 + 0x80000000 |
ae6aa2ea | 1075 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
411788ea | 1076 | .long io_done - 8 + 0x80000000 |
ae6aa2ea | 1077 | #endif |
ae6aa2ea | 1078 | |
1da177e4 LT |
1079 | /* |
1080 | * Integer constants | |
1081 | */ | |
# Literal pool for 31-bit code: constants and function addresses are
# reached PC-relative through the BASED() macro rather than as absolute
# operands.
25d83cbf HC |
1082 | .align 4 |
1083 | .Lc_spsize: .long SP_SIZE | |
1084 | .Lc_overhead: .long STACK_FRAME_OVERHEAD | |
25d83cbf HC |
1085 | .Lnr_syscalls: .long NR_syscalls |
1086 | .L0x018: .short 0x018 | |
1087 | .L0x020: .short 0x020 | |
1088 | .L0x028: .short 0x028 | |
1089 | .L0x030: .short 0x030 | |
1090 | .L0x038: .short 0x038 | |
1091 | .Lc_1: .long 1 | |
1da177e4 LT |
1092 | |
1093 | /* | |
1094 | * Symbol constants | |
1095 | */ | |
25d83cbf | 1096 | .Ls390_mcck: .long s390_do_machine_check |
77fa2245 | 1097 | .Ls390_handle_mcck: |
25d83cbf HC |
1098 | .long s390_handle_mcck |
1099 | .Lmck_old_psw: .long __LC_MCK_OLD_PSW | |
1100 | .Ldo_IRQ: .long do_IRQ | |
1101 | .Ldo_extint: .long do_extint | |
1102 | .Ldo_signal: .long do_signal | |
753c4dd6 MS |
1103 | .Ldo_notify_resume: |
1104 | .long do_notify_resume | |
25d83cbf | 1105 | .Lhandle_per: .long do_single_step |
03ff9a23 MS |
1106 | .Ldo_execve: .long do_execve |
1107 | .Lexecve_tail: .long execve_tail | |
25d83cbf HC |
1108 | .Ljump_table: .long pgm_check_table |
1109 | .Lschedule: .long schedule | |
ab1809b4 | 1110 | #ifdef CONFIG_PREEMPT |
b8e7a54c HC |
1111 | .Lpreempt_schedule_irq: |
1112 | .long preempt_schedule_irq | |
ab1809b4 | 1113 | #endif |
753c4dd6 MS |
1114 | .Ltrace_entry: .long do_syscall_trace_enter |
1115 | .Ltrace_exit: .long do_syscall_trace_exit | |
25d83cbf HC |
1116 | .Lschedtail: .long schedule_tail |
1117 | .Lsysc_table: .long sys_call_table | |
1f194a4c | 1118 | #ifdef CONFIG_TRACE_IRQFLAGS |
50bec4ce HC |
1119 | .Ltrace_irq_on_caller: |
1120 | .long trace_hardirqs_on_caller | |
1121 | .Ltrace_irq_off_caller: | |
1122 | .long trace_hardirqs_off_caller | |
af4c6874 HC |
1123 | #endif |
1124 | #ifdef CONFIG_LOCKDEP | |
523b44cf HC |
1125 | .Llockdep_sys_exit: |
1126 | .long lockdep_sys_exit | |
1f194a4c | 1127 | #endif |
1da177e4 | 1128 | .Lcritical_start: |
25d83cbf | 1129 | .long __critical_start + 0x80000000 |
1da177e4 | 1130 | .Lcritical_end: |
25d83cbf | 1131 | .long __critical_end + 0x80000000 |
1da177e4 | 1132 | .Lcleanup_critical: |
25d83cbf | 1133 | .long cleanup_critical |
1da177e4 | 1134 | |
# System call table: one 31-bit (.long esa) entry per syscall, expanded
# from the SYSCALL() lines in syscalls.S.
25d83cbf | 1135 | .section .rodata, "a" |
1da177e4 | 1136 | #define SYSCALL(esa,esame,emu) .long esa |
1da177e4 LT |
1137 | sys_call_table: |
1138 | #include "syscalls.S" | |
1139 | #undef SYSCALL |