[S390] Fix yet another two section mismatches.
[deliverable/linux.git]: arch/s390/kernel/entry.S
1 /*
2 * arch/s390/kernel/entry.S
3 * S390 low-level entry points.
4 *
5 * Copyright (C) IBM Corp. 1999,2006
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */
11
12 #include <linux/sys.h>
13 #include <linux/linkage.h>
14 #include <asm/cache.h>
15 #include <asm/lowcore.h>
16 #include <asm/errno.h>
17 #include <asm/ptrace.h>
18 #include <asm/thread_info.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/unistd.h>
21 #include <asm/page.h>
22
23 /*
24 * Stack layout for the system_call stack entry.
25 * The first few entries are identical to the user_regs_struct.
26 */
27 SP_PTREGS = STACK_FRAME_OVERHEAD
28 SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
29 SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
30 SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
31 SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
32 SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
33 SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
34 SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
35 SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
36 SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
37 SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
38 SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
39 SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
40 SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
41 SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
42 SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
43 SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
44 SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
45 SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
46 SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
47 SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
48 SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
49 SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
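/*
 * These offsets address a struct pt_regs embedded in the stack frame at
 * STACK_FRAME_OVERHEAD.  For illustration only, the 31-bit layout behind
 * the __PT_* offsets of this kernel generation is roughly:
 *
 *	struct pt_regs {
 *		unsigned long args[1];		(SP_ARGS)
 *		psw_t psw;			(SP_PSW)
 *		unsigned long gprs[16];		(SP_R0 ... SP_R15)
 *		unsigned long orig_gpr2;	(SP_ORIG_R2)
 *		unsigned short ilc;		(SP_ILC)
 *		unsigned short trap;		(SP_TRAP)
 *	};
 */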
50
51 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
52 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
53 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING)
55
56 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
57 STACK_SIZE = 1 << STACK_SHIFT
58
59 #define BASED(name) name-system_call(%r13)
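/*
 * All code below reaches its literal pool and local branch targets via
 * BASED(name): %r13 is loaded with the address of system_call (taken from
 * the second word of the SVC new PSW), and name-system_call is used as the
 * displacement off that base.  Since the displacement field is 12 bits
 * wide, everything referenced this way must live within 4KB after
 * system_call.
 */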
60
61 #ifdef CONFIG_TRACE_IRQFLAGS
62 .macro TRACE_IRQS_ON
63 l %r1,BASED(.Ltrace_irq_on)
64 basr %r14,%r1
65 .endm
66
67 .macro TRACE_IRQS_OFF
68 l %r1,BASED(.Ltrace_irq_off)
69 basr %r14,%r1
70 .endm
71 #else
72 #define TRACE_IRQS_ON
73 #define TRACE_IRQS_OFF
74 #endif
75
76 /*
77 * Register usage in interrupt handlers:
78 * R9 - pointer to current task structure
79 * R13 - pointer to literal pool
80 * R14 - return register for function calls
81 * R15 - kernel stack pointer
82 */
83
84 .macro STORE_TIMER lc_offset
85 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
86 stpt \lc_offset
87 #endif
88 .endm
89
90 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
91 .macro UPDATE_VTIME lc_from,lc_to,lc_sum
92 lm %r10,%r11,\lc_from
93 sl %r10,\lc_to
94 sl %r11,\lc_to+4
95 bc 3,BASED(0f)
96 sl %r10,BASED(.Lc_1)
97 0: al %r10,\lc_sum
98 al %r11,\lc_sum+4
99 bc 12,BASED(1f)
100 al %r10,BASED(.Lc_1)
101 1: stm %r10,%r11,\lc_sum
102 .endm
103 #endif
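/*
 * UPDATE_VTIME above is, in effect, 64-bit arithmetic on lowcore timer
 * values done with the 32-bit register pair %r10/%r11:
 *
 *	*lc_sum += *lc_from - *lc_to;
 *
 * The bc 3 / bc 12 branches propagate the borrow of the low-word
 * subtraction and the carry of the low-word addition into the high word.
 */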
104
105 .macro SAVE_ALL_BASE savearea
106 stm %r12,%r15,\savearea
107 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
108 .endm
109
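#
# SAVE_ALL_SYNC: if we came from problem (user) state, switch %r15 to the
# kernel stack.  If we came from kernel mode and CONFIG_CHECK_STACK is set,
# branch to stack_overflow when the stack pointer has already dropped into
# the guard area at the low end of the stack.
#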
110 .macro SAVE_ALL_SYNC psworg,savearea
111 la %r12,\psworg
112 tm \psworg+1,0x01 # test problem state bit
113 bz BASED(2f) # skip stack setup save
114 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
115 #ifdef CONFIG_CHECK_STACK
116 b BASED(3f)
117 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
118 bz BASED(stack_overflow)
119 3:
120 #endif
121 2:
122 .endm
123
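#
# SAVE_ALL_ASYNC: as above, but for asynchronous interrupts.  Interrupts
# from user space always switch to the async stack.  For interrupts from
# kernel mode the old PSW is first checked against the critical section;
# if it points into it, cleanup_critical repairs the interrupted sequence
# before the stack is chosen.  The switch to the async stack is skipped if
# we are already running on it.
#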
124 .macro SAVE_ALL_ASYNC psworg,savearea
125 la %r12,\psworg
126 tm \psworg+1,0x01 # test problem state bit
127 bnz BASED(1f) # from user -> load async stack
128 clc \psworg+4(4),BASED(.Lcritical_end)
129 bhe BASED(0f)
130 clc \psworg+4(4),BASED(.Lcritical_start)
131 bl BASED(0f)
132 l %r14,BASED(.Lcleanup_critical)
133 basr %r14,%r14
134 tm 1(%r12),0x01 # retest problem state after cleanup
135 bnz BASED(1f)
136 0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
137 slr %r14,%r15
138 sra %r14,STACK_SHIFT
139 be BASED(2f)
140 1: l %r15,__LC_ASYNC_STACK
141 #ifdef CONFIG_CHECK_STACK
142 b BASED(3f)
143 2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
144 bz BASED(stack_overflow)
145 3:
146 #endif
147 2:
148 .endm
149
150 .macro CREATE_STACK_FRAME psworg,savearea
151 s %r15,BASED(.Lc_spsize) # make room for registers & psw
152 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
153 la %r12,\psworg
154 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
155 icm %r12,12,__LC_SVC_ILC
156 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
157 st %r12,SP_ILC(%r15)
158 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
159 la %r12,0
160 st %r12,__SF_BACKCHAIN(%r15) # clear back chain
161 .endm
162
163 .macro RESTORE_ALL psworg,sync
164 mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
165 .if !\sync
166 ni \psworg+1,0xfd # clear wait state bit
167 .endif
168 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
169 STORE_TIMER __LC_EXIT_TIMER
170 lpsw \psworg # back to caller
171 .endm
172
173 /*
174 * Scheduler resume function, called by switch_to
175 * gpr2 = (task_struct *) prev
176 * gpr3 = (task_struct *) next
177 * Returns:
178 * gpr2 = prev
179 */
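#
# Save the callee-saved registers and kernel stack pointer of prev, move a
# pending TIF_MCCK_PENDING from prev to next, load the kernel stack and
# registers of next and update the lowcore pointers (current, thread_info,
# kernel stack) as well as control register 4 (pid).  The PER control
# registers %c9-%c11 are only reloaded if next actually uses PER and the
# saved values differ.
#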
180 .globl __switch_to
181 __switch_to:
182 basr %r1,0
183 __switch_to_base:
184 tm __THREAD_per(%r3),0xe8 # new process is using per ?
185 bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
186 stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
187 clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
188 be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
189 lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
190 __switch_to_noper:
191 l %r4,__THREAD_info(%r2) # get thread_info of prev
192 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
193 bz __switch_to_no_mcck-__switch_to_base(%r1)
194 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
195 l %r4,__THREAD_info(%r3) # get thread_info of next
196 oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
197 __switch_to_no_mcck:
198 stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
199 st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
200 l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
201 lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
202 st %r3,__LC_CURRENT # __LC_CURRENT = current task struct
203 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
204 l %r3,__THREAD_info(%r3) # load thread_info from task struct
205 st %r3,__LC_THREAD_INFO
206 ahi %r3,STACK_SIZE
207 st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
208 br %r14
209
210 __critical_start:
211 /*
212 * SVC interrupt handler routine. System calls are synchronous events and
213 * are executed with interrupts enabled.
214 */
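/*
 * The handler builds a pt_regs frame on the kernel stack, accounts the
 * elapsed cpu time, fetches the svc number from the interruption code at
 * lowcore offset 0x8a (svc 0 passes the number in %r1 to allow more than
 * 255 system calls), dispatches through sys_call_table - via the
 * trace/audit path if requested - and finally handles pending work
 * (machine checks, reschedule, signals, svc restart, single stepping)
 * in sysc_return before restoring the user context.
 */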
215
216 .globl system_call
217 system_call:
218 STORE_TIMER __LC_SYNC_ENTER_TIMER
219 sysc_saveall:
220 SAVE_ALL_BASE __LC_SAVE_AREA
221 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
222 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
223 lh %r7,0x8a # get svc number from lowcore
224 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
225 sysc_vtime:
226 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
227 bz BASED(sysc_do_svc)
228 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
229 sysc_stime:
230 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
231 sysc_update:
232 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
233 #endif
234 sysc_do_svc:
235 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
236 sla %r7,2 # *4 and test for svc 0
237 bnz BASED(sysc_nr_ok) # svc number > 0
238 # svc 0: system call number in %r1
239 cl %r1,BASED(.Lnr_syscalls)
240 bnl BASED(sysc_nr_ok)
241 lr %r7,%r1 # copy svc number to %r7
242 sla %r7,2 # *4
243 sysc_nr_ok:
244 mvc SP_ARGS(4,%r15),SP_R7(%r15)
245 sysc_do_restart:
246 l %r8,BASED(.Lsysc_table)
247 tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
248 l %r8,0(%r7,%r8) # get system call addr.
249 bnz BASED(sysc_tracesys)
250 basr %r14,%r8 # call sys_xxxx
251 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
252
253 sysc_return:
254 tm SP_PSW+1(%r15),0x01 # returning to user ?
255 bno BASED(sysc_leave)
256 tm __TI_flags+3(%r9),_TIF_WORK_SVC
257 bnz BASED(sysc_work) # there is work to do (signals etc.)
258 sysc_leave:
259 RESTORE_ALL __LC_RETURN_PSW,1
260
261 #
262 # recheck if there is more work to do
263 #
264 sysc_work_loop:
265 tm __TI_flags+3(%r9),_TIF_WORK_SVC
266 bz BASED(sysc_leave) # there is no work to do
267 #
268 # One of the work bits is on. Find out which one.
269 #
270 sysc_work:
271 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
272 bo BASED(sysc_mcck_pending)
273 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
274 bo BASED(sysc_reschedule)
275 tm __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
276 bnz BASED(sysc_sigpending)
277 tm __TI_flags+3(%r9),_TIF_RESTART_SVC
278 bo BASED(sysc_restart)
279 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
280 bo BASED(sysc_singlestep)
281 b BASED(sysc_leave)
282
283 #
284 # _TIF_NEED_RESCHED is set, call schedule
285 #
286 sysc_reschedule:
287 l %r1,BASED(.Lschedule)
288 la %r14,BASED(sysc_work_loop)
289 br %r1 # call scheduler
290
291 #
292 # _TIF_MCCK_PENDING is set, call handler
293 #
294 sysc_mcck_pending:
295 l %r1,BASED(.Ls390_handle_mcck)
296 la %r14,BASED(sysc_work_loop)
297 br %r1 # TIF bit will be cleared by handler
298
299 #
300 # _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
301 #
302 sysc_sigpending:
303 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
304 la %r2,SP_PTREGS(%r15) # load pt_regs
305 l %r1,BASED(.Ldo_signal)
306 basr %r14,%r1 # call do_signal
307 tm __TI_flags+3(%r9),_TIF_RESTART_SVC
308 bo BASED(sysc_restart)
309 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
310 bo BASED(sysc_singlestep)
311 b BASED(sysc_work_loop)
312
313 #
314 # _TIF_RESTART_SVC is set, set up registers and restart svc
315 #
316 sysc_restart:
317 ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
318 l %r7,SP_R2(%r15) # load new svc number
319 sla %r7,2
320 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
321 lm %r2,%r6,SP_R2(%r15) # load svc arguments
322 b BASED(sysc_do_restart) # restart svc
323
324 #
325 # _TIF_SINGLE_STEP is set, call do_single_step
326 #
327 sysc_singlestep:
328 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
329 mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check
330 la %r2,SP_PTREGS(%r15) # address of register-save area
331 l %r1,BASED(.Lhandle_per) # load adr. of per handler
332 la %r14,BASED(sysc_return) # load adr. of system return
333 br %r1 # branch to do_single_step
334
335 #
336 # call trace before and after sys_call
337 #
338 sysc_tracesys:
339 l %r1,BASED(.Ltrace)
340 la %r2,SP_PTREGS(%r15) # load pt_regs
341 la %r3,0
342 srl %r7,2
343 st %r7,SP_R2(%r15)
344 basr %r14,%r1
345 clc SP_R2(4,%r15),BASED(.Lnr_syscalls)
346 bnl BASED(sysc_tracenogo)
347 l %r8,BASED(.Lsysc_table)
348 l %r7,SP_R2(%r15) # strace might have changed the
349 sll %r7,2 # system call
350 l %r8,0(%r7,%r8)
351 sysc_tracego:
352 lm %r3,%r6,SP_R3(%r15)
353 l %r2,SP_ORIG_R2(%r15)
354 basr %r14,%r8 # call sys_xxx
355 st %r2,SP_R2(%r15) # store return value
356 sysc_tracenogo:
357 tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
358 bz BASED(sysc_return)
359 l %r1,BASED(.Ltrace)
360 la %r2,SP_PTREGS(%r15) # load pt_regs
361 la %r3,1
362 la %r14,BASED(sysc_return)
363 br %r1
364
365 #
366 # a new process exits the kernel with ret_from_fork
367 #
368 .globl ret_from_fork
369 ret_from_fork:
370 l %r13,__LC_SVC_NEW_PSW+4
371 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
372 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
373 bo BASED(0f)
374 st %r15,SP_R15(%r15) # store stack pointer for new kthread
375 0: l %r1,BASED(.Lschedtail)
376 basr %r14,%r1
377 TRACE_IRQS_ON
378 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
379 b BASED(sysc_return)
380
381 #
382 # kernel_execve function needs to deal with pt_regs that is not
383 # at the usual place
384 #
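# It builds a pt_regs area in its own stack frame and calls do_execve with
# it; if the exec succeeded it disables interrupts, switches to the kernel
# stack, copies the new register set there, calls execve_tail and leaves
# through sysc_return.
#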
385 .globl kernel_execve
386 kernel_execve:
387 stm %r12,%r15,48(%r15)
388 lr %r14,%r15
389 l %r13,__LC_SVC_NEW_PSW+4
390 s %r15,BASED(.Lc_spsize)
391 st %r14,__SF_BACKCHAIN(%r15)
392 la %r12,SP_PTREGS(%r15)
393 xc 0(__PT_SIZE,%r12),0(%r12)
394 l %r1,BASED(.Ldo_execve)
395 lr %r5,%r12
396 basr %r14,%r1
397 ltr %r2,%r2
398 be BASED(0f)
399 a %r15,BASED(.Lc_spsize)
400 lm %r12,%r15,48(%r15)
401 br %r14
402 # execve succeeded.
403 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
404 l %r15,__LC_KERNEL_STACK # load ksp
405 s %r15,BASED(.Lc_spsize) # make room for registers & psw
406 l %r9,__LC_THREAD_INFO
407 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
408 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
409 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
410 l %r1,BASED(.Lexecve_tail)
411 basr %r14,%r1
412 b BASED(sysc_return)
413
414 /*
415 * Program check handler routine
416 */
417
418 .globl pgm_check_handler
419 pgm_check_handler:
420 /*
421 * First we need to check for a special case:
422 * Single stepping an instruction that disables the PER event mask will
423 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
424 * For a single stepped SVC the program check handler gets control after
425 * the SVC new PSW has been loaded. But we want to execute the SVC first and
426 * then handle the PER event. Therefore we update the SVC old PSW to point
427 * to the pgm_check_handler and branch to the SVC handler after we checked
428 * if we have to load the kernel stack register.
429 * For every other possible cause for PER event without the PER mask set
430 * we just ignore the PER event (FIXME: is there anything we have to do
431 * for LPSW?).
432 */
433 STORE_TIMER __LC_SYNC_ENTER_TIMER
434 SAVE_ALL_BASE __LC_SAVE_AREA
435 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
436 bnz BASED(pgm_per) # got per exception -> special case
437 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
438 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
439 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
440 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
441 bz BASED(pgm_no_vtime)
442 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
443 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
444 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
445 pgm_no_vtime:
446 #endif
447 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
448 l %r3,__LC_PGM_ILC # load program interruption code
449 la %r8,0x7f
450 nr %r8,%r3
451 pgm_do_call:
452 l %r7,BASED(.Ljump_table)
453 sll %r8,2
454 l %r7,0(%r8,%r7) # load address of handler routine
455 la %r2,SP_PTREGS(%r15) # address of register-save area
456 la %r14,BASED(sysc_return)
457 br %r7 # branch to interrupt-handler
458
459 #
460 # handle per exception
461 #
462 pgm_per:
463 tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
464 bnz BASED(pgm_per_std) # ok, normal per event from user space
465 # ok it's one of the special cases, now we need to find out which one
466 clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
467 be BASED(pgm_svcper)
468 # no interesting special case, ignore PER event
469 lm %r12,%r15,__LC_SAVE_AREA
470 lpsw 0x28
471
472 #
473 # Normal per exception
474 #
475 pgm_per_std:
476 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
477 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
478 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
479 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
480 bz BASED(pgm_no_vtime2)
481 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
482 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
483 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
484 pgm_no_vtime2:
485 #endif
486 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
487 l %r1,__TI_task(%r9)
488 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
489 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
490 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
491 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
492 tm SP_PSW+1(%r15),0x01 # kernel per event ?
493 bz BASED(kernel_per)
494 l %r3,__LC_PGM_ILC # load program interruption code
495 la %r8,0x7f
496 nr %r8,%r3 # clear per-event-bit and ilc
497 be BASED(sysc_return) # only per or per+check ?
498 b BASED(pgm_do_call)
499
500 #
501 # it was a single stepped SVC that is causing all the trouble
502 #
503 pgm_svcper:
504 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
505 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
506 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
507 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
508 bz BASED(pgm_no_vtime3)
509 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
510 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
511 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
512 pgm_no_vtime3:
513 #endif
514 lh %r7,0x8a # get svc number from lowcore
515 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
516 l %r1,__TI_task(%r9)
517 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
518 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
519 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
520 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
521 TRACE_IRQS_ON
522 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
523 b BASED(sysc_do_svc)
524
525 #
526 # per was called from kernel, must be kprobes
527 #
528 kernel_per:
529 mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check
530 la %r2,SP_PTREGS(%r15) # address of register-save area
531 l %r1,BASED(.Lhandle_per) # load adr. of per handler
532 la %r14,BASED(sysc_leave) # load adr. of system return
533 br %r1 # branch to do_single_step
534
535 /*
536 * IO interrupt handler routine
537 */
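/*
 * I/O interrupts run on the async stack.  After saving the registers and
 * accounting the cpu time the handler calls do_IRQ and falls through to
 * io_return: on the way back to user space the work bits (_TIF_MCCK_PENDING,
 * _TIF_NEED_RESCHED, signals) are handled in io_work_loop; on the way back
 * to kernel code only a preemption check is done (CONFIG_PREEMPT).  The
 * external interrupt handler below shares this exit path.
 */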
538
539 .globl io_int_handler
540 io_int_handler:
541 STORE_TIMER __LC_ASYNC_ENTER_TIMER
542 stck __LC_INT_CLOCK
543 SAVE_ALL_BASE __LC_SAVE_AREA+16
544 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
545 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
546 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
547 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
548 bz BASED(io_no_vtime)
549 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
550 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
551 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
552 io_no_vtime:
553 #endif
554 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
555 TRACE_IRQS_OFF
556 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
557 la %r2,SP_PTREGS(%r15) # address of register-save area
558 basr %r14,%r1 # branch to standard irq handler
559 TRACE_IRQS_ON
560
561 io_return:
562 tm SP_PSW+1(%r15),0x01 # returning to user ?
563 #ifdef CONFIG_PREEMPT
564 bno BASED(io_preempt) # no -> check for preemptive scheduling
565 #else
566 bno BASED(io_leave) # no-> skip resched & signal
567 #endif
568 tm __TI_flags+3(%r9),_TIF_WORK_INT
569 bnz BASED(io_work) # there is work to do (signals etc.)
570 io_leave:
571 RESTORE_ALL __LC_RETURN_PSW,0
572 io_done:
573
574 #ifdef CONFIG_PREEMPT
575 io_preempt:
576 icm %r0,15,__TI_precount(%r9)
577 bnz BASED(io_leave)
578 l %r1,SP_R15(%r15)
579 s %r1,BASED(.Lc_spsize)
580 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
581 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
582 lr %r15,%r1
583 io_resume_loop:
584 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
585 bno BASED(io_leave)
586 mvc __TI_precount(4,%r9),BASED(.Lc_pactive)
587 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
588 l %r1,BASED(.Lschedule)
589 basr %r14,%r1 # call schedule
590 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
591 xc __TI_precount(4,%r9),__TI_precount(%r9)
592 b BASED(io_resume_loop)
593 #endif
594
595 #
596 # switch to kernel stack, then check the TIF bits
597 #
598 io_work:
599 l %r1,__LC_KERNEL_STACK
600 s %r1,BASED(.Lc_spsize)
601 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
602 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
603 lr %r15,%r1
604 #
605 # One of the work bits is on. Find out which one.
606 # Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
607 # and _TIF_MCCK_PENDING
608 #
609 io_work_loop:
610 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
611 bo BASED(io_mcck_pending)
612 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
613 bo BASED(io_reschedule)
614 tm __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
615 bnz BASED(io_sigpending)
616 b BASED(io_leave)
617
618 #
619 # _TIF_MCCK_PENDING is set, call handler
620 #
621 io_mcck_pending:
622 l %r1,BASED(.Ls390_handle_mcck)
623 la %r14,BASED(io_work_loop)
624 br %r1 # TIF bit will be cleared by handler
625
626 #
627 # _TIF_NEED_RESCHED is set, call schedule
628 #
629 io_reschedule:
630 l %r1,BASED(.Lschedule)
631 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
632 basr %r14,%r1 # call scheduler
633 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
634 tm __TI_flags+3(%r9),_TIF_WORK_INT
635 bz BASED(io_leave) # there is no work to do
636 b BASED(io_work_loop)
637
638 #
639 # _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
640 #
641 io_sigpending:
642 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
643 la %r2,SP_PTREGS(%r15) # load pt_regs
644 l %r1,BASED(.Ldo_signal)
645 basr %r14,%r1 # call do_signal
646 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
647 b BASED(io_work_loop)
648
649 /*
650 * External interrupt handler routine
651 */
652
653 .globl ext_int_handler
654 ext_int_handler:
655 STORE_TIMER __LC_ASYNC_ENTER_TIMER
656 stck __LC_INT_CLOCK
657 SAVE_ALL_BASE __LC_SAVE_AREA+16
658 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
659 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
660 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
661 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
662 bz BASED(ext_no_vtime)
663 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
664 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
665 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
666 ext_no_vtime:
667 #endif
668 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
669 TRACE_IRQS_OFF
670 la %r2,SP_PTREGS(%r15) # address of register-save area
671 lh %r3,__LC_EXT_INT_CODE # get interruption code
672 l %r1,BASED(.Ldo_extint)
673 basr %r14,%r1
674 TRACE_IRQS_ON
675 b BASED(io_return)
676
677 __critical_end:
678
679 /*
680 * Machine check handler routines
681 */
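/*
 * The machine check handler revalidates the cpu timer and the general
 * registers from their lowcore save areas.  Unless the machine check code
 * reports system damage, the saved cpu timer and old PSW are checked for
 * validity and cleanup_critical is called if the old PSW points into the
 * critical section.  The handler then switches to the panic stack (unless
 * already on it) and calls s390_do_machine_check; work it flags
 * (_TIF_MCCK_PENDING) is processed on the way back to user space.
 */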
682
683 .globl mcck_int_handler
684 mcck_int_handler:
685 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
686 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
687 SAVE_ALL_BASE __LC_SAVE_AREA+32
688 la %r12,__LC_MCK_OLD_PSW
689 tm __LC_MCCK_CODE,0x80 # system damage?
690 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
691 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
692 mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
693 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
694 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
695 bo BASED(1f)
696 la %r14,__LC_SYNC_ENTER_TIMER
697 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
698 bl BASED(0f)
699 la %r14,__LC_ASYNC_ENTER_TIMER
700 0: clc 0(8,%r14),__LC_EXIT_TIMER
701 bl BASED(0f)
702 la %r14,__LC_EXIT_TIMER
703 0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
704 bl BASED(0f)
705 la %r14,__LC_LAST_UPDATE_TIMER
706 0: spt 0(%r14)
707 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
708 1:
709 #endif
710 tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
711 bno BASED(mcck_int_main) # no -> skip cleanup critical
712 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
713 bnz BASED(mcck_int_main) # from user -> load async stack
714 clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
715 bhe BASED(mcck_int_main)
716 clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
717 bl BASED(mcck_int_main)
718 l %r14,BASED(.Lcleanup_critical)
719 basr %r14,%r14
720 mcck_int_main:
721 l %r14,__LC_PANIC_STACK # are we already on the panic stack?
722 slr %r14,%r15
723 sra %r14,PAGE_SHIFT
724 be BASED(0f)
725 l %r15,__LC_PANIC_STACK # load panic stack
726 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
727 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
728 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
729 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
730 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
731 bz BASED(mcck_no_vtime)
732 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
733 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
734 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
735 mcck_no_vtime:
736 #endif
737 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
738 la %r2,SP_PTREGS(%r15) # load pt_regs
739 l %r1,BASED(.Ls390_mcck)
740 basr %r14,%r1 # call machine check handler
741 tm SP_PSW+1(%r15),0x01 # returning to user ?
742 bno BASED(mcck_return)
743 l %r1,__LC_KERNEL_STACK # switch to kernel stack
744 s %r1,BASED(.Lc_spsize)
745 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
746 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
747 lr %r15,%r1
748 stosm __SF_EMPTY(%r15),0x04 # turn dat on
749 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
750 bno BASED(mcck_return)
751 TRACE_IRQS_OFF
752 l %r1,BASED(.Ls390_handle_mcck)
753 basr %r14,%r1 # call machine check handler
754 TRACE_IRQS_ON
755 mcck_return:
756 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
757 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
758 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
759 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
760 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
761 bno BASED(0f)
762 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
763 stpt __LC_EXIT_TIMER
764 lpsw __LC_RETURN_MCCK_PSW # back to caller
765 0:
766 #endif
767 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
768 lpsw __LC_RETURN_MCCK_PSW # back to caller
769
770 RESTORE_ALL __LC_RETURN_MCCK_PSW,0
771
772 /*
773 * Restart interruption handler, kick starter for additional CPUs
774 */
775 #ifdef CONFIG_SMP
776 #ifndef CONFIG_HOTPLUG_CPU
777 .section .init.text,"ax"
778 #endif
779 .globl restart_int_handler
780 restart_int_handler:
781 l %r15,__LC_SAVE_AREA+60 # load ksp
782 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
783 lam %a0,%a15,__LC_AREGS_SAVE_AREA
784 lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
785 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
786 basr %r14,0
787 l %r14,restart_addr-.(%r14)
788 br %r14 # branch to start_secondary
789 restart_addr:
790 .long start_secondary
791 #ifndef CONFIG_HOTPLUG_CPU
792 .previous
793 #endif
794 #else
795 /*
796 * If we do not run with SMP enabled, let the new CPU crash ...
797 */
798 .globl restart_int_handler
799 restart_int_handler:
800 basr %r1,0
801 restart_base:
802 lpsw restart_crash-restart_base(%r1)
803 .align 8
804 restart_crash:
805 .long 0x000a0000,0x00000000
806 restart_go:
807 #endif
808
809 #ifdef CONFIG_CHECK_STACK
810 /*
811 * The synchronous or the asynchronous stack overflowed. We are dead.
812 * No need to properly save the registers, we are going to panic anyway.
813 * Setup a pt_regs so that show_trace can provide a good call trace.
814 */
815 stack_overflow:
816 l %r15,__LC_PANIC_STACK # change to panic stack
817 sl %r15,BASED(.Lc_spsize)
818 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
819 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
820 la %r1,__LC_SAVE_AREA
821 ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ?
822 be BASED(0f)
823 ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ?
824 be BASED(0f)
825 la %r1,__LC_SAVE_AREA+16
826 0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack
827 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
828 l %r1,BASED(1f) # branch to kernel_stack_overflow
829 la %r2,SP_PTREGS(%r15) # load pt_regs
830 br %r1
831 1: .long kernel_stack_overflow
832 #endif
833
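#
# Each cleanup_table_* entry pair brackets a range of instructions in the
# critical section above.  cleanup_critical, called from SAVE_ALL_ASYNC and
# from the machine check handler, compares the interrupted PSW address at
# 4(%r12) with these ranges and, on a match, completes or rewinds the
# interrupted sequence so that the saved state is consistent again.  The
# adjusted return PSW is built in __LC_RETURN_PSW and %r12 is left pointing
# to it.
#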
834 cleanup_table_system_call:
835 .long system_call + 0x80000000, sysc_do_svc + 0x80000000
836 cleanup_table_sysc_return:
837 .long sysc_return + 0x80000000, sysc_leave + 0x80000000
838 cleanup_table_sysc_leave:
839 .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
840 cleanup_table_sysc_work_loop:
841 .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
842 cleanup_table_io_return:
843 .long io_return + 0x80000000, io_leave + 0x80000000
844 cleanup_table_io_leave:
845 .long io_leave + 0x80000000, io_done + 0x80000000
846 cleanup_table_io_work_loop:
847 .long io_work_loop + 0x80000000, io_mcck_pending + 0x80000000
848
849 cleanup_critical:
850 clc 4(4,%r12),BASED(cleanup_table_system_call)
851 bl BASED(0f)
852 clc 4(4,%r12),BASED(cleanup_table_system_call+4)
853 bl BASED(cleanup_system_call)
854 0:
855 clc 4(4,%r12),BASED(cleanup_table_sysc_return)
856 bl BASED(0f)
857 clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
858 bl BASED(cleanup_sysc_return)
859 0:
860 clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
861 bl BASED(0f)
862 clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
863 bl BASED(cleanup_sysc_leave)
864 0:
865 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
866 bl BASED(0f)
867 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
868 bl BASED(cleanup_sysc_return)
869 0:
870 clc 4(4,%r12),BASED(cleanup_table_io_return)
871 bl BASED(0f)
872 clc 4(4,%r12),BASED(cleanup_table_io_return+4)
873 bl BASED(cleanup_io_return)
874 0:
875 clc 4(4,%r12),BASED(cleanup_table_io_leave)
876 bl BASED(0f)
877 clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
878 bl BASED(cleanup_io_leave)
879 0:
880 clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
881 bl BASED(0f)
882 clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
883 bl BASED(cleanup_io_return)
884 0:
885 br %r14
886
887 cleanup_system_call:
888 mvc __LC_RETURN_PSW(8),0(%r12)
889 c %r12,BASED(.Lmck_old_psw)
890 be BASED(0f)
891 la %r12,__LC_SAVE_AREA+16
892 b BASED(1f)
893 0: la %r12,__LC_SAVE_AREA+32
894 1:
895 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
896 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
897 bh BASED(0f)
898 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
899 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
900 bhe BASED(cleanup_vtime)
901 #endif
902 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
903 bh BASED(0f)
904 mvc __LC_SAVE_AREA(16),0(%r12)
905 0: st %r13,4(%r12)
906 st %r12,__LC_SAVE_AREA+48 # argh
907 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
908 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
909 l %r12,__LC_SAVE_AREA+48 # argh
910 st %r15,12(%r12)
911 lh %r7,0x8a
912 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
913 cleanup_vtime:
914 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
915 bhe BASED(cleanup_stime)
916 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
917 bz BASED(cleanup_novtime)
918 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
919 cleanup_stime:
920 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
921 bh BASED(cleanup_update)
922 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
923 cleanup_update:
924 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
925 cleanup_novtime:
926 #endif
927 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
928 la %r12,__LC_RETURN_PSW
929 br %r14
930 cleanup_system_call_insn:
931 .long sysc_saveall + 0x80000000
932 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
933 .long system_call + 0x80000000
934 .long sysc_vtime + 0x80000000
935 .long sysc_stime + 0x80000000
936 .long sysc_update + 0x80000000
937 #endif
938
939 cleanup_sysc_return:
940 mvc __LC_RETURN_PSW(4),0(%r12)
941 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
942 la %r12,__LC_RETURN_PSW
943 br %r14
944
945 cleanup_sysc_leave:
946 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
947 be BASED(2f)
948 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
949 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
950 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
951 be BASED(2f)
952 #endif
953 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
954 c %r12,BASED(.Lmck_old_psw)
955 bne BASED(0f)
956 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
957 b BASED(1f)
958 0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
959 1: lm %r0,%r11,SP_R0(%r15)
960 l %r15,SP_R15(%r15)
961 2: la %r12,__LC_RETURN_PSW
962 br %r14
963 cleanup_sysc_leave_insn:
964 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
965 .long sysc_leave + 14 + 0x80000000
966 #endif
967 .long sysc_leave + 10 + 0x80000000
968
969 cleanup_io_return:
970 mvc __LC_RETURN_PSW(4),0(%r12)
971 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
972 la %r12,__LC_RETURN_PSW
973 br %r14
974
975 cleanup_io_leave:
976 clc 4(4,%r12),BASED(cleanup_io_leave_insn)
977 be BASED(2f)
978 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
979 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
980 clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
981 be BASED(2f)
982 #endif
983 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
984 c %r12,BASED(.Lmck_old_psw)
985 bne BASED(0f)
986 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
987 b BASED(1f)
988 0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
989 1: lm %r0,%r11,SP_R0(%r15)
990 l %r15,SP_R15(%r15)
991 2: la %r12,__LC_RETURN_PSW
992 br %r14
993 cleanup_io_leave_insn:
994 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
995 .long io_leave + 18 + 0x80000000
996 #endif
997 .long io_leave + 14 + 0x80000000
998
999 /*
1000 * Integer constants
1001 */
1002 .align 4
1003 .Lc_spsize: .long SP_SIZE
1004 .Lc_overhead: .long STACK_FRAME_OVERHEAD
1005 .Lc_pactive: .long PREEMPT_ACTIVE
1006 .Lnr_syscalls: .long NR_syscalls
1007 .L0x018: .short 0x018
1008 .L0x020: .short 0x020
1009 .L0x028: .short 0x028
1010 .L0x030: .short 0x030
1011 .L0x038: .short 0x038
1012 .Lc_1: .long 1
1013
1014 /*
1015 * Symbol constants
1016 */
1017 .Ls390_mcck: .long s390_do_machine_check
1018 .Ls390_handle_mcck:
1019 .long s390_handle_mcck
1020 .Lmck_old_psw: .long __LC_MCK_OLD_PSW
1021 .Ldo_IRQ: .long do_IRQ
1022 .Ldo_extint: .long do_extint
1023 .Ldo_signal: .long do_signal
1024 .Lhandle_per: .long do_single_step
1025 .Ldo_execve: .long do_execve
1026 .Lexecve_tail: .long execve_tail
1027 .Ljump_table: .long pgm_check_table
1028 .Lschedule: .long schedule
1029 .Ltrace: .long syscall_trace
1030 .Lschedtail: .long schedule_tail
1031 .Lsysc_table: .long sys_call_table
1032 #ifdef CONFIG_TRACE_IRQFLAGS
1033 .Ltrace_irq_on: .long trace_hardirqs_on
1034 .Ltrace_irq_off:
1035 .long trace_hardirqs_off
1036 #endif
1037 .Lcritical_start:
1038 .long __critical_start + 0x80000000
1039 .Lcritical_end:
1040 .long __critical_end + 0x80000000
1041 .Lcleanup_critical:
1042 .long cleanup_critical
1043
1044 .section .rodata, "a"
1045 #define SYSCALL(esa,esame,emu) .long esa
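# Each line of syscalls.S has the form SYSCALL(esa,esame,emu); with the
# definition above only the 31-bit ("esa") entry point is emitted, so a
# line such as SYSCALL(sys_getpid,sys_getpid,sys_getpid) (illustrative
# example) simply becomes ".long sys_getpid" in the table.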
1046 sys_call_table:
1047 #include "syscalls.S"
1048 #undef SYSCALL