Commit | Line | Data |
---|---|---|
5f97f7f9 HS |
1 | /* |
2 | * Copyright (C) 2004-2006 Atmel Corporation | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 as | |
6 | * published by the Free Software Foundation. | |
7 | */ | |
8 | ||
9 | /* | |
10 | * This file contains the low-level entry-points into the kernel, that is, | |
11 | * exception handlers, debug trap handlers, interrupt handlers and the | |
12 | * system call handler. | |
13 | */ | |
14 | #include <linux/errno.h> | |
15 | ||
16 | #include <asm/asm.h> | |
17 | #include <asm/hardirq.h> | |
18 | #include <asm/irq.h> | |
19 | #include <asm/ocd.h> | |
20 | #include <asm/page.h> | |
21 | #include <asm/pgtable.h> | |
22 | #include <asm/ptrace.h> | |
23 | #include <asm/sysreg.h> | |
24 | #include <asm/thread_info.h> | |
25 | #include <asm/unistd.h> | |
26 | ||
/*
 * NOTE(review): when CONFIG_PREEMPT is off, preempt_stop expands to nothing
 * and the preprocessor renames fault_resume_kernel to fault_restore_all.
 * The rename hits both the label definition and all its uses below, so the
 * symbol stays self-consistent in either configuration.
 */
27 | #ifdef CONFIG_PREEMPT | |
28 | # define preempt_stop mask_interrupts | |
29 | #else | |
30 | # define preempt_stop | |
31 | # define fault_resume_kernel fault_restore_all | |
32 | #endif | |
33 | ||
/* IRQ_MASK: the softirq and hardirq count fields of preempt_count combined */
34 | #define __MASK(x) ((1 << (x)) - 1) | |
35 | #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \ | |
36 | (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)) | |
37 | ||
/*
 * Hardware exception vector table.  Each slot is one unconditional branch
 * to the real handler; the .align 2 between entries keeps every slot at a
 * fixed 4-byte spacing so the CPU's per-cause vector offsets land on the
 * right bral (assumes EVBA points at exception_vectors — TODO confirm
 * against the linker script, which is outside this view).
 * Several causes share a handler (e.g. three unimplemented-instruction
 * slots all go to do_illegal_opcode_ll; ITLB/DTLB miss-protection slots
 * reuse the address/protection fault handlers).
 */
38 | .section .ex.text,"ax",@progbits | |
39 | .align 2 | |
40 | exception_vectors: | |
41 | bral handle_critical | |
42 | .align 2 | |
43 | bral handle_critical | |
44 | .align 2 | |
45 | bral do_bus_error_write | |
46 | .align 2 | |
47 | bral do_bus_error_read | |
48 | .align 2 | |
49 | bral do_nmi_ll | |
50 | .align 2 | |
51 | bral handle_address_fault | |
52 | .align 2 | |
53 | bral handle_protection_fault | |
54 | .align 2 | |
55 | bral handle_debug | |
56 | .align 2 | |
57 | bral do_illegal_opcode_ll | |
58 | .align 2 | |
59 | bral do_illegal_opcode_ll | |
60 | .align 2 | |
61 | bral do_illegal_opcode_ll | |
62 | .align 2 | |
63 | bral do_fpe_ll | |
64 | .align 2 | |
65 | bral do_illegal_opcode_ll | |
66 | .align 2 | |
67 | bral handle_address_fault | |
68 | .align 2 | |
69 | bral handle_address_fault | |
70 | .align 2 | |
71 | bral handle_protection_fault | |
72 | .align 2 | |
73 | bral handle_protection_fault | |
74 | .align 2 | |
75 | bral do_dtlb_modified | |
76 | ||
/*
 * Fast TLB-miss entry points.  Each lives in its own section so the linker
 * script can place it at the address the MMU vectors to for that miss type
 * (instruction fetch / data read / data write — TODO confirm placement in
 * the linker script).  All three save r0-r3 and converge on
 * tlb_miss_common; dtlb_miss_write simply falls through into it.
 */
77 | /* | |
78 | * r0 : PGD/PT/PTE | |
79 | * r1 : Offending address | |
80 | * r2 : Scratch register | |
81 | * r3 : Cause (5, 12 or 13) | |
82 | */ | |
/* Only r0-r3 are touched on the fast path, so only they are saved */
83 | #define tlbmiss_save pushm r0-r3 | |
84 | #define tlbmiss_restore popm r0-r3 | |
85 | ||
86 | .section .tlbx.ex.text,"ax",@progbits | |
87 | .global itlb_miss | |
88 | itlb_miss: | |
89 | tlbmiss_save | |
90 | rjmp tlb_miss_common | |
91 | ||
92 | .section .tlbr.ex.text,"ax",@progbits | |
93 | dtlb_miss_read: | |
94 | tlbmiss_save | |
95 | rjmp tlb_miss_common | |
96 | ||
97 | .section .tlbw.ex.text,"ax",@progbits | |
98 | dtlb_miss_write: | |
99 | tlbmiss_save | |
/* falls through to tlb_miss_common below */
100 | ||
/*
 * Common TLB refill: two-level software page-table walk.
 * On entry r0-r3 are saved on the stack (tlbmiss_save).
 *   TLBEAR = faulting virtual address, PTBR = base of current PGD.
 * Addresses with bit 31 set are kernel/vmalloc space and are looked up in
 * swapper_pg_dir instead of the current process's PGD.
 */
101 | .global tlb_miss_common | |
102 | tlb_miss_common: | |
c0c3e816 HS |
103 | mfsr r0, SYSREG_TLBEAR |
104 | mfsr r1, SYSREG_PTBR | |
5f97f7f9 HS |
105 | |
106 | /* Is it the vmalloc space? */ | |
c0c3e816 | 107 | bld r0, 31 |
5f97f7f9 HS |
108 | brcs handle_vmalloc_miss |
109 | ||
110 | /* First level lookup */ | |
111 | pgtbl_lookup: | |
c0c3e816 HS |
/* r2 = PGD index; r3 = PGD entry; r1 = PTE index within the page table */
112 | lsr r2, r0, PGDIR_SHIFT |
113 | ld.w r3, r1[r2 << 2] | |
114 | bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT | |
115 | bld r3, _PAGE_BIT_PRESENT | |
5f97f7f9 HS |
116 | brcc page_table_not_present |
117 | ||
5f97f7f9 | 118 | /* Translate to virtual address in P1. */ |
c0c3e816 HS |
/* Clear low flag bits of the PGD entry, set bit 31 -> P1 segment address */
119 | andl r3, 0xf000 |
120 | sbr r3, 31 |
5f97f7f9 HS |
121 | |
122 | /* Second level lookup */ | |
c0c3e816 HS |
123 | ld.w r2, r3[r1 << 2] |
124 | mfsr r0, SYSREG_TLBARLO | |
125 | bld r2, _PAGE_BIT_PRESENT | |
5f97f7f9 HS |
126 | brcc page_not_present |
127 | ||
128 | /* Mark the page as accessed */ | |
c0c3e816 HS |
129 | sbr r2, _PAGE_BIT_ACCESSED |
130 | st.w r3[r1 << 2], r2 | |
5f97f7f9 HS |
131 | |
132 | /* Drop software flags */ | |
c0c3e816 HS |
133 | andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff |
134 | mtsr SYSREG_TLBELO, r2 | |
5f97f7f9 HS |
135 | |
136 | /* Figure out which entry we want to replace */ | |
c0c3e816 | 137 | mfsr r1, SYSREG_MMUCR |
5f97f7f9 HS |
/*
 * clz on the TLBARLO accessed-bits finds the first not-recently-used
 * entry; C set (all bits set, clz of 0 — TODO confirm clz flag semantics
 * against the AVR32 architecture manual) means every entry was accessed,
 * so the accessed bookkeeping is reset and replacement restarts at 0.
 */
138 | clz r2, r0 |
139 | brcc 1f | |
c0c3e816 HS |
140 | mov r3, -1 /* All entries have been accessed, */ |
141 | mov r2, 0 /* so start at 0 */ |
142 | mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */ |
5f97f7f9 | 143 | |
c0c3e816 HS |
/* Write the victim index into MMUCR.DRP, then commit with tlbw */
144 | 1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE |
145 | mtsr SYSREG_MMUCR, r1 | |
5f97f7f9 HS |
146 | tlbw |
147 | ||
148 | tlbmiss_restore | |
149 | rete | |
150 | ||
151 | handle_vmalloc_miss: | |
152 | /* Simply do the lookup in init's page table */ | |
c0c3e816 HS |
153 | mov r1, lo(swapper_pg_dir) |
154 | orh r1, hi(swapper_pg_dir) |
5f97f7f9 HS |
155 | rjmp pgtbl_lookup |
156 | ||
157 | ||
/*
 * System call entry (supervisor mode).  Builds a pt_regs frame on the
 * stack: r12_orig slot, r0-lr via stmts, then PC/SR from RAR_SUP/RSR_SUP.
 * Under CONFIG_PREEMPT interrupts are masked around the RAR/RSR_SUP reads
 * so an IRQ cannot clobber those registers before they are saved (see the
 * matching special case in the IRQ_LEVEL macro).
 * Syscall number arrives in r8 (cp.w r8, NR_syscalls below); the 5th
 * argument is moved from r5 into r8 before dispatch.
 */
158 | /* --- System Call --- */ | |
159 | ||
160 | .section .scall.text,"ax",@progbits | |
161 | system_call: | |
a7e30b8d PR |
162 | #ifdef CONFIG_PREEMPT |
163 | mask_interrupts |
164 | #endif |
5f97f7f9 HS |
165 | pushm r12 /* r12_orig */ |
166 | stmts --sp, r0-lr |
a7e30b8d | 167 | |
5f97f7f9 HS |
168 | mfsr r0, SYSREG_RAR_SUP |
169 | mfsr r1, SYSREG_RSR_SUP |
a7e30b8d PR |
170 | #ifdef CONFIG_PREEMPT |
171 | unmask_interrupts |
172 | #endif |
173 | zero_fp |
5f97f7f9 HS |
174 | stm --sp, r0-r1 |
175 | ||
176 | /* check for syscall tracing */ | |
177 | get_thread_info r0 | |
178 | ld.w r1, r0[TI_flags] | |
179 | bld r1, TIF_SYSCALL_TRACE | |
180 | brcs syscall_trace_enter | |
181 | ||
182 | syscall_trace_cont: | |
/* brhs = unsigned >=, so negative syscall numbers are rejected too */
183 | cp.w r8, NR_syscalls | |
184 | brhs syscall_badsys | |
185 | ||
186 | lddpc lr, syscall_table_addr | |
187 | ld.w lr, lr[r8 << 2] | |
188 | mov r8, r5 /* 5th argument (6th is pushed by stub) */ | |
189 | icall lr | |
/* falls through to syscall_return with the result in r12 */
190 | ||
/*
 * Syscall exit path.  Interrupts are masked so TIF work flags cannot be
 * set between the check and the rets.  The return value (r12) is written
 * into the saved frame so the ldmts below reloads it into userspace r12.
 */
191 | .global syscall_return | |
192 | syscall_return: | |
193 | get_thread_info r0 | |
194 | mask_interrupts /* make sure we don't miss an interrupt | |
195 | setting need_resched or sigpending | |
196 | between sampling and the rets */ | |
197 | ||
198 | /* Store the return value so that the correct value is loaded below */ | |
199 | stdsp sp[REG_R12], r12 | |
200 | ||
201 | ld.w r1, r0[TI_flags] | |
202 | andl r1, _TIF_ALLWORK_MASK, COH | |
203 | brne syscall_exit_work | |
204 | ||
205 | syscall_exit_cont: | |
/* Restore PC/SR into RAR/RSR_SUP, reload user registers, return */
206 | popm r8-r9 | |
207 | mtsr SYSREG_RAR_SUP, r8 | |
208 | mtsr SYSREG_RSR_SUP, r9 | |
209 | ldmts sp++, r0-lr | |
210 | sub sp, -4 /* r12_orig */ | |
211 | rets | |
212 | ||
/* PC-relative pointer to the syscall table, loaded with lddpc above */
213 | .align 2 | |
214 | syscall_table_addr: | |
215 | .long sys_call_table | |
216 | ||
217 | syscall_badsys: | |
218 | mov r12, -ENOSYS | |
219 | rjmp syscall_return | |
220 | ||
220 | ||
/*
 * First return of a newly forked task: finish the scheduler bookkeeping,
 * then join the normal syscall exit path (work flags already rechecked).
 */
221 | .global ret_from_fork | |
222 | ret_from_fork: | |
223 | rcall schedule_tail | |
224 | ||
225 | /* check for syscall tracing */ | |
226 | get_thread_info r0 | |
227 | ld.w r1, r0[TI_flags] | |
228 | andl r1, _TIF_ALLWORK_MASK, COH | |
229 | brne syscall_exit_work | |
230 | rjmp syscall_exit_cont | |
231 | ||
/* Entry-side ptrace hook: r8-r12 (syscall nr + args) may be rewritten
 * by the tracer, so save/restore them around syscall_trace */
232 | syscall_trace_enter: | |
233 | pushm r8-r12 | |
234 | rcall syscall_trace | |
235 | popm r8-r12 | |
236 | rjmp syscall_trace_cont | |
237 | ||
/*
 * Slow syscall exit: handle pending work with interrupts unmasked around
 * each action, re-sampling TI_flags with interrupts masked before every
 * re-check (r0 = thread_info, r1 = flags throughout).
 * Order: syscall tracing, reschedule loop, signals/notify, then the
 * TIF_BREAKPOINT case which arms OCD hardware breakpoint 2A on the saved
 * return PC before resuming userspace.
 */
238 | syscall_exit_work: | |
239 | bld r1, TIF_SYSCALL_TRACE | |
240 | brcc 1f | |
241 | unmask_interrupts | |
242 | rcall syscall_trace | |
243 | mask_interrupts | |
244 | ld.w r1, r0[TI_flags] | |
245 | ||
246 | 1: bld r1, TIF_NEED_RESCHED | |
247 | brcc 2f | |
248 | unmask_interrupts | |
249 | rcall schedule | |
250 | mask_interrupts | |
251 | ld.w r1, r0[TI_flags] | |
252 | rjmp 1b | |
253 | ||
254 | 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | |
255 | tst r1, r2 | |
256 | breq 3f | |
257 | unmask_interrupts | |
258 | mov r12, sp | |
259 | mov r11, r0 | |
260 | rcall do_notify_resume | |
261 | mask_interrupts | |
262 | ld.w r1, r0[TI_flags] | |
263 | rjmp 1b | |
264 | ||
/* r3 = (ASID << 1) | valid bits, r2 = saved PC: program breakpoint 2A
 * (bit 30 / bit 0 meanings per the OCD spec — TODO confirm) */
265 | 3: bld r1, TIF_BREAKPOINT | |
266 | brcc syscall_exit_cont | |
267 | mfsr r3, SYSREG_TLBEHI | |
268 | lddsp r2, sp[REG_PC] | |
269 | andl r3, 0xff, COH | |
270 | lsl r3, 1 | |
271 | sbr r3, 30 | |
272 | sbr r3, 0 | |
273 | mtdr DBGREG_BWA2A, r2 | |
274 | mtdr DBGREG_BWC2A, r3 | |
275 | rjmp syscall_exit_cont | |
276 | ||
277 | ||
/*
 * TLB-miss slow path: undo the fast-path save, build a full pt_regs frame
 * and hand off to the C page fault handler with r12 = ECR (cause).
 */
278 | /* The slow path of the TLB miss handler */ | |
279 | page_table_not_present: | |
280 | page_not_present: | |
281 | tlbmiss_restore | |
282 | sub sp, 4 | |
283 | stmts --sp, r0-lr | |
284 | rcall save_full_context_ex | |
285 | mfsr r12, SYSREG_ECR | |
286 | mov r11, sp | |
287 | rcall do_page_fault | |
288 | rjmp ret_from_exception | |
289 | ||
/*
 * Completes an exception frame after stmts --sp, r0-lr has run:
 * pushes PC (RAR_EX) and SR (RSR_EX), and — when the exception came from
 * a non-user mode (mode bits != 0, tested via MODE_MASK) — replaces the
 * bogus saved SP with the pre-exception kernel SP.  Returns to caller
 * with exceptions unmasked; clobbers r8, r10, r11, r12.
 */
290 | /* This function expects to find offending PC in SYSREG_RAR_EX */ | |
291 | save_full_context_ex: | |
292 | mfsr r8, SYSREG_RSR_EX | |
293 | mov r12, r8 | |
294 | andh r8, (MODE_MASK >> 16), COH | |
295 | mfsr r11, SYSREG_RAR_EX | |
296 | brne 2f | |
297 | ||
298 | 1: pushm r11, r12 /* PC and SR */ | |
299 | unmask_exceptions | |
300 | ret r12 | |
301 | ||
302 | 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR) | |
303 | stdsp sp[4], r10 /* replace saved SP */ | |
304 | rjmp 1b | |
305 | ||
/*
 * Critical exception (vector 0/1): save a frame and call the C handler;
 * do_critical_exception is not expected to return.
 */
306 | /* Low-level exception handlers */ | |
307 | handle_critical: | |
308 | pushm r12 | |
309 | pushm r0-r12 | |
310 | rcall save_full_context_ex | |
311 | mfsr r12, SYSREG_ECR | |
312 | mov r11, sp | |
313 | rcall do_critical_exception | |
314 | ||
/* r12 = PC-relative address of the message string, then panic() */
315 | /* We should never get here... */ | |
316 | bad_return: | |
317 | sub r12, pc, (. - 1f) | |
318 | bral panic | |
319 | .align 2 | |
320 | 1: .asciz "Return from critical exception!" | |
321 | ||
/*
 * Bus error handlers: identical framing; r11 distinguishes write (1)
 * from read (0), r12 = faulting bus address from BEAR, r10 = pt_regs.
 */
322 | .align 1 | |
323 | do_bus_error_write: | |
324 | sub sp, 4 | |
325 | stmts --sp, r0-lr | |
326 | rcall save_full_context_ex | |
327 | mov r11, 1 | |
328 | rjmp 1f | |
329 | ||
330 | do_bus_error_read: | |
331 | sub sp, 4 | |
332 | stmts --sp, r0-lr | |
333 | rcall save_full_context_ex | |
334 | mov r11, 0 | |
335 | 1: mfsr r12, SYSREG_BEAR | |
336 | mov r10, sp | |
337 | rcall do_bus_error | |
338 | rjmp ret_from_exception | |
339 | ||
/*
 * NMI handler.  Saves a frame by hand (cannot reuse save_full_context_ex:
 * PC/SR live in RAR/RSR_NMI).  r0 = mode the NMI interrupted (0 = user);
 * for non-user mode the saved SP is fixed up first (label 2), and the
 * restore path differs: a user-mode ldmts would switch register files, so
 * the kernel-mode return (label 3) restores via popm instead.
 */
340 | .align 1 | |
341 | do_nmi_ll: | |
342 | sub sp, 4 | |
343 | stmts --sp, r0-lr | |
92b728c1 HS |
344 | mfsr r9, SYSREG_RSR_NMI |
345 | mfsr r8, SYSREG_RAR_NMI |
346 | bfextu r0, r9, MODE_SHIFT, 3 |
347 | brne 2f |
348 | ||
349 | 1: pushm r8, r9 /* PC and SR */ |
5f97f7f9 HS |
350 | mfsr r12, SYSREG_ECR |
351 | mov r11, sp |
352 | rcall do_nmi |
92b728c1 HS |
/* tst sets Z from r0 (mode) before RSR_NMI is rewritten */
353 | popm r8-r9 |
354 | mtsr SYSREG_RAR_NMI, r8 |
355 | tst r0, r0 |
356 | mtsr SYSREG_RSR_NMI, r9 |
357 | brne 3f |
358 | ||
359 | ldmts sp++, r0-lr |
360 | sub sp, -4 /* skip r12_orig */ |
361 | rete |
362 | ||
363 | 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR) |
364 | stdsp sp[4], r10 /* replace saved SP */ |
365 | rjmp 1b |
366 | ||
367 | 3: popm lr |
368 | sub sp, -4 /* skip sp */ |
369 | popm r0-r12 |
370 | sub sp, -4 /* skip r12_orig */ |
371 | rete |
5f97f7f9 HS |
372 | |
373 | handle_address_fault: | |
374 | sub sp, 4 | |
375 | stmts --sp, r0-lr | |
376 | rcall save_full_context_ex | |
377 | mfsr r12, SYSREG_ECR | |
378 | mov r11, sp | |
379 | rcall do_address_exception | |
380 | rjmp ret_from_exception | |
381 | ||
382 | handle_protection_fault: | |
383 | sub sp, 4 | |
384 | stmts --sp, r0-lr | |
385 | rcall save_full_context_ex | |
386 | mfsr r12, SYSREG_ECR | |
387 | mov r11, sp | |
388 | rcall do_page_fault | |
389 | rjmp ret_from_exception | |
390 | ||
391 | .align 1 | |
392 | do_illegal_opcode_ll: | |
393 | sub sp, 4 | |
394 | stmts --sp, r0-lr | |
395 | rcall save_full_context_ex | |
396 | mfsr r12, SYSREG_ECR | |
397 | mov r11, sp | |
398 | rcall do_illegal_opcode | |
399 | rjmp ret_from_exception | |
400 | ||
/*
 * DTLB-modified fast path: a write hit a clean page.  Walk the page table
 * (same two-level layout as tlb_miss_common, PGD entry accessed through
 * the P1 segment via bit 31), set _PAGE_BIT_DIRTY in the PTE, and rewrite
 * the existing TLB entry.  Entirely in assembly, no C call, no full frame.
 */
401 | do_dtlb_modified: | |
402 | pushm r0-r3 | |
403 | mfsr r1, SYSREG_TLBEAR | |
404 | mfsr r0, SYSREG_PTBR | |
405 | lsr r2, r1, PGDIR_SHIFT | |
406 | ld.w r0, r0[r2 << 2] | |
/* Isolate the PTE index: shift out the PGD bits, then the page offset */
407 | lsl r1, (32 - PGDIR_SHIFT) | |
408 | lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT | |
409 | ||
410 | /* Translate to virtual address in P1 */ | |
411 | andl r0, 0xf000 | |
412 | sbr r0, 31 | |
413 | add r2, r0, r1 << 2 | |
414 | ld.w r3, r2[0] | |
415 | sbr r3, _PAGE_BIT_DIRTY | |
416 | mov r0, r3 | |
417 | st.w r2[0], r3 | |
418 | ||
419 | /* The page table is up-to-date. Update the TLB entry as well */ | |
420 | andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK) | |
421 | mtsr SYSREG_TLBELO, r0 | |
422 | ||
423 | /* MMUCR[DRP] is updated automatically, so let's go... */ | |
424 | tlbw | |
425 | ||
426 | popm r0-r3 | |
427 | rete | |
428 | ||
/*
 * Floating-point exception trampoline.  Unlike the other handlers the
 * cause is hard-coded: mov r12, 26 — presumably the FP exception's ECR
 * value, since this vector is reached via a coprocessor path rather than
 * a direct exception slot (TODO confirm against the AVR32 arch manual).
 * Interrupts are unmasked before calling into C.
 */
429 | do_fpe_ll: | |
430 | sub sp, 4 | |
431 | stmts --sp, r0-lr | |
432 | rcall save_full_context_ex | |
433 | unmask_interrupts | |
434 | mov r12, 26 | |
435 | mov r11, sp | |
436 | rcall do_fpe | |
437 | rjmp ret_from_exception | |
438 | ||
/*
 * Common exception return.  Saved SR mode bits decide the path:
 * non-zero mode -> resume kernel (possibly preempting), zero -> check
 * TIF work flags and resume userspace.  Interrupts stay masked from here
 * to rete so no work flag can be set after the final check.
 */
439 | ret_from_exception: | |
440 | mask_interrupts | |
441 | lddsp r4, sp[REG_SR] | |
442 | andh r4, (MODE_MASK >> 16), COH | |
443 | brne fault_resume_kernel | |
444 | ||
445 | get_thread_info r0 | |
446 | ld.w r1, r0[TI_flags] | |
447 | andl r1, _TIF_WORK_MASK, COH | |
448 | brne fault_exit_work | |
449 | ||
450 | fault_resume_user: | |
/* Exceptions masked while RAR/RSR_EX hold live values */
451 | popm r8-r9 | |
452 | mask_exceptions | |
453 | mtsr SYSREG_RAR_EX, r8 | |
454 | mtsr SYSREG_RSR_EX, r9 | |
455 | ldmts sp++, r0-lr | |
456 | sub sp, -4 | |
457 | rete | |
458 | ||
459 | fault_resume_kernel: | |
460 | #ifdef CONFIG_PREEMPT | |
/* Preempt only if: preempt_count == 0, NEED_RESCHED set, and the
 * interrupted context did not have interrupts globally masked (GM) */
461 | get_thread_info r0 | |
462 | ld.w r2, r0[TI_preempt_count] | |
463 | cp.w r2, 0 | |
464 | brne 1f | |
465 | ld.w r1, r0[TI_flags] | |
466 | bld r1, TIF_NEED_RESCHED | |
467 | brcc 1f | |
468 | lddsp r4, sp[REG_SR] | |
469 | bld r4, SYSREG_GM_OFFSET | |
470 | brcs 1f | |
471 | rcall preempt_schedule_irq | |
472 | 1: | |
473 | #endif | |
474 | ||
/* Kernel-mode restore: popm, not ldmts (no register-file switch).
 * NOTE(review): r1 from SYSREG_SR appears unused below — verify. */
475 | popm r8-r9 | |
476 | mask_exceptions | |
477 | mfsr r1, SYSREG_SR | |
478 | mtsr SYSREG_RAR_EX, r8 | |
479 | mtsr SYSREG_RSR_EX, r9 | |
480 | popm lr | |
481 | sub sp, -4 /* ignore SP */ | |
482 | popm r0-r12 | |
483 | sub sp, -4 /* ignore r12_orig */ | |
484 | rete | |
485 | ||
/*
 * irq_exit_work: IRQ-mode exit with pending work.  Switches the CPU into
 * exception mode (M2:M0 = 110 via clearing M0, setting M1|M2) so the
 * exception-mode work loop and fault_resume_user can be shared; the
 * `sub pc, -2` after mtsr SR flushes the pipeline so the mode change
 * takes effect before the next instruction.
 */
486 | irq_exit_work: | |
487 | /* Switch to exception mode so that we can share the same code. */ | |
488 | mfsr r8, SYSREG_SR | |
489 | cbr r8, SYSREG_M0_OFFSET | |
490 | orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2)) | |
491 | mtsr SYSREG_SR, r8 | |
492 | sub pc, -2 | |
493 | get_thread_info r0 | |
494 | ld.w r1, r0[TI_flags] | |
495 | ||
/*
 * Work loop before returning to userspace (r0 = thread_info, r1 = flags,
 * interrupts masked at each flag re-check): reschedule, signals/notify,
 * then TIF_BREAKPOINT arms OCD breakpoint 2A on the saved PC — same
 * sequence as the syscall-exit variant above.
 */
496 | fault_exit_work: | |
497 | bld r1, TIF_NEED_RESCHED | |
498 | brcc 1f | |
499 | unmask_interrupts | |
500 | rcall schedule | |
501 | mask_interrupts | |
502 | ld.w r1, r0[TI_flags] | |
503 | rjmp fault_exit_work | |
504 | ||
505 | 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | |
506 | tst r1, r2 | |
507 | breq 2f | |
508 | unmask_interrupts | |
509 | mov r12, sp | |
510 | mov r11, r0 | |
511 | rcall do_notify_resume | |
512 | mask_interrupts | |
513 | ld.w r1, r0[TI_flags] | |
514 | rjmp fault_exit_work | |
515 | ||
516 | 2: bld r1, TIF_BREAKPOINT | |
517 | brcc fault_resume_user | |
518 | mfsr r3, SYSREG_TLBEHI | |
519 | lddsp r2, sp[REG_PC] | |
520 | andl r3, 0xff, COH | |
521 | lsl r3, 1 | |
522 | sbr r3, 30 | |
523 | sbr r3, 0 | |
524 | mtdr DBGREG_BWA2A, r2 | |
525 | mtdr DBGREG_BWC2A, r3 | |
526 | rjmp fault_resume_user | |
527 | ||
/*
 * Debug trap taken from a privileged mode.  To record the interrupted
 * mode's banked LR/SP in the frame, the handler temporarily switches SR
 * back to that mode (r11), stores LR, then switches back — each mtsr SR
 * followed by `sub pc, -2` to flush the pipeline.  The same dance is
 * repeated in reverse when restoring.  Returns with retd after the C
 * handler do_debug_priv.
 */
528 | /* If we get a debug trap from privileged context we end up here */ | |
529 | handle_debug_priv: | |
530 | /* Fix up LR and SP in regs. r11 contains the mode we came from */ | |
531 | mfsr r8, SYSREG_SR | |
532 | mov r9, r8 | |
533 | andh r8, hi(~MODE_MASK) | |
534 | or r8, r11 | |
535 | mtsr SYSREG_SR, r8 | |
536 | sub pc, -2 | |
537 | stdsp sp[REG_LR], lr | |
538 | mtsr SYSREG_SR, r9 | |
539 | sub pc, -2 | |
540 | sub r10, sp, -FRAME_SIZE_FULL | |
541 | stdsp sp[REG_SP], r10 | |
542 | mov r12, sp | |
543 | rcall do_debug_priv | |
544 | ||
545 | /* Now, put everything back */ | |
546 | ssrf SR_EM_BIT | |
547 | popm r10, r11 | |
548 | mtsr SYSREG_RAR_DBG, r10 | |
549 | mtsr SYSREG_RSR_DBG, r11 | |
/* Switch to the to-be-resumed mode to reload its banked LR */
550 | mfsr r8, SYSREG_SR | |
551 | mov r9, r8 | |
552 | andh r8, hi(~MODE_MASK) | |
553 | andh r11, hi(MODE_MASK) | |
554 | or r8, r11 | |
555 | mtsr SYSREG_SR, r8 | |
556 | sub pc, -2 | |
557 | popm lr | |
558 | mtsr SYSREG_SR, r9 | |
559 | sub pc, -2 | |
560 | sub sp, -4 /* skip SP */ | |
561 | popm r0-r12 | |
562 | sub sp, -4 | |
563 | retd | |
564 | ||
565 | /* | |
566 | * At this point, everything is masked, that is, interrupts, | |
567 | * exceptions and debugging traps. We might get called from | |
568 | * interrupt or exception context in some rare cases, but this | |
569 | * will be taken care of by do_debug(), so we're not going to | |
570 | * do a 100% correct context save here. | |
571 | */ | |
/*
 * Debug trap entry: save a frame with PC/SR from RAR/RSR_DBG, branch to
 * handle_debug_priv if the trap came from a privileged mode, otherwise
 * call do_debug and resume.  retd (not rete) returns from debug mode.
 */
572 | handle_debug: | |
573 | sub sp, 4 /* r12_orig */ | |
574 | stmts --sp, r0-lr | |
575 | mfsr r10, SYSREG_RAR_DBG | |
576 | mfsr r11, SYSREG_RSR_DBG | |
577 | unmask_exceptions | |
578 | pushm r10,r11 | |
579 | andh r11, (MODE_MASK >> 16), COH | |
580 | brne handle_debug_priv | |
581 | ||
582 | mov r12, sp | |
583 | rcall do_debug | |
584 | ||
/* do_debug may have rewritten the saved SR; re-check the resume mode */
585 | lddsp r10, sp[REG_SR] | |
586 | andh r10, (MODE_MASK >> 16), COH | |
587 | breq debug_resume_user | |
588 | ||
589 | debug_restore_all: | |
590 | popm r10,r11 | |
591 | mask_exceptions | |
592 | mtsr SYSREG_RSR_DBG, r11 | |
593 | mtsr SYSREG_RAR_DBG, r10 | |
594 | ldmts sp++, r0-lr | |
595 | sub sp, -4 | |
596 | retd | |
597 | ||
/*
 * Pre-resume work loop for debug traps (same pattern as fault_exit_work);
 * the final case sets the single-step bit in the OCD DC register when
 * TIF_SINGLE_STEP is pending.
 */
598 | debug_resume_user: | |
599 | get_thread_info r0 | |
600 | mask_interrupts | |
601 | ||
602 | ld.w r1, r0[TI_flags] | |
603 | andl r1, _TIF_DBGWORK_MASK, COH | |
604 | breq debug_restore_all | |
605 | ||
606 | 1: bld r1, TIF_NEED_RESCHED | |
607 | brcc 2f | |
608 | unmask_interrupts | |
609 | rcall schedule | |
610 | mask_interrupts | |
611 | ld.w r1, r0[TI_flags] | |
612 | rjmp 1b | |
613 | ||
614 | 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | |
615 | tst r1, r2 | |
616 | breq 3f | |
617 | unmask_interrupts | |
618 | mov r12, sp | |
619 | mov r11, r0 | |
620 | rcall do_notify_resume | |
621 | mask_interrupts | |
622 | ld.w r1, r0[TI_flags] | |
623 | rjmp 1b | |
624 | ||
625 | 3: bld r1, TIF_SINGLE_STEP | |
626 | brcc debug_restore_all | |
627 | mfdr r2, DBGREG_DC | |
628 | sbr r2, DC_SS_BIT | |
629 | mtdr DBGREG_DC, r2 | |
630 | rjmp debug_restore_all | |
631 | ||
/* Lowercase aliases so the IRQ_LEVEL macro can form rar_int\level /
 * rsr_int\level by simple token pasting */
632 | .set rsr_int0, SYSREG_RSR_INT0 | |
633 | .set rsr_int1, SYSREG_RSR_INT1 | |
634 | .set rsr_int2, SYSREG_RSR_INT2 | |
635 | .set rsr_int3, SYSREG_RSR_INT3 | |
636 | .set rar_int0, SYSREG_RAR_INT0 | |
637 | .set rar_int1, SYSREG_RAR_INT1 | |
638 | .set rar_int2, SYSREG_RAR_INT2 | |
639 | .set rar_int3, SYSREG_RAR_INT3 | |
640 | ||
/*
 * IRQ_LEVEL level — generates the irq_level<level> handler.
 * Saves a pt_regs frame, calls do_IRQ(level, regs), then resumes based on
 * the interrupted mode (extracted from saved SR):
 *   user       -> check TIF work flags, possibly via irq_exit_work
 *   supervisor -> label 2: idle-sleep race fixup, then resume
 *   other      -> plain restore (label 1), with optional preemption (3)
 * Under CONFIG_PREEMPT, an IRQ that hit exactly the first instruction of
 * system_call (before RAR/RSR_SUP were saved) is backed out at label 4:
 * bit 16 of the saved RSR is set (presumably SR.GM, so the retried
 * syscall entry runs with interrupts masked — TODO confirm bit 16 = GM
 * in the AVR32 SR layout) and the interrupt is replayed after rete.
 */
641 | .macro IRQ_LEVEL level | |
642 | .type irq_level\level, @function | |
643 | irq_level\level: | |
644 | sub sp, 4 /* r12_orig */ | |
645 | stmts --sp,r0-lr | |
646 | mfsr r8, rar_int\level | |
647 | mfsr r9, rsr_int\level | |
a7e30b8d PR |
648 | |
649 | #ifdef CONFIG_PREEMPT |
650 | sub r11, pc, (. - system_call) |
651 | cp.w r11, r8 |
652 | breq 4f |
653 | #endif |
654 | ||
5f97f7f9 HS |
655 | pushm r8-r9 |
656 | ||
657 | mov r11, sp | |
658 | mov r12, \level | |
659 | ||
660 | rcall do_IRQ | |
661 | ||
662 | lddsp r4, sp[REG_SR] | |
19b7ce8b HCE |
663 | bfextu r4, r4, SYSREG_M0_OFFSET, 3 |
664 | cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET |
665 | breq 2f |
666 | cp.w r4, MODE_USER >> SYSREG_M0_OFFSET |
5f97f7f9 | 667 | #ifdef CONFIG_PREEMPT |
19b7ce8b | 668 | brne 3f |
5f97f7f9 HS |
669 | #else |
670 | brne 1f |
671 | #endif |
672 | ||
673 | get_thread_info r0 | |
674 | ld.w r1, r0[TI_flags] | |
675 | andl r1, _TIF_WORK_MASK, COH | |
676 | brne irq_exit_work | |
677 | ||
678 | 1: popm r8-r9 | |
679 | mtsr rar_int\level, r8 | |
680 | mtsr rsr_int\level, r9 | |
681 | ldmts sp++,r0-lr | |
682 | sub sp, -4 /* ignore r12_orig */ | |
683 | rete | |
684 | ||
a7e30b8d PR |
685 | #ifdef CONFIG_PREEMPT |
686 | 4: mask_interrupts |
687 | mfsr r8, rsr_int\level |
688 | sbr r8, 16 |
689 | mtsr rsr_int\level, r8 |
690 | ldmts sp++, r0-lr |
691 | sub sp, -4 /* ignore r12_orig */ |
692 | rete |
693 | #endif |
694 | ||
/* Supervisor mode: if the idle loop was about to sleep, redirect the
 * saved PC to cpu_idle_skip_sleep so the race with `sleep` is avoided */
19b7ce8b HCE |
695 | 2: get_thread_info r0 |
696 | ld.w r1, r0[TI_flags] |
697 | bld r1, TIF_CPU_GOING_TO_SLEEP |
5f97f7f9 | 698 | #ifdef CONFIG_PREEMPT |
19b7ce8b HCE |
699 | brcc 3f |
700 | #else |
701 | brcc 1b |
702 | #endif |
703 | sub r1, pc, . - cpu_idle_skip_sleep |
704 | stdsp sp[REG_PC], r1 |
705 | #ifdef CONFIG_PREEMPT |
706 | 3: get_thread_info r0 |
5f97f7f9 HS |
/* Same preemption guards as fault_resume_kernel: preempt_count == 0,
 * NEED_RESCHED set, interrupted context not running with GM set */
707 | ld.w r2, r0[TI_preempt_count] |
708 | cp.w r2, 0 |
709 | brne 1b |
710 | ld.w r1, r0[TI_flags] |
711 | bld r1, TIF_NEED_RESCHED |
712 | brcc 1b |
713 | lddsp r4, sp[REG_SR] |
714 | bld r4, SYSREG_GM_OFFSET |
715 | brcs 1b |
716 | rcall preempt_schedule_irq |
5f97f7f9 | 717 | #endif |
19b7ce8b | 718 | rjmp 1b |
5f97f7f9 HS |
719 | .endm |
720 | ||
721 | .section .irq.text,"ax",@progbits | |
722 | ||
/*
 * Idle-sleep with IRQ race handling: TIF_CPU_GOING_TO_SLEEP is set
 * (with interrupts masked) before `sleep`, so an interrupt arriving in
 * the unmask->sleep window sees the flag and redirects the saved PC to
 * cpu_idle_skip_sleep (see label 2 in IRQ_LEVEL), which clears the flag
 * and returns to the caller via retal r12.
 */
19b7ce8b HCE |
723 | .global cpu_idle_sleep |
724 | cpu_idle_sleep: |
725 | mask_interrupts |
726 | get_thread_info r8 |
727 | ld.w r9, r8[TI_flags] |
728 | bld r9, TIF_NEED_RESCHED |
729 | brcs cpu_idle_enable_int_and_exit |
730 | sbr r9, TIF_CPU_GOING_TO_SLEEP |
731 | st.w r8[TI_flags], r9 |
732 | unmask_interrupts |
733 | sleep 0 |
734 | cpu_idle_skip_sleep: |
735 | mask_interrupts |
736 | ld.w r9, r8[TI_flags] |
737 | cbr r9, TIF_CPU_GOING_TO_SLEEP |
738 | st.w r8[TI_flags], r9 |
739 | cpu_idle_enable_int_and_exit: |
740 | unmask_interrupts |
741 | retal r12 |
742 | ||
5f97f7f9 HS |
/* Instantiate one handler per interrupt priority level */
743 | .global irq_level0 |
744 | .global irq_level1 |
745 | .global irq_level2 |
746 | .global irq_level3 |
747 | IRQ_LEVEL 0 |
748 | IRQ_LEVEL 1 |
749 | IRQ_LEVEL 2 |
750 | IRQ_LEVEL 3 |