arch/tile/kernel/intvec_32.S
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Linux interrupt vectors.
15 */
16
17#include <linux/linkage.h>
18#include <linux/errno.h>
19#include <linux/init.h>
 20#include <linux/unistd.h>
21#include <asm/ptrace.h>
22#include <asm/thread_info.h>
 23#include <asm/irqflags.h>
 24#include <asm/atomic_32.h>
25#include <asm/asm-offsets.h>
26#include <hv/hypervisor.h>
27#include <arch/abi.h>
28#include <arch/interrupts.h>
29#include <arch/spr_def.h>
30
31#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
32
33#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
34
35 .macro push_reg reg, ptr=sp, delta=-4
36 {
37 sw \ptr, \reg
38 addli \ptr, \ptr, \delta
39 }
40 .endm
41
42 .macro pop_reg reg, ptr=sp, delta=4
43 {
44 lw \reg, \ptr
45 addli \ptr, \ptr, \delta
46 }
47 .endm
48
49 .macro pop_reg_zero reg, zreg, ptr=sp, delta=4
50 {
51 move \zreg, zero
52 lw \reg, \ptr
53 addi \ptr, \ptr, \delta
54 }
55 .endm
56
57 .macro push_extra_callee_saves reg
58 PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
59 push_reg r51, \reg
60 push_reg r50, \reg
61 push_reg r49, \reg
62 push_reg r48, \reg
63 push_reg r47, \reg
64 push_reg r46, \reg
65 push_reg r45, \reg
66 push_reg r44, \reg
67 push_reg r43, \reg
68 push_reg r42, \reg
69 push_reg r41, \reg
70 push_reg r40, \reg
71 push_reg r39, \reg
72 push_reg r38, \reg
73 push_reg r37, \reg
74 push_reg r36, \reg
75 push_reg r35, \reg
76 push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
77 .endm
78
79 .macro panic str
80 .pushsection .rodata, "a"
811:
82 .asciz "\str"
83 .popsection
84 {
85 moveli r0, lo16(1b)
86 }
87 {
88 auli r0, r0, ha16(1b)
89 jal panic
90 }
91 .endm
92
93#ifdef __COLLECT_LINKER_FEEDBACK__
94 .pushsection .text.intvec_feedback,"ax"
95intvec_feedback:
96 .popsection
97#endif
98
99 /*
100 * Default interrupt handler.
101 *
102 * vecnum is where we'll put this code.
103 * c_routine is the C routine we'll call.
104 *
105 * The C routine is passed two arguments:
106 * - A pointer to the pt_regs state.
107 * - The interrupt vector number.
108 *
109 * The "processing" argument specifies the code for processing
110 * the interrupt. Defaults to "handle_interrupt".
111 */
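
The convention documented above boils down to a two-argument C handler (page faults and a few other vectors receive extra values in r2/r3, set up further below). A hedged sketch of the shape of such a routine; the name is illustrative only, not a real kernel symbol:

    /* Illustrative prototype only; real handlers live in the arch C sources. */
    void example_int_handler(struct pt_regs *regs, int fault_num);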
112 .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
113 .org (\vecnum << 8)
114intvec_\vecname:
115 .ifc \vecnum, INT_SWINT_1
116 blz TREG_SYSCALL_NR_NAME, sys_cmpxchg
117 .endif
118
119 /* Temporarily save a register so we have somewhere to work. */
120
121 mtspr SPR_SYSTEM_SAVE_K_1, r0
122 mfspr r0, SPR_EX_CONTEXT_K_1
123
124 /* The cmpxchg code clears sp to force us to reset it here on fault. */
125 {
126 bz sp, 2f
127 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
128 }
129
130 .ifc \vecnum, INT_DOUBLE_FAULT
131 /*
132 * For double-faults from user-space, fall through to the normal
133 * register save and stack setup path. Otherwise, it's the
134 * hypervisor giving us one last chance to dump diagnostics, and we
135 * branch to the kernel_double_fault routine to do so.
136 */
137 bz r0, 1f
138 j _kernel_double_fault
1391:
140 .else
141 /*
142 * If we're coming from user-space, then set sp to the top of
143 * the kernel stack. Otherwise, assume sp is already valid.
144 */
145 {
146 bnz r0, 0f
147 move r0, sp
148 }
149 .endif
150
151 .ifc \c_routine, do_page_fault
152 /*
153 * The page_fault handler may be downcalled directly by the
154 * hypervisor even when Linux is running and has ICS set.
155 *
 156 * In this case the contents of EX_CONTEXT_K_1 reflect the
157 * previous fault and can't be relied on to choose whether or
158 * not to reinitialize the stack pointer. So we add a test
 159 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
160 * and if so we don't reinitialize sp, since we must be coming
161 * from Linux. (In fact the precise case is !(val & ~1),
162 * but any Linux PC has to have the high bit set.)
163 *
 164 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
165 * any path that turns into a downcall to one of our TLB handlers.
166 */
 167 mfspr r0, SPR_SYSTEM_SAVE_K_2
168 {
169 blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
170 move r0, sp
171 }
172 .endif
173
1742:
175 /*
 176 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
177 * the current stack top in the higher bits. So we recover
178 * our stack top by just masking off the low bits, then
179 * point sp at the top aligned address on the actual stack page.
180 */
 181 mfspr r0, SPR_SYSTEM_SAVE_K_0
 182 mm r0, r0, zero, LOG2_NR_CPU_IDS, 31
183
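
In C terms, the mfspr/mm pair above drops the cpu-id bits to recover the kernel stack top. A hedged sketch; spr_system_save_k_0 is a stand-in for the SPR read, not a real identifier:

    /* Sketch: keep bits LOG2_NR_CPU_IDS..31, discarding the cpu number. */
    unsigned long stack_top =
            spr_system_save_k_0 & ~((1UL << LOG2_NR_CPU_IDS) - 1);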
1840:
185 /*
186 * Align the stack mod 64 so we can properly predict what
187 * cache lines we need to write-hint to reduce memory fetch
188 * latency as we enter the kernel. The layout of memory is
189 * as follows, with cache line 0 at the lowest VA, and cache
190 * line 4 just below the r0 value this "andi" computes.
191 * Note that we never write to cache line 4, and we skip
192 * cache line 1 for syscalls.
193 *
194 * cache line 4: ptregs padding (two words)
195 * cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
196 * cache line 2: r30...r45
197 * cache line 1: r14...r29
198 * cache line 0: 2 x frame, r0..r13
199 */
200#if STACK_TOP_DELTA != 64
201#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
202#endif
203 andi r0, r0, -64
204
205 /*
206 * Push the first four registers on the stack, so that we can set
207 * them to vector-unique values before we jump to the common code.
208 *
209 * Registers are pushed on the stack as a struct pt_regs,
210 * with the sp initially just above the struct, and when we're
211 * done, sp points to the base of the struct, minus
212 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
213 *
214 * This routine saves just the first four registers, plus the
215 * stack context so we can do proper backtracing right away,
216 * and defers to handle_interrupt to save the rest.
217 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
218 */
219 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
220 wh64 r0 /* cache line 3 */
221 {
222 sw r0, lr
223 addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
224 }
225 {
226 sw r0, sp
227 addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
228 }
229 {
230 sw sp, r52
231 addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
232 }
233 wh64 sp /* cache line 0 */
234 {
235 sw sp, r1
236 addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
237 }
238 {
239 sw sp, r2
240 addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
241 }
242 {
243 sw sp, r3
244 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
245 }
 246 mfspr r0, SPR_EX_CONTEXT_K_0
247 .ifc \processing,handle_syscall
248 /*
249 * Bump the saved PC by one bundle so that when we return, we won't
250 * execute the same swint instruction again. We need to do this while
251 * we're in the critical section.
252 */
253 addi r0, r0, 8
254 .endif
255 {
256 sw sp, r0
257 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
258 }
 259 mfspr r0, SPR_EX_CONTEXT_K_1
260 {
261 sw sp, r0
262 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
263 /*
264 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
265 * so that it gets passed through unchanged to the handler routine.
266 * Note that the .if conditional confusingly spans bundles.
267 */
268 .ifc \processing,handle_syscall
269 movei r0, \vecnum
270 }
271 {
272 sw sp, r0
273 .else
274 movei r1, \vecnum
275 }
276 {
277 sw sp, r1
278 .endif
279 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
280 }
 281 mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
282 {
283 sw sp, r0
284 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
285 }
286 {
287 sw sp, zero /* write zero into "Next SP" frame pointer */
288 addi sp, sp, -4 /* leave SP pointing at bottom of frame */
289 }
290 .ifc \processing,handle_syscall
291 j handle_syscall
292 .else
293 /*
294 * Capture per-interrupt SPR context to registers.
295 * We overload the meaning of r3 on this path such that if its bit 31
296 * is set, we have to mask all interrupts including NMIs before
297 * clearing the interrupt critical section bit.
298 * See discussion below at "finish_interrupt_save".
299 */
300 .ifc \c_routine, do_page_fault
301 mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
302 mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
303 .else
304 .ifc \vecnum, INT_DOUBLE_FAULT
305 {
 306 mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
307 movei r3, 0
308 }
309 .else
310 .ifc \c_routine, do_trap
311 {
312 mfspr r2, GPV_REASON
313 movei r3, 0
314 }
315 .else
 316 .ifc \c_routine, handle_perf_interrupt
317 {
318 mfspr r2, PERF_COUNT_STS
319 movei r3, -1 /* not used, but set for consistency */
320 }
321 .else
 322 .ifc \c_routine, handle_perf_interrupt
323 {
324 mfspr r2, AUX_PERF_COUNT_STS
325 movei r3, -1 /* not used, but set for consistency */
326 }
327 .else
 328 movei r3, 0
 329 .endif
330 .endif
331 .endif
332 .endif
333 .endif
334 /* Put function pointer in r0 */
335 moveli r0, lo16(\c_routine)
336 {
337 auli r0, r0, ha16(\c_routine)
338 j \processing
339 }
340 .endif
341 ENDPROC(intvec_\vecname)
342
343#ifdef __COLLECT_LINKER_FEEDBACK__
344 .pushsection .text.intvec_feedback,"ax"
345 .org (\vecnum << 5)
 346 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
347 jrp lr
348 .popsection
349#endif
350
351 .endm
352
353
354 /*
355 * Save the rest of the registers that we didn't save in the actual
356 * vector itself. We can't use r0-r10 inclusive here.
357 */
358 .macro finish_interrupt_save, function
359
360 /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
361 PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
362 {
363 .ifc \function,handle_syscall
364 sw r52, r0
365 .else
366 sw r52, zero
367 .endif
368 PTREGS_PTR(r52, PTREGS_OFFSET_TP)
369 }
370
371 /*
372 * For ordinary syscalls, we save neither caller- nor callee-
373 * save registers, since the syscall invoker doesn't expect the
374 * caller-saves to be saved, and the called kernel functions will
375 * take care of saving the callee-saves for us.
376 *
377 * For interrupts we save just the caller-save registers. Saving
378 * them is required (since the "caller" can't save them). Again,
379 * the called kernel functions will restore the callee-save
380 * registers for us appropriately.
381 *
382 * On return, we normally restore nothing special for syscalls,
383 * and just the caller-save registers for interrupts.
384 *
385 * However, there are some important caveats to all this:
386 *
387 * - We always save a few callee-save registers to give us
388 * some scratchpad registers to carry across function calls.
389 *
390 * - fork/vfork/etc require us to save all the callee-save
391 * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
392 *
393 * - We always save r0..r5 and r10 for syscalls, since we need
394 * to reload them a bit later for the actual kernel call, and
395 * since we might need them for -ERESTARTNOINTR, etc.
396 *
397 * - Before invoking a signal handler, we save the unsaved
398 * callee-save registers so they are visible to the
399 * signal handler or any ptracer.
400 *
401 * - If the unsaved callee-save registers are modified, we set
402 * a bit in pt_regs so we know to reload them from pt_regs
403 * and not just rely on the kernel function unwinding.
404 * (Done for ptrace register writes and SA_SIGINFO handler.)
405 */
406 {
407 sw r52, tp
408 PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
409 }
410 wh64 r52 /* cache line 2 */
411 push_reg r33, r52
412 push_reg r32, r52
413 push_reg r31, r52
414 .ifc \function,handle_syscall
415 push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
416 push_reg TREG_SYSCALL_NR_NAME, r52, \
417 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
418 .else
419
420 push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
421 wh64 r52 /* cache line 1 */
422 push_reg r29, r52
423 push_reg r28, r52
424 push_reg r27, r52
425 push_reg r26, r52
426 push_reg r25, r52
427 push_reg r24, r52
428 push_reg r23, r52
429 push_reg r22, r52
430 push_reg r21, r52
431 push_reg r20, r52
432 push_reg r19, r52
433 push_reg r18, r52
434 push_reg r17, r52
435 push_reg r16, r52
436 push_reg r15, r52
437 push_reg r14, r52
438 push_reg r13, r52
439 push_reg r12, r52
440 push_reg r11, r52
441 push_reg r10, r52
442 push_reg r9, r52
443 push_reg r8, r52
444 push_reg r7, r52
445 push_reg r6, r52
446
447 .endif
448
449 push_reg r5, r52
450 sw r52, r4
451
452 /* Load tp with our per-cpu offset. */
453#ifdef CONFIG_SMP
454 {
 455 mfspr r20, SPR_SYSTEM_SAVE_K_0
456 moveli r21, lo16(__per_cpu_offset)
457 }
458 {
459 auli r21, r21, ha16(__per_cpu_offset)
 460 mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
461 }
462 s2a r20, r20, r21
463 lw tp, r20
464#else
465 move tp, zero
466#endif
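
In C terms, the SMP branch above extracts the cpu id from the low bits of SYSTEM_SAVE_K_0 and loads that cpu's entry from __per_cpu_offset into tp. A hedged sketch; spr_system_save_k_0 again stands in for the SPR read:

    /* Sketch of the per-cpu "tp" setup above (SMP case). */
    unsigned int cpu = spr_system_save_k_0 & ((1u << LOG2_NR_CPU_IDS) - 1);
    unsigned long tp_value = __per_cpu_offset[cpu];   /* s2a + lw above */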
467
468 /*
469 * If we will be returning to the kernel, we will need to
470 * reset the interrupt masks to the state they had before.
471 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
472 * We load flags in r32 here so we can jump to .Lrestore_regs
473 * directly after do_page_fault_ics() if necessary.
474 */
 475 mfspr r32, SPR_EX_CONTEXT_K_1
476 {
477 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
478 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
479 }
480 bzt r32, 1f /* zero if from user space */
481 IRQS_DISABLED(r32) /* zero if irqs enabled */
482#if PT_FLAGS_DISABLE_IRQ != 1
483# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
484#endif
4851:
486 .ifnc \function,handle_syscall
487 /* Record the fact that we saved the caller-save registers above. */
488 ori r32, r32, PT_FLAGS_CALLER_SAVES
489 .endif
490 sw r21, r32
491
492#ifdef __COLLECT_LINKER_FEEDBACK__
493 /*
494 * Notify the feedback routines that we were in the
495 * appropriate fixed interrupt vector area. Note that we
496 * still have ICS set at this point, so we can't invoke any
497 * atomic operations or we will panic. The feedback
498 * routines internally preserve r0..r10 and r30 up.
499 */
500 .ifnc \function,handle_syscall
501 shli r20, r1, 5
502 .else
503 moveli r20, INT_SWINT_1 << 5
504 .endif
505 addli r20, r20, lo16(intvec_feedback)
506 auli r20, r20, ha16(intvec_feedback)
507 jalr r20
508
509 /* And now notify the feedback routines that we are here. */
510 FEEDBACK_ENTER(\function)
511#endif
512
513 /*
514 * we've captured enough state to the stack (including in
515 * particular our EX_CONTEXT state) that we can now release
516 * the interrupt critical section and replace it with our
517 * standard "interrupts disabled" mask value. This allows
518 * synchronous interrupts (and profile interrupts) to punch
519 * through from this point onwards.
520 *
521 * If bit 31 of r3 is set during a non-NMI interrupt, we know we
522 * are on the path where the hypervisor has punched through our
523 * ICS with a page fault, so we call out to do_page_fault_ics()
524 * to figure out what to do with it. If the fault was in
525 * an atomic op, we unlock the atomic lock, adjust the
526 * saved register state a little, and return "zero" in r4,
527 * falling through into the normal page-fault interrupt code.
528 * If the fault was in a kernel-space atomic operation, then
529 * do_page_fault_ics() resolves it itself, returns "one" in r4,
530 * and as a result goes directly to restoring registers and iret,
531 * without trying to adjust the interrupt masks at all.
532 * The do_page_fault_ics() API involves passing and returning
533 * a five-word struct (in registers) to avoid writing the
534 * save and restore code here.
535 */
536 .ifc \function,handle_nmi
537 IRQ_DISABLE_ALL(r20)
538 .else
539 .ifnc \function,handle_syscall
540 bgezt r3, 1f
541 {
542 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
543 jal do_page_fault_ics
544 }
545 FEEDBACK_REENTER(\function)
546 bzt r4, 1f
547 j .Lrestore_regs
5481:
549 .endif
550 IRQ_DISABLE(r20, r21)
551 .endif
552 mtspr INTERRUPT_CRITICAL_SECTION, zero
553
554 /*
555 * Prepare the first 256 stack bytes to be rapidly accessible
556 * without having to fetch the background data. We don't really
557 * know how far to write-hint, but kernel stacks generally
558 * aren't that big, and write-hinting here does take some time.
559 */
560 addi r52, sp, -64
561 {
562 wh64 r52
563 addi r52, r52, -64
564 }
565 {
566 wh64 r52
567 addi r52, r52, -64
568 }
569 {
570 wh64 r52
571 addi r52, r52, -64
572 }
573 wh64 r52
574
575#ifdef CONFIG_TRACE_IRQFLAGS
576 .ifnc \function,handle_nmi
577 /*
578 * We finally have enough state set up to notify the irq
579 * tracing code that irqs were disabled on entry to the handler.
580 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
581 * For syscalls, we already have the register state saved away
582 * on the stack, so we don't bother to do any register saves here,
583 * and later we pop the registers back off the kernel stack.
584 * For interrupt handlers, save r0-r3 in callee-saved registers.
585 */
586 .ifnc \function,handle_syscall
587 { move r30, r0; move r31, r1 }
588 { move r32, r2; move r33, r3 }
589 .endif
590 TRACE_IRQS_OFF
591 .ifnc \function,handle_syscall
592 { move r0, r30; move r1, r31 }
593 { move r2, r32; move r3, r33 }
594 .endif
595 .endif
596#endif
597
598 .endm
599
600 .macro check_single_stepping, kind, not_single_stepping
601 /*
602 * Check for single stepping in user-level priv
603 * kind can be "normal", "ill", or "syscall"
604 * At end, if fall-thru
605 * r29: thread_info->step_state
606 * r28: &pt_regs->pc
607 * r27: pt_regs->pc
608 * r26: thread_info->step_state->buffer
609 */
610
611 /* Check for single stepping */
612 GET_THREAD_INFO(r29)
613 {
614 /* Get pointer to field holding step state */
615 addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET
616
617 /* Get pointer to EX1 in register state */
618 PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
619 }
620 {
621 /* Get pointer to field holding PC */
622 PTREGS_PTR(r28, PTREGS_OFFSET_PC)
623
624 /* Load the pointer to the step state */
625 lw r29, r29
626 }
627 /* Load EX1 */
628 lw r27, r27
629 {
630 /* Points to flags */
631 addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET
632
633 /* No single stepping if there is no step state structure */
634 bzt r29, \not_single_stepping
635 }
636 {
637 /* mask off ICS and any other high bits */
638 andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK
639
640 /* Load pointer to single step instruction buffer */
641 lw r26, r29
642 }
643 /* Check priv state */
644 bnz r27, \not_single_stepping
645
646 /* Get flags */
647 lw r22, r23
648 {
649 /* Branch if single-step mode not enabled */
650 bbnst r22, \not_single_stepping
651
652 /* Clear enabled flag */
653 andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
654 }
655 .ifc \kind,normal
656 {
657 /* Load PC */
658 lw r27, r28
659
660 /* Point to the entry containing the original PC */
661 addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
662 }
663 {
664 /* Disable single stepping flag */
665 sw r23, r22
666 }
667 {
668 /* Get the original pc */
669 lw r24, r24
670
671 /* See if the PC is at the start of the single step buffer */
672 seq r25, r26, r27
673 }
674 /*
675 * NOTE: it is really expected that the PC be in the single step buffer
676 * at this point
677 */
678 bzt r25, \not_single_stepping
679
680 /* Restore the original PC */
681 sw r28, r24
682 .else
683 .ifc \kind,syscall
684 {
685 /* Load PC */
686 lw r27, r28
687
688 /* Point to the entry containing the next PC */
689 addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
690 }
691 {
692 /* Increment the stopped PC by the bundle size */
693 addi r26, r26, 8
694
695 /* Disable single stepping flag */
696 sw r23, r22
697 }
698 {
699 /* Get the next pc */
700 lw r24, r24
701
702 /*
703 * See if the PC is one bundle past the start of the
704 * single step buffer
705 */
706 seq r25, r26, r27
707 }
708 {
709 /*
710 * NOTE: it is really expected that the PC be in the
711 * single step buffer at this point
712 */
713 bzt r25, \not_single_stepping
714 }
715 /* Set to the next PC */
716 sw r28, r24
717 .else
718 {
719 /* Point to 3rd bundle in buffer */
720 addi r25, r26, 16
721
722 /* Load PC */
723 lw r27, r28
724 }
725 {
726 /* Disable single stepping flag */
727 sw r23, r22
728
729 /* See if the PC is in the single step buffer */
730 slte_u r24, r26, r27
731 }
732 {
733 slte_u r25, r27, r25
734
735 /*
736 * NOTE: it is really expected that the PC be in the
737 * single step buffer at this point
738 */
739 bzt r24, \not_single_stepping
740 }
741 bzt r25, \not_single_stepping
742 .endif
743 .endif
744 .endm
745
746 /*
747 * Redispatch a downcall.
748 */
749 .macro dc_dispatch vecnum, vecname
750 .org (\vecnum << 8)
751intvec_\vecname:
 752 j _hv_downcall_dispatch
753 ENDPROC(intvec_\vecname)
754 .endm
755
756 /*
757 * Common code for most interrupts. The C function we're eventually
758 * going to is in r0, and the faultnum is in r1; the original
759 * values for those registers are on the stack.
760 */
761 .pushsection .text.handle_interrupt,"ax"
762handle_interrupt:
763 finish_interrupt_save handle_interrupt
764
765 /*
 766 * Check whether we are single-stepping at user level. If so, then
767 * we need to restore the PC.
768 */
769
770 check_single_stepping normal, .Ldispatch_interrupt
771.Ldispatch_interrupt:
772
773 /* Jump to the C routine; it should enable irqs as soon as possible. */
774 {
775 jalr r0
776 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
777 }
778 FEEDBACK_REENTER(handle_interrupt)
779 {
780 movei r30, 0 /* not an NMI */
781 j interrupt_return
782 }
783 STD_ENDPROC(handle_interrupt)
784
785/*
786 * This routine takes a boolean in r30 indicating if this is an NMI.
787 * If so, we also expect a boolean in r31 indicating whether to
788 * re-enable the oprofile interrupts.
789 *
790 * Note that .Lresume_userspace is jumped to directly in several
791 * places, and we need to make sure r30 is set correctly in those
792 * callers as well.
793 */
794STD_ENTRY(interrupt_return)
795 /* If we're resuming to kernel space, don't check thread flags. */
796 {
797 bnz r30, .Lrestore_all /* NMIs don't special-case user-space */
798 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
799 }
800 lw r29, r29
801 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
802 bzt r29, .Lresume_userspace
803
804#ifdef CONFIG_PREEMPT
805 /* Returning to kernel space. Check if we need preemption. */
806 GET_THREAD_INFO(r29)
807 addli r28, r29, THREAD_INFO_FLAGS_OFFSET
 808 {
809 lw r28, r28
810 addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
811 }
812 {
813 andi r28, r28, _TIF_NEED_RESCHED
814 lw r29, r29
 815 }
816 bzt r28, 1f
817 bnz r29, 1f
818 /* Disable interrupts explicitly for preemption. */
819 IRQ_DISABLE(r20,r21)
820 TRACE_IRQS_OFF
821 jal preempt_schedule_irq
822 FEEDBACK_REENTER(interrupt_return)
8231:
824#endif
825
826 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
827 {
 828 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
829 moveli r27, lo16(_cpu_idle_nap)
830 }
831 {
 832 lw r28, r29
833 auli r27, r27, ha16(_cpu_idle_nap)
834 }
835 {
836 seq r27, r27, r28
837 }
838 {
839 bbns r27, .Lrestore_all
840 addi r28, r28, 8
841 }
842 sw r29, r28
843 j .Lrestore_all
844
845.Lresume_userspace:
846 FEEDBACK_REENTER(interrupt_return)
847
848 /*
849 * Use r33 to hold whether we have already loaded the callee-saves
850 * into ptregs. We don't want to do it twice in this loop, since
851 * then we'd clobber whatever changes are made by ptrace, etc.
852 * Get base of stack in r32.
853 */
854 {
855 GET_THREAD_INFO(r32)
856 movei r33, 0
857 }
858
859.Lretry_work_pending:
860 /*
861 * Disable interrupts so as to make sure we don't
862 * miss an interrupt that sets any of the thread flags (like
863 * need_resched or sigpending) between sampling and the iret.
864 * Routines like schedule() or do_signal() may re-enable
865 * interrupts before returning.
866 */
867 IRQ_DISABLE(r20, r21)
868 TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
869
870
871 /* Check to see if there is any work to do before returning to user. */
872 {
873 addi r29, r32, THREAD_INFO_FLAGS_OFFSET
 874 moveli r1, lo16(_TIF_ALLWORK_MASK)
875 }
876 {
877 lw r29, r29
 878 auli r1, r1, ha16(_TIF_ALLWORK_MASK)
 879 }
880 and r1, r29, r1
881 bzt r1, .Lrestore_all
882
883 /*
884 * Make sure we have all the registers saved for signal
885 * handling, notify-resume, or single-step. Call out to C
886 * code to figure out exactly what we need to do for each flag bit,
887 * then if necessary, reload the flags and recheck.
 888 */
889 {
890 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
 891 bnz r33, 1f
 892 }
893 push_extra_callee_saves r0
894 movei r33, 1
8951: jal do_work_pending
896 bnz r0, .Lretry_work_pending
897
898 /*
899 * In the NMI case we
900 * omit the call to single_process_check_nohz, which normally checks
901 * to see if we should start or stop the scheduler tick, because
902 * we can't call arbitrary Linux code from an NMI context.
903 * We always call the homecache TLB deferral code to re-trigger
904 * the deferral mechanism.
905 *
906 * The other chunk of responsibility this code has is to reset the
907 * interrupt masks appropriately to reset irqs and NMIs. We have
908 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
909 * lockdep-type stuff, but we can't set ICS until afterwards, since
910 * ICS can only be used in very tight chunks of code to avoid
911 * tripping over various assertions that it is off.
912 *
913 * (There is what looks like a window of vulnerability here since
914 * we might take a profile interrupt between the two SPR writes
915 * that set the mask, but since we write the low SPR word first,
916 * and our interrupt entry code checks the low SPR word, any
917 * profile interrupt will actually disable interrupts in both SPRs
918 * before returning, which is OK.)
919 */
920.Lrestore_all:
921 PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
922 {
923 lw r0, r0
924 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
925 }
926 {
927 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
928 lw r32, r32
929 }
930 bnz r0, 1f
931 j 2f
932#if PT_FLAGS_DISABLE_IRQ != 1
933# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
934#endif
9351: bbnst r32, 2f
936 IRQ_DISABLE(r20,r21)
937 TRACE_IRQS_OFF
938 movei r0, 1
939 mtspr INTERRUPT_CRITICAL_SECTION, r0
940 bzt r30, .Lrestore_regs
941 j 3f
9422: TRACE_IRQS_ON
943 movei r0, 1
944 mtspr INTERRUPT_CRITICAL_SECTION, r0
945 IRQ_ENABLE(r20, r21)
946 bzt r30, .Lrestore_regs
9473:
948
949 /* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */
950 {
951 moveli r0, lo16(1 << (INT_PERF_COUNT - 32))
952 bz r31, .Lrestore_regs
953 }
954 auli r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32))
955 mtspr SPR_INTERRUPT_MASK_RESET_K_1, r0
956
957 /*
958 * We now commit to returning from this interrupt, since we will be
959 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
960 * frame. No calls should be made to any other code after this point.
961 * This code should only be entered with ICS set.
962 * r32 must still be set to ptregs.flags.
963 * We launch loads to each cache line separately first, so we can
964 * get some parallelism out of the memory subsystem.
965 * We start zeroing caller-saved registers throughout, since
966 * that will save some cycles if this turns out to be a syscall.
967 */
968.Lrestore_regs:
969 FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
970
971 /*
972 * Rotate so we have one high bit and one low bit to test.
973 * - low bit says whether to restore all the callee-saved registers,
974 * or just r30-r33, and r52 up.
975 * - high bit (i.e. sign bit) says whether to restore all the
976 * caller-saved registers, or just r0.
977 */
978#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
979# error Rotate trick does not work :-)
980#endif
981 {
982 rli r20, r32, 30
983 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
984 }
985
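
The rotate above is a small bit trick: with PT_FLAGS_CALLER_SAVES == 2 (bit 1) and PT_FLAGS_RESTORE_REGS == 4 (bit 2), rotating the flags word left by 30 parks RESTORE_REGS in bit 0 and CALLER_SAVES in the sign bit, so one value feeds both later branches. A hedged C sketch:

    /* Sketch of "rli r20, r32, 30" and how the result is tested. */
    uint32_t rotated = (flags << 30) | (flags >> 2);
    int restore_extra_callees = rotated & 1;            /* tested with bbs  */
    int restore_caller_saves  = (int32_t)rotated < 0;   /* tested with blzt */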
986 /*
987 * Load cache lines 0, 2, and 3 in that order, then use
988 * the last loaded value, which makes it likely that the other
989 * cache lines have also loaded, at which point we should be
990 * able to safely read all the remaining words on those cache
991 * lines without waiting for the memory subsystem.
992 */
 993 pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
994 pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
995 pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
996 pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
997 {
 998 mtspr SPR_EX_CONTEXT_K_0, r21
999 move r5, zero
1000 }
1001 {
 1002 mtspr SPR_EX_CONTEXT_K_1, lr
1003 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
1004 }
1005
1006 /* Restore callee-saveds that we actually use. */
1007 pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
1008 pop_reg_zero r31, r7
1009 pop_reg_zero r32, r8
1010 pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
1011
1012 /*
1013 * If we modified other callee-saveds, restore them now.
1014 * This is rare, but could be via ptrace or signal handler.
1015 */
1016 {
1017 move r10, zero
1018 bbs r20, .Lrestore_callees
1019 }
1020.Lcontinue_restore_regs:
1021
1022 /* Check if we're returning from a syscall. */
1023 {
1024 move r11, zero
1025 blzt r20, 1f /* no, so go restore callee-save registers */
1026 }
1027
1028 /*
1029 * Check if we're returning to userspace.
1030 * Note that if we're not, we don't worry about zeroing everything.
1031 */
1032 {
1033 addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
1034 bnz lr, .Lkernel_return
1035 }
1036
1037 /*
1038 * On return from syscall, we've restored r0 from pt_regs, but we
1039 * clear the remainder of the caller-saved registers. We could
1040 * restore the syscall arguments, but there's not much point,
1041 * and it ensures user programs aren't trying to use the
1042 * caller-saves if we clear them, as well as avoiding leaking
1043 * kernel pointers into userspace.
1044 */
1045 pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
1046 pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
1047 {
1048 lw sp, sp
1049 move r14, zero
1050 move r15, zero
1051 }
1052 { move r16, zero; move r17, zero }
1053 { move r18, zero; move r19, zero }
1054 { move r20, zero; move r21, zero }
1055 { move r22, zero; move r23, zero }
1056 { move r24, zero; move r25, zero }
1057 { move r26, zero; move r27, zero }
1058
1059 /* Set r1 to errno if we are returning an error, otherwise zero. */
1060 {
 1061 moveli r29, 4096
1062 sub r1, zero, r0
1063 }
1064 slt_u r29, r1, r29
1065 {
1066 mnz r1, r29, r1
1067 move r29, zero
1068 }
1069 iret
1070
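
The last few bundles above implement the usual Linux error-return convention: a result in [-4095, -1] is an errno, anything else is a plain success value. A hedged C sketch of what ends up in r1:

    /* Sketch of the r1 (errno) computation before the iret above. */
    long r1 = -r0;                                   /* sub r1, zero, r0 */
    r1 = ((unsigned long)r1 < 4096) ? r1 : 0;        /* slt_u + mnz      */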
1071 /*
1072 * Not a syscall, so restore caller-saved registers.
1073 * First kick off a load for cache line 1, which we're touching
1074 * for the first time here.
1075 */
1076 .align 64
10771: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
1078 pop_reg r1
1079 pop_reg r2
1080 pop_reg r3
1081 pop_reg r4
1082 pop_reg r5
1083 pop_reg r6
1084 pop_reg r7
1085 pop_reg r8
1086 pop_reg r9
1087 pop_reg r10
1088 pop_reg r11
1089 pop_reg r12
1090 pop_reg r13
1091 pop_reg r14
1092 pop_reg r15
1093 pop_reg r16
1094 pop_reg r17
1095 pop_reg r18
1096 pop_reg r19
1097 pop_reg r20
1098 pop_reg r21
1099 pop_reg r22
1100 pop_reg r23
1101 pop_reg r24
1102 pop_reg r25
1103 pop_reg r26
1104 pop_reg r27
1105 pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
1106 /* r29 already restored above */
1107 bnz lr, .Lkernel_return
1108 pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
1109 pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
1110 lw sp, sp
1111 iret
1112
1113 /*
1114 * We can't restore tp when in kernel mode, since a thread might
1115 * have migrated from another cpu and brought a stale tp value.
1116 */
1117.Lkernel_return:
1118 pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
1119 lw sp, sp
1120 iret
1121
1122 /* Restore callee-saved registers from r34 to r51. */
1123.Lrestore_callees:
1124 addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
1125 pop_reg r34
1126 pop_reg r35
1127 pop_reg r36
1128 pop_reg r37
1129 pop_reg r38
1130 pop_reg r39
1131 pop_reg r40
1132 pop_reg r41
1133 pop_reg r42
1134 pop_reg r43
1135 pop_reg r44
1136 pop_reg r45
1137 pop_reg r46
1138 pop_reg r47
1139 pop_reg r48
1140 pop_reg r49
1141 pop_reg r50
1142 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
1143 j .Lcontinue_restore_regs
1144 STD_ENDPROC(interrupt_return)
1145
1146 /*
1147 * Some interrupts don't check for single stepping
1148 */
1149 .pushsection .text.handle_interrupt_no_single_step,"ax"
1150handle_interrupt_no_single_step:
1151 finish_interrupt_save handle_interrupt_no_single_step
1152 {
1153 jalr r0
1154 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1155 }
1156 FEEDBACK_REENTER(handle_interrupt_no_single_step)
1157 {
1158 movei r30, 0 /* not an NMI */
1159 j interrupt_return
1160 }
1161 STD_ENDPROC(handle_interrupt_no_single_step)
1162
1163 /*
1164 * "NMI" interrupts mask ALL interrupts before calling the
1165 * handler, and don't check thread flags, etc., on the way
1166 * back out. In general, the only things we do here for NMIs
1167 * are the register save/restore, fixing the PC if we were
1168 * doing single step, and the dataplane kernel-TLB management.
1169 * We don't (for example) deal with start/stop of the sched tick.
1170 */
1171 .pushsection .text.handle_nmi,"ax"
1172handle_nmi:
1173 finish_interrupt_save handle_nmi
1174 check_single_stepping normal, .Ldispatch_nmi
1175.Ldispatch_nmi:
1176 {
1177 jalr r0
1178 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1179 }
1180 FEEDBACK_REENTER(handle_nmi)
1181 {
1182 movei r30, 1
1183 seq r31, r0, zero
1184 }
1185 j interrupt_return
1186 STD_ENDPROC(handle_nmi)
1187
1188 /*
1189 * Parallel code for syscalls to handle_interrupt.
1190 */
1191 .pushsection .text.handle_syscall,"ax"
1192handle_syscall:
1193 finish_interrupt_save handle_syscall
1194
1195 /*
 1196 * Check whether we are single-stepping at user level. If so, then
1197 * we need to restore the PC.
1198 */
1199 check_single_stepping syscall, .Ldispatch_syscall
1200.Ldispatch_syscall:
1201
1202 /* Enable irqs. */
1203 TRACE_IRQS_ON
1204 IRQ_ENABLE(r20, r21)
1205
1206 /* Bump the counter for syscalls made on this tile. */
1207 moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
1208 auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
1209 add r20, r20, tp
1210 lw r21, r20
1211 addi r21, r21, 1
1212 {
1213 sw r20, r21
1214 GET_THREAD_INFO(r31)
1215 }
1216
1217 /* Trace syscalls, if requested. */
1218 addi r31, r31, THREAD_INFO_FLAGS_OFFSET
1219 lw r30, r31
1220 andi r30, r30, _TIF_SYSCALL_TRACE
1221 bzt r30, .Lrestore_syscall_regs
1222 {
1223 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1224 jal do_syscall_trace_enter
1225 }
 1226 FEEDBACK_REENTER(handle_syscall)
 1227 blz r0, .Lsyscall_sigreturn_skip
1228
1229 /*
1230 * We always reload our registers from the stack at this
1231 * point. They might be valid, if we didn't build with
1232 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
1233 * doing syscall tracing, but there are enough cases now that it
1234 * seems simplest just to do the reload unconditionally.
1235 */
1236.Lrestore_syscall_regs:
1237 PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
1238 pop_reg r0, r11
1239 pop_reg r1, r11
1240 pop_reg r2, r11
1241 pop_reg r3, r11
1242 pop_reg r4, r11
1243 pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
1244 pop_reg TREG_SYSCALL_NR_NAME, r11
1245
1246 /* Ensure that the syscall number is within the legal range. */
1247 moveli r21, __NR_syscalls
1248 {
1249 slt_u r21, TREG_SYSCALL_NR_NAME, r21
1250 moveli r20, lo16(sys_call_table)
1251 }
1252 {
1253 bbns r21, .Linvalid_syscall
1254 auli r20, r20, ha16(sys_call_table)
1255 }
1256 s2a r20, TREG_SYSCALL_NR_NAME, r20
1257 lw r20, r20
1258
1259 /* Jump to syscall handler. */
1260 jalr r20
1261.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
1262
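
The dispatch just above is a bounds check plus a table load. A hedged C sketch; sys_call_fn_t is an illustrative typedef, not a kernel type:

    typedef long (*sys_call_fn_t)(long, long, long, long, long, long);
    long ret;
    if ((unsigned long)nr < __NR_syscalls) {         /* slt_u + bbns      */
            sys_call_fn_t fn = ((sys_call_fn_t *)sys_call_table)[nr];
            ret = fn(r0, r1, r2, r3, r4, r5);        /* jalr r20          */
    } else {
            ret = -ENOSYS;                           /* .Linvalid_syscall */
    }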
1263 /*
1264 * Write our r0 onto the stack so it gets restored instead
1265 * of whatever the user had there before.
1266 */
1267 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1268 sw r29, r0
1269
1270.Lsyscall_sigreturn_skip:
1271 FEEDBACK_REENTER(handle_syscall)
1272
1273 /* Do syscall trace again, if requested. */
1274 lw r30, r31
1275 andi r30, r30, _TIF_SYSCALL_TRACE
1276 bzt r30, 1f
1277 {
1278 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1279 jal do_syscall_trace_exit
1280 }
 1281 FEEDBACK_REENTER(handle_syscall)
12821: {
1283 movei r30, 0 /* not an NMI */
1284 j .Lresume_userspace /* jump into middle of interrupt_return */
1285 }
1286
1287.Linvalid_syscall:
1288 /* Report an invalid syscall back to the user program */
1289 {
1290 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1291 movei r28, -ENOSYS
1292 }
1293 sw r29, r28
1294 {
1295 movei r30, 0 /* not an NMI */
1296 j .Lresume_userspace /* jump into middle of interrupt_return */
1297 }
1298 STD_ENDPROC(handle_syscall)
1299
1300 /* Return the address for oprofile to suppress in backtraces. */
1301STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
1302 lnk r0
1303 {
1304 addli r0, r0, .Lhandle_syscall_link - .
1305 jrp lr
1306 }
1307 STD_ENDPROC(handle_syscall_link_address)
1308
1309STD_ENTRY(ret_from_fork)
1310 jal sim_notify_fork
1311 jal schedule_tail
1312 FEEDBACK_REENTER(ret_from_fork)
1313 {
1314 movei r30, 0 /* not an NMI */
1315 j .Lresume_userspace /* jump into middle of interrupt_return */
1316 }
1317 STD_ENDPROC(ret_from_fork)
1318
1319STD_ENTRY(ret_from_kernel_thread)
1320 jal sim_notify_fork
1321 jal schedule_tail
1322 FEEDBACK_REENTER(ret_from_fork)
1323 {
1324 move r0, r31
1325 jalr r30
1326 }
1327 FEEDBACK_REENTER(ret_from_kernel_thread)
1328 {
1329 movei r30, 0 /* not an NMI */
1330 j .Lresume_userspace /* jump into middle of interrupt_return */
1331 }
1332 STD_ENDPROC(ret_from_kernel_thread)
1333
1334 /*
1335 * Code for ill interrupt.
1336 */
1337 .pushsection .text.handle_ill,"ax"
1338handle_ill:
1339 finish_interrupt_save handle_ill
1340
1341 /*
 1342 * Check whether we are single-stepping at user level. If so, then
1343 * we need to restore the PC.
1344 */
1345 check_single_stepping ill, .Ldispatch_normal_ill
1346
1347 {
1348 /* See if the PC is the 1st bundle in the buffer */
1349 seq r25, r27, r26
1350
1351 /* Point to the 2nd bundle in the buffer */
1352 addi r26, r26, 8
1353 }
1354 {
1355 /* Point to the original pc */
1356 addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
1357
1358 /* Branch if the PC is the 1st bundle in the buffer */
1359 bnz r25, 3f
1360 }
1361 {
1362 /* See if the PC is the 2nd bundle of the buffer */
1363 seq r25, r27, r26
1364
1365 /* Set PC to next instruction */
1366 addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
1367 }
1368 {
1369 /* Point to flags */
1370 addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET
1371
1372 /* Branch if PC is in the second bundle */
1373 bz r25, 2f
1374 }
1375 /* Load flags */
1376 lw r25, r25
1377 {
1378 /*
1379 * Get the offset for the register to restore
1380 * Note: the lower bound is 2, so we have implicit scaling by 4.
1381 * No multiplication of the register number by the size of a register
1382 * is needed.
1383 */
1384 mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
1385 SINGLESTEP_STATE_TARGET_UB
1386
1387 /* Mask Rewrite_LR */
1388 andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE
1389 }
1390 {
1391 addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET
1392
1393 /* Don't rewrite temp register */
1394 bz r25, 3f
1395 }
1396 {
1397 /* Get the temp value */
1398 lw r29, r29
1399
1400 /* Point to where the register is stored */
1401 add r27, r27, sp
1402 }
1403
1404 /* Add in the C ABI save area size to the register offset */
1405 addi r27, r27, C_ABI_SAVE_AREA_SIZE
1406
1407 /* Restore the user's register with the temp value */
1408 sw r27, r29
1409 j 3f
1410
14112:
1412 /* Must be in the third bundle */
1413 addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET
1414
14153:
1416 /* set PC and continue */
1417 lw r26, r24
1418 {
1419 sw r28, r26
1420 GET_THREAD_INFO(r0)
1421 }
 1422
1423 /*
1424 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
1425 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
1426 * need to clear it here and can't really impose on all other arches.
1427 * So what's another write between friends?
1428 */
1429
1430 addi r1, r0, THREAD_INFO_FLAGS_OFFSET
1431 {
1432 lw r2, r1
1433 addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */
1434 }
1435 andi r2, r2, ~_TIF_SINGLESTEP
1436 sw r1, r2
1437
1438 /* Issue a sigtrap */
1439 {
1440 lw r0, r0 /* indirect thru thread_info to get task_info*/
1441 addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
1442 }
1443
1444 jal send_sigtrap /* issue a SIGTRAP */
1445 FEEDBACK_REENTER(handle_ill)
1446 {
1447 movei r30, 0 /* not an NMI */
1448 j .Lresume_userspace /* jump into middle of interrupt_return */
1449 }
1450
1451.Ldispatch_normal_ill:
1452 {
1453 jalr r0
1454 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1455 }
1456 FEEDBACK_REENTER(handle_ill)
1457 {
1458 movei r30, 0 /* not an NMI */
1459 j interrupt_return
1460 }
1461 STD_ENDPROC(handle_ill)
1462
1463/* Various stub interrupt handlers and syscall handlers */
1464
1465STD_ENTRY_LOCAL(_kernel_double_fault)
 1466 mfspr r1, SPR_EX_CONTEXT_K_0
1467 move r2, lr
1468 move r3, sp
1469 move r4, r52
1470 addi sp, sp, -C_ABI_SAVE_AREA_SIZE
1471 j kernel_double_fault
1472 STD_ENDPROC(_kernel_double_fault)
1473
1474STD_ENTRY_LOCAL(bad_intr)
 1475 mfspr r2, SPR_EX_CONTEXT_K_0
1476 panic "Unhandled interrupt %#x: PC %#lx"
1477 STD_ENDPROC(bad_intr)
1478
1479/*
1480 * Special-case sigreturn to not write r0 to the stack on return.
1481 * This is technically more efficient, but it also avoids difficulties
1482 * in the 64-bit OS when handling 32-bit compat code, since we must not
1483 * sign-extend r0 for the sigreturn return-value case.
1484 */
1485#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
1486 STD_ENTRY(_##x); \
1487 addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
1488 { \
1489 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1490 j x \
1491 }; \
1492 STD_ENDPROC(_##x)
1493
 1494PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
 1495
 1496/* Save additional callee-saves to pt_regs and jump to standard function. */
1497STD_ENTRY(_sys_clone)
1498 push_extra_callee_saves r4
1499 j sys_clone
1500 STD_ENDPROC(_sys_clone)
1501
1502/*
1503 * This entrypoint is taken for the cmpxchg and atomic_update fast
1504 * swints. We may wish to generalize it to other fast swints at some
1505 * point, but for now there are just two very similar ones, which
1506 * makes it faster.
1507 *
1508 * The fast swint code is designed to have a small footprint. It does
1509 * not save or restore any GPRs, counting on the caller-save registers
1510 * to be available to it on entry. It does not modify any callee-save
1511 * registers (including "lr"). It does not check what PL it is being
1512 * called at, so you'd better not call it other than at PL0.
1513 * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
1514 * it ever is necessary to use more registers, be aware.
1515 *
1516 * It does not use the stack, but since it might be re-interrupted by
1517 * a page fault which would assume the stack was valid, it does
1518 * save/restore the stack pointer and zero it out to make sure it gets reset.
1519 * Since we always keep interrupts disabled, the hypervisor won't
 1520 * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
1521 * (other than to advance the PC on return).
1522 *
1523 * We have to manually validate the user vs kernel address range
1524 * (since at PL1 we can read/write both), and for performance reasons
1525 * we don't allow cmpxchg on the fc000000 memory region, since we only
1526 * validate that the user address is below PAGE_OFFSET.
1527 *
1528 * We place it in the __HEAD section to ensure it is relatively
1529 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
1530 *
1531 * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
1532 *
1533 * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
1534 * would store is the same as the value we just loaded.
1535 */
1536 __HEAD
1537 .align 64
1538 /* Align much later jump on the start of a cache line. */
1539 nop
1540#if PAGE_SIZE >= 0x10000
1541 nop
1542#endif
1543ENTRY(sys_cmpxchg)
1544
1545 /*
1546 * Save "sp" and set it zero for any possible page fault.
1547 *
1548 * HACK: We want to both zero sp and check r0's alignment,
1549 * so we do both at once. If "sp" becomes nonzero we
1550 * know r0 is unaligned and branch to the error handler that
1551 * restores sp, so this is OK.
1552 *
1553 * ICS is disabled right now so having a garbage but nonzero
1554 * sp is OK, since we won't execute any faulting instructions
1555 * when it is nonzero.
1556 */
1557 {
1558 move r27, sp
1559 andi sp, r0, 3
1560 }
1561
1562 /*
1563 * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
1564 * address is less than PAGE_OFFSET, since that won't trap at PL1.
1565 * We only use bits less than PAGE_SHIFT to avoid having to worry
1566 * about aliasing among multiple mappings of the same physical page,
1567 * and we ignore the low 3 bits so we have one lock that covers
1568 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
 1569 * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
1570 */
1571
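
The hashing described above has a direct C counterpart; as the NOTE says, the authoritative version is __atomic_hashed_lock() in arch/tile/lib/atomic_32.c. A hedged sketch of the same computation:

    /* Sketch: map a user word address to its hashed lock word. */
    static inline int *hashed_lock_sketch(unsigned long addr)
    {
            unsigned long hash = (addr >> 3) & ((1UL << ATOMIC_HASH_SHIFT) - 1);
            return (int *)((char *)atomic_locks + (hash << 2));
    }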
1572#if (PAGE_OFFSET & 0xffff) != 0
1573# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
1574#endif
1575
1576 {
1577 /* Check for unaligned input. */
1578 bnz sp, .Lcmpxchg_badaddr
1579 auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
1580 }
1581 {
1582 /*
1583 * Slide bits into position for 'mm'. We want to ignore
1584 * the low 3 bits of r0, and consider only the next
1585 * ATOMIC_HASH_SHIFT bits.
1586 * Because of C pointer arithmetic, we want to compute this:
1587 *
1588 * ((char*)atomic_locks +
 1589 * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
1590 *
1591 * Instead of two shifts we just ">> 1", and use 'mm'
1592 * to ignore the low and high bits we don't want.
1593 */
1594 shri r25, r0, 1
1595
1596 slt_u r23, r0, r23
1597
1598 /*
1599 * Ensure that the TLB is loaded before we take out the lock.
1600 * This will start fetching the value all the way into our L1
1601 * as well (and if it gets modified before we grab the lock,
1602 * it will be invalidated from our cache before we reload it).
1603 */
1604 lw r26, r0
1605 }
1606 {
 1607 auli r21, zero, ha16(atomic_locks)
1608
1609 bbns r23, .Lcmpxchg_badaddr
1610 }
1611#if PAGE_SIZE < 0x10000
1612 /* atomic_locks is page-aligned so for big pages we don't need this. */
1613 addli r21, r21, lo16(atomic_locks)
1614#endif
1615 {
1616 /*
1617 * Insert the hash bits into the page-aligned pointer.
1618 * ATOMIC_HASH_SHIFT is so big that we don't actually hash
1619 * the unmasked address bits, as that may cause unnecessary
1620 * collisions.
1621 */
1622 mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
1623
1624 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
1625 }
1626 {
1627 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
1628 bbs r23, .Lcmpxchg64
1629 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1630 }
1631 {
1632 /*
1633 * We very carefully align the code that actually runs with
 1634 * the lock held (twelve bundles) so that we know it is all in
1635 * the icache when we start. This instruction (the jump) is
1636 * at the start of the first cache line, address zero mod 64;
1637 * we jump to the very end of the second cache line to get that
1638 * line loaded in the icache, then fall through to issue the tns
1639 * in the third cache line, at which point it's all cached.
1640 * Note that is for performance, not correctness.
1641 */
1642 j .Lcmpxchg32_tns
1643 }
1644
1645/* Symbol for do_page_fault_ics() to use to compare against the PC. */
1646.global __sys_cmpxchg_grab_lock
1647__sys_cmpxchg_grab_lock:
1648
1649 /*
1650 * Perform the actual cmpxchg or atomic_update.
1651 */
1652.Ldo_cmpxchg32:
1653 {
1654 lw r21, r0
1655 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
1656 move r24, r2
1657 }
1658 {
1659 seq r22, r21, r1 /* See if cmpxchg matches. */
1660 and r25, r21, r1 /* If atomic_update, compute (*mem & mask) */
1661 }
1662 {
1663 or r22, r22, r23 /* Skip compare branch for atomic_update. */
1664 add r25, r25, r2 /* Compute (*mem & mask) + addend. */
1665 }
1666 {
1667 mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */
 1668 bbns r22, .Lcmpxchg32_nostore
 1669 }
1670 seq r22, r24, r21 /* Are we storing the value we loaded? */
1671 bbs r22, .Lcmpxchg32_nostore
1672 sw r0, r24
1673
 1674 /* The following instruction is the start of the second cache line. */
1675 /* Do slow mtspr here so the following "mf" waits less. */
1676 {
1677 move sp, r27
 1678 mtspr SPR_EX_CONTEXT_K_0, r28
1679 }
1680 mf
1681
1682 {
1683 move r0, r21
1684 sw ATOMIC_LOCK_REG_NAME, zero
1685 }
1686 iret
1687
1688 /* Duplicated code here in the case where we don't overlap "mf" */
 1689.Lcmpxchg32_nostore:
1690 {
1691 move r0, r21
1692 sw ATOMIC_LOCK_REG_NAME, zero
1693 }
1694 {
1695 move sp, r27
 1696 mtspr SPR_EX_CONTEXT_K_0, r28
1697 }
1698 iret
1699
1700 /*
1701 * The locking code is the same for 32-bit cmpxchg/atomic_update,
1702 * and for 64-bit cmpxchg. We provide it as a macro and put
1703 * it into both versions. We can't share the code literally
1704 * since it depends on having the right branch-back address.
1705 */
1706 .macro cmpxchg_lock, bitwidth
1707
1708 /* Lock; if we succeed, jump back up to the read-modify-write. */
1709#ifdef CONFIG_SMP
1710 tns r21, ATOMIC_LOCK_REG_NAME
1711#else
1712 /*
1713 * Non-SMP preserves all the lock infrastructure, to keep the
1714 * code simpler for the interesting (SMP) case. However, we do
1715 * one small optimization here and in atomic_asm.S, which is
1716 * to fake out acquiring the actual lock in the atomic_lock table.
1717 */
1718 movei r21, 0
1719#endif
1720
1721 /* Issue the slow SPR here while the tns result is in flight. */
 1722 mfspr r28, SPR_EX_CONTEXT_K_0
1723
1724 {
1725 addi r28, r28, 8 /* return to the instruction after the swint1 */
1726 bzt r21, .Ldo_cmpxchg\bitwidth
1727 }
1728 /*
1729 * The preceding instruction is the last thing that must be
 1730 * hot in the icache before we do the "tns" above.
1731 */
1732
1733#ifdef CONFIG_SMP
1734 /*
1735 * We failed to acquire the tns lock on our first try. Now use
1736 * bounded exponential backoff to retry, like __atomic_spinlock().
1737 */
1738 {
1739 moveli r23, 2048 /* maximum backoff time in cycles */
1740 moveli r25, 32 /* starting backoff time in cycles */
1741 }
17421: mfspr r26, CYCLE_LOW /* get start point for this backoff */
17432: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */
1744 sub r22, r22, r26
1745 slt r22, r22, r25
1746 bbst r22, 2b
1747 {
1748 shli r25, r25, 1 /* double the backoff; retry the tns */
1749 tns r21, ATOMIC_LOCK_REG_NAME
1750 }
1751 slt r26, r23, r25 /* is the proposed backoff too big? */
1752 {
1753 mvnz r25, r26, r23
1754 bzt r21, .Ldo_cmpxchg\bitwidth
1755 }
1756 j 1b
1757#endif /* CONFIG_SMP */
1758 .endm
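
The CONFIG_SMP retry path above is a bounded exponential backoff, like __atomic_spinlock() as the comment notes. A hedged C sketch; try_tns() and get_cycles() stand in for the tns instruction and CYCLE_LOW reads:

    /* Sketch: take the lock with bounded exponential backoff. */
    unsigned int delay = 32;                   /* starting backoff, cycles */
    while (try_tns(lock) != 0) {               /* tns: 0 means lock taken  */
            unsigned long start = get_cycles();
            while (get_cycles() - start < delay)
                    /* spin */;
            delay *= 2;
            if (delay > 2048)
                    delay = 2048;              /* maximum backoff, cycles  */
    }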
1759
1760.Lcmpxchg32_tns:
1761 /*
1762 * This is the last instruction on the second cache line.
1763 * The nop here loads the second line, then we fall through
1764 * to the tns to load the third line before we take the lock.
1765 */
1766 nop
1767 cmpxchg_lock 32
1768
1769 /*
1770 * This code is invoked from sys_cmpxchg after most of the
1771 * preconditions have been checked. We still need to check
1772 * that r0 is 8-byte aligned, since if it's not we won't
1773 * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
1774 * lock pointer and r27/r28 have the saved SP/PC.
1775 * r23 is holding "r0 & 7" so we can test for alignment.
1776 * The compare value is in r2/r3; the new value is in r4/r5.
1777 * On return, we must put the old value in r0/r1.
1778 */
1779 .align 64
1780.Lcmpxchg64:
1781 {
1782 bzt r23, .Lcmpxchg64_tns
1783 }
1784 j .Lcmpxchg_badaddr
1785
1786.Ldo_cmpxchg64:
1787 {
1788 lw r21, r0
1789 addi r25, r0, 4
1790 }
1791 {
1792 lw r1, r25
1793 }
1794 seq r26, r21, r2
1795 {
1796 bz r26, .Lcmpxchg64_mismatch
1797 seq r26, r1, r3
1798 }
1799 {
1800 bz r26, .Lcmpxchg64_mismatch
1801 }
1802 sw r0, r4
1803 sw r25, r5
1804
1805 /*
1806 * The 32-bit path provides optimized "match" and "mismatch"
1807 * iret paths, but we don't have enough bundles in this cache line
1808 * to do that, so we just make even the "mismatch" path do an "mf".
1809 */
1810.Lcmpxchg64_mismatch:
1811 {
1812 move sp, r27
 1813 mtspr SPR_EX_CONTEXT_K_0, r28
1814 }
1815 mf
1816 {
1817 move r0, r21
1818 sw ATOMIC_LOCK_REG_NAME, zero
1819 }
1820 iret
1821
1822.Lcmpxchg64_tns:
1823 cmpxchg_lock 64
1824
1825
1826 /*
1827 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
1828 * just raise the appropriate signal and exit. Doing it this
1829 * way means we don't have to duplicate the code in intvec.S's
1830 * int_hand macro that locates the top of the stack.
1831 */
1832.Lcmpxchg_badaddr:
1833 {
1834 moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
1835 move sp, r27
1836 }
1837 j intvec_SWINT_1
1838 ENDPROC(sys_cmpxchg)
1839 ENTRY(__sys_cmpxchg_end)
1840
1841
1842/* The single-step support may need to read all the registers. */
1843int_unalign:
1844 push_extra_callee_saves r0
1845 j do_trap
1846
1847/* Include .intrpt array of interrupt vectors */
1848 .section ".intrpt", "ax"
 1849
1850#ifndef CONFIG_USE_PMC
1851#define handle_perf_interrupt bad_intr
1852#endif
 1853
 1854#ifndef CONFIG_HARDWALL
 1855#define do_hardwall_trap bad_intr
 1856#endif
1857
1858 int_hand INT_ITLB_MISS, ITLB_MISS, \
1859 do_page_fault, handle_interrupt_no_single_step
1860 int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
1861 int_hand INT_ILL, ILL, do_trap, handle_ill
1862 int_hand INT_GPV, GPV, do_trap
1863 int_hand INT_SN_ACCESS, SN_ACCESS, do_trap
1864 int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
1865 int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
1866 int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr
1867 int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr
1868 int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
1869 int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
1870 int_hand INT_SWINT_3, SWINT_3, do_trap
1871 int_hand INT_SWINT_2, SWINT_2, do_trap
1872 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1873 int_hand INT_SWINT_0, SWINT_0, do_trap
1874 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1875 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1876 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1877 int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
1878 int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
1879 int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
1880 int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr
1881 int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
1882 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
1883 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1884 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1885 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
1886 int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
1887 int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
1888 int_hand INT_IDN_CA, IDN_CA, bad_intr
1889 int_hand INT_UDN_CA, UDN_CA, bad_intr
1890 int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
1891 int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
1892 int_hand INT_PERF_COUNT, PERF_COUNT, \
 1893 handle_perf_interrupt, handle_nmi
 1894 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
1895#if CONFIG_KERNEL_PL == 2
1896 dc_dispatch INT_INTCTRL_2, INTCTRL_2
1897 int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
1898#else
1899 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
1900 dc_dispatch INT_INTCTRL_1, INTCTRL_1
 1901#endif
1902 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
1903 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
 1904 hv_message_intr
 1905 int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
 1906 tile_dev_intr
1907 int_hand INT_I_ASID, I_ASID, bad_intr
1908 int_hand INT_D_ASID, D_ASID, bad_intr
1909 int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
 1910 do_page_fault
 1911 int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
 1912 do_page_fault
 1913 int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
 1914 do_page_fault
1915 int_hand INT_SN_CPL, SN_CPL, bad_intr
1916 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
 1917 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
 1918 handle_perf_interrupt, handle_nmi
1919
1920 /* Synthetic interrupt delivered only by the simulator */
1921 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint