/*
 * r2300_switch.S: R2300 specific task switching code.
 *
 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
 *
 * Multi-cpu abstraction and macros for easier reading:
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * Further modifications to make this work:
 * Copyright (c) 1998-2000 Harald Koerfgen
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

	.set	mips1
	.align	5

/*
 * Offset to the current process status flags; the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF	(_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for
 * the user space STATUS register should be 0, so that a process *always*
 * starts its userland with FPU disabled after each context switch.
 *
 * FPU will be enabled as soon as the process accesses FPU again, through
 * the do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *		       struct thread_info *next_ti, int usedfpu)
 */
LEAF(resume)
	mfc0	t1, CP0_STATUS
	sw	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	sw	ra, THREAD_REG31(a0)

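	/*
	 * a3 is the usedfpu argument from the prototype above: if the
	 * outgoing task has not used the FPU in this time slice, skip
	 * saving its FPU context entirely.
	 */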
	beqz	a3, 1f

	PTR_L	t3, TASK_THREAD_INFO(a0)

	/*
	 * clear saved user stack CU1 bit
	 */
	lw	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	sw	t0, ST_OFF(t3)

	fpu_save_single a0, t0			# clobbers t0

1:
	/*
	 * The order in which the registers are restored here takes care
	 * of the race when updating $28, $29 and kernelsp without
	 * disabling interrupts.
	 */
	move	$28, a2
	cpu_restore_nonscratch a1

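	/*
	 * $28 now holds the new thread_info pointer (next_ti); publish
	 * the top of its kernel stack (minus the unused 32 bytes, cf.
	 * ST_OFF above) as kernelsp.
	 */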
	addiu	t1, $28, _THREAD_SIZE - 32
	sw	t1, kernelsp

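	/*
	 * Build the new STATUS value: 0xff01 covers the interrupt mask
	 * (IM7..IM0) and interrupt enable bits, which are kept from the
	 * live STATUS; everything else comes from the next thread's
	 * saved THREAD_STATUS.
	 */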
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	lw	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
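	/* resume() returns the previous task_struct pointer in v0 */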
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
	fpu_save_single a0, t1			# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
	fpu_restore_single a0, t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signaling NaNs.  The bit pattern we use has the
 * property that it represents a signaling NaN no matter whether it is
 * interpreted as single or as double precision.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT  0x00000000

LEAF(_init_fpu)
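	/*
	 * Set CU1 in STATUS so that coprocessor 1 (the FPU) can be
	 * accessed below.
	 */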
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

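	/*
	 * All ones is the signaling NaN bit pattern referred to above;
	 * fill every FP register with it.
	 */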
	li	t0, -1

	mtc1	t0, $f0
	mtc1	t0, $f1
	mtc1	t0, $f2
	mtc1	t0, $f3
	mtc1	t0, $f4
	mtc1	t0, $f5
	mtc1	t0, $f6
	mtc1	t0, $f7
	mtc1	t0, $f8
	mtc1	t0, $f9
	mtc1	t0, $f10
	mtc1	t0, $f11
	mtc1	t0, $f12
	mtc1	t0, $f13
	mtc1	t0, $f14
	mtc1	t0, $f15
	mtc1	t0, $f16
	mtc1	t0, $f17
	mtc1	t0, $f18
	mtc1	t0, $f19
	mtc1	t0, $f20
	mtc1	t0, $f21
	mtc1	t0, $f22
	mtc1	t0, $f23
	mtc1	t0, $f24
	mtc1	t0, $f25
	mtc1	t0, $f26
	mtc1	t0, $f27
	mtc1	t0, $f28
	mtc1	t0, $f29
	mtc1	t0, $f30
	mtc1	t0, $f31
	jr	ra
	END(_init_fpu)