/* arch/mips/kernel/r2300_switch.S */
1 /*
2 * r2300_switch.S: R2300 specific task switching code.
3 *
4 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
5 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
6 *
7 * Multi-cpu abstraction and macros for easier reading:
8 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
9 *
10 * Further modifications to make this work:
11 * Copyright (c) 1998-2000 Harald Koerfgen
12 */
13 #include <asm/asm.h>
14 #include <asm/cachectl.h>
15 #include <asm/fpregdef.h>
16 #include <asm/mipsregs.h>
17 #include <asm/asm-offsets.h>
18 #include <asm/page.h>
19 #include <asm/regdef.h>
20 #include <asm/stackframe.h>
21 #include <asm/thread_info.h>
22
23 #include <asm/asmmacro.h>
24
25 .set mips1
26 .align 5
27
28 /*
29 * Offset to the current process status flags, the first 32 bytes of the
30 * stack are not used.
31 */
32 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
33
34 /*
35 * FPU context is saved iff the process has used its FPU in the current
36 * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user
37 * space STATUS register should be 0, so that a process *always* starts its
38 * userland with FPU disabled after each context switch.
39 *
40 * FPU will be enabled as soon as the process accesses FPU again, through
41 * do_cpu() trap.
42 */
43
44 /*
45 * task_struct *resume(task_struct *prev, task_struct *next,
46 * struct thread_info *next_ti)
47 */
/*
 * resume: switch register state from task a0 (prev) to task a1 (next),
 * with a2 = next's thread_info.  Returns prev in v0.  Statement order
 * below is deliberate (branch delay slots, racy kernelsp update) —
 * do not reorder.
 */
48 LEAF(resume)
49 #ifndef CONFIG_CPU_HAS_LLSC
50 sw zero, ll_bit			# drop the software-emulated ll/sc reservation
51 #endif
52 mfc0 t1, CP0_STATUS
53 sw t1, THREAD_STATUS(a0)		# save CP0 status into prev->thread
54 cpu_save_nonscratch a0		# save prev's callee-saved regs (macro, asmmacro.h)
55 sw ra, THREAD_REG31(a0)		# save ra: prev resumes from here when switched back
56
57 /*
58 * check if we need to save FPU registers
59 */
60 lw t3, TASK_THREAD_INFO(a0)		# t3 = prev's thread_info
61 lw t0, TI_FLAGS(t3)			# t0 = prev's thread flags
62 li t1, _TIF_USEDFPU
63 and t2, t0, t1
64 beqz t2, 1f				# FPU unused this slice -> skip the save
65 nor t1, zero, t1			# (delay slot) t1 = ~_TIF_USEDFPU
66
67 and t0, t0, t1			# clear TIF_USEDFPU: next slice starts FPU-clean
68 sw t0, TI_FLAGS(t3)
69
70 /*
71 * clear saved user stack CU1 bit
72 */
73 lw t0, ST_OFF(t3)			# saved user STATUS word at top of kernel stack
74 li t1, ~ST0_CU1
75 and t0, t0, t1			# userland restarts with coprocessor 1 disabled
76 sw t0, ST_OFF(t3)
77
78 fpu_save_single a0, t0 # clobbers t0
79
80 1:
81 /*
82 * The order of restoring the registers takes care of the race
83 * updating $28, $29 and kernelsp without disabling ints.
84 */
85 move $28, a2				# $28 (gp) = next's thread_info
86 cpu_restore_nonscratch a1		# restore next's callee-saved regs incl. sp
87
88 addiu t1, $28, _THREAD_SIZE - 32	# top of next's kernel stack (32 bytes unused)
89 sw t1, kernelsp			# publish new kernel sp for exception entry
90
91 mfc0 t1, CP0_STATUS /* Do we really need this? */
92 li a3, 0xff01			# mask 0xff01: keep IM[7:0]|IEc from the live STATUS
93 and t1, a3
94 lw a2, THREAD_STATUS(a1)		# ...merged with next's saved STATUS bits
95 nor a3, $0, a3
96 and a2, a3
97 or a2, t1
98 mtc0 a2, CP0_STATUS
99 move v0, a0				# return value: prev task
100 jr ra
101 END(resume)
102
103 /*
104 * Save a thread's fp context.
105 */
/* _save_fp(task): store the FP register context of task a0 (macro from asmmacro.h). */
106 LEAF(_save_fp)
107 fpu_save_single a0, t1 # clobbers t1
108 jr ra
109 END(_save_fp)
110
111 /*
112 * Restore a thread's fp context.
113 */
/* _restore_fp(task): reload the FP register context of task a0 (macro from asmmacro.h). */
114 LEAF(_restore_fp)
115 fpu_restore_single a0, t1 # clobbers t1
116 jr ra
117 END(_restore_fp)
118
119 /*
120 * Load the FPU with signalling NANS. This bit pattern we're using has
121 * the property that no matter whether considered as single or as double
122 * precision represents signaling NANS.
123 *
124 * We initialize fcr31 to rounding to nearest, no exceptions.
125 */
126
127 #define FPU_DEFAULT 0x00000000
128
/*
 * _init_fpu: enable CP1 for the kernel and fill every FP register with
 * an all-ones pattern (a signalling NaN whether read as single or
 * double); fcr31 is set to FPU_DEFAULT (round-to-nearest, no exceptions).
 * Clobbers t0, t1.
 */
129 LEAF(_init_fpu)
130 mfc0 t0, CP0_STATUS
131 li t1, ST0_CU1
132 or t0, t1
133 mtc0 t0, CP0_STATUS			# set CU1 so the FP registers are accessible
134
135 li t1, FPU_DEFAULT
136 ctc1 t1, fcr31			# default FP control/status word
137
138 li t0, -1				# all-ones bit pattern = signalling NaN
139
140 mtc1 t0, $f0
141 mtc1 t0, $f1
142 mtc1 t0, $f2
143 mtc1 t0, $f3
144 mtc1 t0, $f4
145 mtc1 t0, $f5
146 mtc1 t0, $f6
147 mtc1 t0, $f7
148 mtc1 t0, $f8
149 mtc1 t0, $f9
150 mtc1 t0, $f10
151 mtc1 t0, $f11
152 mtc1 t0, $f12
153 mtc1 t0, $f13
154 mtc1 t0, $f14
155 mtc1 t0, $f15
156 mtc1 t0, $f16
157 mtc1 t0, $f17
158 mtc1 t0, $f18
159 mtc1 t0, $f19
160 mtc1 t0, $f20
161 mtc1 t0, $f21
162 mtc1 t0, $f22
163 mtc1 t0, $f23
164 mtc1 t0, $f24
165 mtc1 t0, $f25
166 mtc1 t0, $f26
167 mtc1 t0, $f27
168 mtc1 t0, $f28
169 mtc1 t0, $f29
170 mtc1 t0, $f30
171 mtc1 t0, $f31
172 jr ra
173 END(_init_fpu)
/* This page took 0.033724 seconds and 5 git commands to generate. */