MIPS: COP2: CPP macro safety fixes.
arch/mips/include/asm/switch_to.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/msa.h>

struct task_struct;

enum {
	FP_SAVE_NONE = 0,
	FP_SAVE_VECTOR = -1,
	FP_SAVE_SCALAR = 1,
};

/**
 * resume - resume execution of a task
 * @prev: The task previously executed.
 * @next: The task to begin executing.
 * @next_ti: task_thread_info(next).
 * @fp_save: Which, if any, FP context to save for prev.
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti,
		s32 fp_save);
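/*
 * A note on the return value (an explanatory sketch, not part of the
 * original file): resume() performs the actual kernel stack switch, so a
 * task that calls it only "returns" when some other task later switches
 * back to it. The task being switched away from at that moment is handed
 * back in the return register, which is why switch_to() below assigns the
 * result to (last) instead of trusting its now-stale local prev:
 *
 *	(last) = resume(prev, next, task_thread_info(next), __fpsave);
 */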

extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev) \
do { \
	struct thread_info *__prev_ti = task_thread_info(prev); \
 \
	if (cpu_has_fpu && \
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
	    (!(KSTK_STATUS(prev) & ST0_CU1))) { \
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
		prev->cpus_allowed = prev->thread.user_cpus_allowed; \
	} \
	/* NB: relies on `next' being in scope at the macro's sole \
	   expansion site, inside switch_to() below */ \
	next->thread.emulated_fp = 0; \
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

/*
 * On CPUs without native ll/sc the kernel emulates the pair in software,
 * tracking the reservation in the ll_bit/ll_task globals declared above.
 * Clearing ll_bit across a context switch makes an emulated sc that was
 * interrupted by the switch fail, preserving the intended atomicity.
 */
#define __clear_software_ll_bit() \
do { \
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
		ll_bit = 0; \
} while (0)

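/*
 * For context, a minimal sketch of the consumer side, simplified from the
 * ll/sc emulation in arch/mips/kernel/traps.c (abridged, not a verbatim
 * copy): an emulated sc only succeeds if the reservation taken by the
 * emulated ll is still held by the current task.
 *
 *	if (ll_bit == 0 || ll_task != current) {
 *		regs->regs[reg] = 0;		sc reports failure
 *	} else {
 *		*vaddr = regs->regs[reg];	perform the store
 *		regs->regs[reg] = 1;		sc reports success
 *	}
 */
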
#define switch_to(prev, next, last) \
do { \
	u32 __c0_stat; \
	s32 __fpsave = FP_SAVE_NONE; \
	__mips_mt_fpaff_switch_to(prev); \
	if (cpu_has_dsp) \
		__save_dsp(prev); \
	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \
		if (cop2_lazy_restore) \
			KSTK_STATUS(prev) &= ~ST0_CU2; \
		__c0_stat = read_c0_status(); \
		write_c0_status(__c0_stat | ST0_CU2); \
		cop2_save(prev); \
		write_c0_status(__c0_stat & ~ST0_CU2); \
	} \
	__clear_software_ll_bit(); \
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
		__fpsave = FP_SAVE_SCALAR; \
	/* MSA supersedes scalar FP: the vector save covers both */ \
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
		__fpsave = FP_SAVE_VECTOR; \
	(last) = resume(prev, next, task_thread_info(next), __fpsave); \
	disable_msa(); \
} while (0)
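
/*
 * The CU2 sequences above and in finish_arch_switch() below follow a
 * general "grant coprocessor access, operate, put Status back" bracket.
 * A hypothetical pair of helpers expressing the same pattern (an
 * illustrative sketch only; this header open-codes it instead, and
 * cop2_save()/cop2_restore() are given the task whose context moves):
 *
 *	static inline u32 cu2_enable(void)
 *	{
 *		u32 stat = read_c0_status();
 *		write_c0_status(stat | ST0_CU2);
 *		return stat;
 *	}
 *
 *	static inline void cu2_disable(u32 stat)
 *	{
 *		write_c0_status(stat & ~ST0_CU2);
 *	}
 */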

#define finish_arch_switch(prev) \
do { \
	u32 __c0_stat; \
	if (cop2_present && !cop2_lazy_restore && \
			(KSTK_STATUS(current) & ST0_CU2)) { \
		__c0_stat = read_c0_status(); \
		write_c0_status(__c0_stat | ST0_CU2); \
		cop2_restore(current); \
		write_c0_status(__c0_stat & ~ST0_CU2); \
	} \
	if (cpu_has_dsp) \
		__restore_dsp(current); \
	if (cpu_has_userlocal) \
		write_c0_userlocal(current_thread_info()->tp_value); \
	__restore_watch(); \
} while (0)
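
/*
 * Ordering note (a reading aid, based on how the scheduler core invokes
 * this hook): finish_arch_switch() runs after resume() has returned, i.e.
 * already on the incoming task's stack, which is why it operates on
 * `current' rather than taking the new task as a parameter. The eager
 * (non-lazy) COP2 restore therefore happens here, on the destination side
 * of the switch, pairing with the save done in switch_to() above.
 */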

#endif /* _ASM_SWITCH_TO_H */