#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/segment.h>
#include <asm/entry.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to has used the
 * math co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself, and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last)				\
{								\
	void *_last;						\
	__asm__ __volatile__(					\
		"movel	%1, %%a0\n\t"				\
		"movel	%2, %%a1\n\t"				\
		"jbsr	resume\n\t"				\
		"movel	%%d1, %0\n\t"				\
		: "=d" (_last)					\
		: "d" (prev), "d" (next)			\
		: "cc", "d0", "d1", "d2", "d3", "d4", "d5",	\
		  "a0", "a1");					\
	(last) = _last;						\
}
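
/*
 * Usage sketch (illustrative only, not part of this header): the
 * scheduler's context-switch path expands switch_to() with the
 * outgoing and incoming task pointers.  'pick_next()' below is a
 * hypothetical helper:
 *
 *	struct task_struct *prev = current;
 *	struct task_struct *next = pick_next();
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *	// When this task is scheduled back in, 'last' names the
 *	// task that executed immediately before us.
 */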

#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile (""   : : :"memory")
#define rmb()	asm volatile (""   : : :"memory")
#define wmb()	asm volatile (""   : : :"memory")
#define set_mb(var, value)	({ (var) = (value); wmb(); })

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define read_barrier_depends()	((void)0)
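
/*
 * Usage sketch (illustrative only): a typical use of these barriers
 * is publishing data before raising a flag.  'shared_buf' and
 * 'data_ready' are hypothetical variables:
 *
 *	// producer
 *	shared_buf = fill_buffer();	// hypothetical helper
 *	smp_wmb();			// order buffer writes before flag
 *	data_ready = 1;
 *
 *	// consumer
 *	while (!data_ready)
 *		;
 *	smp_rmb();			// order flag read before buffer reads
 *	consume(shared_buf);		// hypothetical helper
 */
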
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
/*
 * No read-modify-write instructions available: make the exchange
 * atomic by disabling interrupts around a plain load/store pair.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
#else
/*
 * The CPU has cas{b,w,l}: read the old value, then retry the
 * compare-and-swap until no other agent has modified the location.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
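
/*
 * Usage sketch (illustrative only): xchg() atomically swaps in a new
 * value and returns the old one, e.g. a minimal test-and-set lock
 * ('lock_word' is a hypothetical variable):
 *
 *	static volatile unsigned long lock_word;
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		;			// spin until we stored the 1
 *	// ... critical section ...
 *	xchg(&lock_word, 0);		// release
 */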

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
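
/*
 * Usage sketch (illustrative only): cmpxchg_local() stores 'n' into
 * '*ptr' only if the current value equals 'o' and returns the value
 * it found, atomically with respect to this CPU.  'counter' is a
 * hypothetical variable also updated from interrupt context:
 *
 *	unsigned long old, seen;
 *
 *	do {
 *		old = counter;
 *		seen = cmpxchg_local(&counter, old, old + 1);
 *	} while (seen != old);	// retry if an interrupt raced with us
 */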

#define arch_align_stack(x) (x)

#endif /* _M68KNOMMU_SYSTEM_H */