x86: add hooks for kmemcheck
arch/x86/include/asm/string_32.h
#ifndef _ASM_X86_STRING_32_H
#define _ASM_X86_STRING_32_H

#ifdef __KERNEL__

/* Let gcc decide whether to inline or use the out of line functions */

#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);

#define __HAVE_ARCH_STRNCPY
extern char *strncpy(char *dest, const char *src, size_t count);

#define __HAVE_ARCH_STRCAT
extern char *strcat(char *dest, const char *src);

#define __HAVE_ARCH_STRNCAT
extern char *strncat(char *dest, const char *src, size_t count);

#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *cs, const char *ct);

#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *cs, const char *ct, size_t count);

#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *s, int c);

#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);

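/*
 * Copy n/4 dwords with "rep ; movsl", then pick up the remaining n%4
 * bytes with "rep ; movsb".  The dummy outputs d0-d2 tell gcc that
 * %ecx, %edi and %esi are clobbered.
 */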
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
	int d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "movl %4,%%ecx\n\t"
		     "andl $3,%%ecx\n\t"
		     "jz 1f\n\t"
		     "rep ; movsb\n\t"
		     "1:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}

/*
 * This looks ugly, but the compiler can optimize it totally,
 * as the count is constant.
 */
static __always_inline void *__constant_memcpy(void *to, const void *from,
					       size_t n)
{
	long esi, edi;
	if (!n)
		return to;

	switch (n) {
	case 1:
		*(char *)to = *(char *)from;
		return to;
	case 2:
		*(short *)to = *(short *)from;
		return to;
	case 4:
		*(int *)to = *(int *)from;
		return to;

	case 3:
		*(short *)to = *(short *)from;
		*((char *)to + 2) = *((char *)from + 2);
		return to;
	case 5:
		*(int *)to = *(int *)from;
		*((char *)to + 4) = *((char *)from + 4);
		return to;
	case 6:
		*(int *)to = *(int *)from;
		*((short *)to + 2) = *((short *)from + 2);
		return to;
	case 8:
		*(int *)to = *(int *)from;
		*((int *)to + 1) = *((int *)from + 1);
		return to;
	}
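	/*
	 * Other constant sizes: copy whole dwords first ("rep ; movsl" for
	 * blocks of 20 bytes or more, otherwise up to four unrolled "movsl"
	 * instructions), then let the switch below mop up the remaining
	 * 0-3 tail bytes.
	 */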

	esi = (long)from;
	edi = (long)to;
	if (n >= 5 * 4) {
		/* large block: use rep prefix */
		int ecx;
		asm volatile("rep ; movsl"
			     : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
			     : "0" (n / 4), "1" (edi), "2" (esi)
			     : "memory"
			);
	} else {
		/* small block: don't clobber ecx + smaller code */
		if (n >= 4 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (n >= 3 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (n >= 2 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (n >= 1 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
	}
	switch (n % 4) {
		/* tail */
	case 0:
		return to;
	case 1:
		asm volatile("movsb"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	case 2:
		asm volatile("movsw"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	default:
		asm volatile("movsw\n\tmovsb"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	}
}

#define __HAVE_ARCH_MEMCPY

#ifdef CONFIG_X86_USE_3DNOW

#include <asm/mmx.h>

/*
 * This CPU favours 3DNow strongly (eg AMD Athlon)
 */

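/*
 * Copies below 512 bytes use the plain string instructions; only larger
 * blocks are handed to the MMX-based _mmx_memcpy(), where the cost of
 * saving and restoring the FPU/MMX state is presumably worth it.
 */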
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
	if (len < 512)
		return __constant_memcpy(to, from, len);
	return _mmx_memcpy(to, from, len);
}

static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
	if (len < 512)
		return __memcpy(to, from, len);
	return _mmx_memcpy(to, from, len);
}

#define memcpy(t, f, n) \
	(__builtin_constant_p((n)) \
	 ? __constant_memcpy3d((t), (f), (n)) \
	 : __memcpy3d((t), (f), (n)))

#else

/*
 * No 3D Now!
 */

#ifndef CONFIG_KMEMCHECK
#define memcpy(t, f, n) \
	(__builtin_constant_p((n)) \
	 ? __constant_memcpy((t), (f), (n)) \
	 : __memcpy((t), (f), (n)))
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(t, f, n) __memcpy((t), (f), (n))
#endif

#endif

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);

#define memcmp __builtin_memcmp

#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);

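/*
 * Store the byte 'c' into 'count' bytes starting at 's' with a plain
 * "rep ; stosb".  d0 and d1 are dummy outputs so that gcc knows %ecx and
 * %edi are clobbered.
 */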
static inline void *__memset_generic(void *s, char c, size_t count)
{
	int d0, d1;
	asm volatile("rep\n\t"
		     "stosb"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (c), "1" (s), "0" (count)
		     : "memory");
	return s;
}

/* we might want to write optimized versions of these later */
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))

/*
 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
 * things 32 bits at a time even when we don't know the size of the
 * area at compile-time..
 */
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
	int d0, d1;
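	/*
	 * The pattern in 'c' is expected to have the fill byte replicated
	 * into all four byte positions.  Store count/4 dwords of it with
	 * "rep ; stosl", then test bit 1 and bit 0 of count to store a
	 * trailing word and/or byte.
	 */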
	asm volatile("rep ; stosl\n\t"
		     "testb $2,%b3\n\t"
		     "je 1f\n\t"
		     "stosw\n"
		     "1:\ttestb $1,%b3\n\t"
		     "je 2f\n\t"
		     "stosb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
		     : "memory");
	return s;
}

/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
extern size_t strnlen(const char *s, size_t count);
/* end of additional stuff */

#define __HAVE_ARCH_STRSTR
extern char *strstr(const char *cs, const char *ct);

/*
 * This looks horribly ugly, but the compiler can optimize it totally,
 * as we by now know that both pattern and count are constant..
 */
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
				    size_t count)
{
	switch (count) {
	case 0:
		return s;
	case 1:
		*(unsigned char *)s = pattern & 0xff;
		return s;
	case 2:
		*(unsigned short *)s = pattern & 0xffff;
		return s;
	case 3:
		*(unsigned short *)s = pattern & 0xffff;
		*((unsigned char *)s + 2) = pattern & 0xff;
		return s;
	case 4:
		*(unsigned long *)s = pattern;
		return s;
	}

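/*
 * For the remaining constant counts, COMMON() emits "rep ; stosl" for
 * count/4 dwords of the replicated pattern, optionally followed by the
 * trailing "stosw"/"stosb" needed for the count % 4 leftover bytes.
 */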
#define COMMON(x) \
	asm volatile("rep ; stosl" \
		     x \
		     : "=&c" (d0), "=&D" (d1) \
		     : "a" (eax), "0" (count/4), "1" ((long)s) \
		     : "memory")

	{
		int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
		/* Workaround for broken gcc 4.0 */
		register unsigned long eax asm("%eax") = pattern;
#else
		unsigned long eax = pattern;
#endif

		switch (count % 4) {
		case 0:
			COMMON("");
			return s;
		case 1:
			COMMON("\n\tstosb");
			return s;
		case 2:
			COMMON("\n\tstosw");
			return s;
		default:
			COMMON("\n\tstosw\n\tstosb");
			return s;
		}
	}

#undef COMMON
}

#define __constant_c_x_memset(s, c, count) \
	(__builtin_constant_p(count) \
	 ? __constant_c_and_count_memset((s), (c), (count)) \
	 : __constant_c_memset((s), (c), (count)))

#define __memset(s, c, count) \
	(__builtin_constant_p(count) \
	 ? __constant_count_memset((s), (c), (count)) \
	 : __memset_generic((s), (c), (count)))

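/*
 * Multiplying the (unsigned char) fill value by 0x01010101UL replicates it
 * into all four bytes of a long, so the constant-pattern helpers above can
 * fill the area 32 bits at a time.
 */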
#define __HAVE_ARCH_MEMSET
#define memset(s, c, count) \
	(__builtin_constant_p(c) \
	 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
				 (count)) \
	 : __memset((s), (c), (count)))

/*
 * find the first occurrence of byte 'c', or 1 past the area if none
 */
#define __HAVE_ARCH_MEMSCAN
extern void *memscan(void *addr, int c, size_t size);

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_32_H */