/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IP/TCP/UDP checksumming routines
 *
 * Authors:     Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Tom May, <ftom@netcom.com>
 *              Pentium Pro/II routines:
 *              Alexander Kjeldaas <astor@guardian.no>
 *              Finn Arne Gangstad <finnag@guardian.no>
 *              Lots of code moved from tcp.c and ip.c; see those files
 *              for more names.
 *
 * Changes:     Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *                           handling.
 *              Andi Kleen, add zeroing on error
 *                          converted to pure assembler
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
*/

        .text
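
/*
 * What csum_partial() computes, as a plain-C sketch (not part of the
 * original source; names are illustrative only): a 32-bit accumulator
 * holding a ones' complement sum that folds to the same 16-bit checksum
 * as the assembler versions below.
 *
 *      static unsigned int csum_partial_ref(const unsigned char *buff,
 *                                           int len, unsigned int sum)
 *      {
 *              unsigned long long acc = sum;
 *
 *              while (len >= 2) {      // 16-bit little-endian words
 *                      acc += buff[0] | (buff[1] << 8);
 *                      buff += 2;
 *                      len -= 2;
 *              }
 *              if (len)                // trailing odd byte
 *                      acc += buff[0];
 *              while (acc >> 32)       // end-around carry
 *                      acc = (acc & 0xffffffffULL) + (acc >> 32);
 *              return (unsigned int)acc;
 *      }
 */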

#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

/*
 * Experiments with Ethernet and SLIP connections show that buff
 * is aligned on either a 2-byte or 4-byte boundary.  We get at
 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 * alignment for the unrolled loop.
 */
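/*
 * The idea of the conversion: a ones' complement sum taken from an odd
 * offset equals the byte-swapped sum of the same data, so after folding
 * in the first odd byte the running sum is rotated left by 8, and the
 * rotation is undone at the end (the "testb $1, 12(%esp)" below checks
 * whether buff was odd).  A single 16-bit word then lifts 2-byte
 * alignment to 4-byte alignment for the unrolled loop.
 */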
ENTRY(csum_partial)
        pushl %esi
        pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: unsigned char *buff
        testl $3, %esi          # Check alignment.
        jz 2f                   # Jump if alignment is ok.
        testl $1, %esi          # Check alignment.
        jz 10f                  # Jump if buff is only 2-byte aligned.

        # buf is odd
        dec %ecx
        jl 8f
        movzbl (%esi), %ebx
        adcl %ebx, %eax
        roll $8, %eax
        inc %esi
        testl $2, %esi
        jz 2f
10:
        subl $2, %ecx           # Alignment uses up two bytes.
        jae 1f                  # Jump if we had at least two bytes.
        addl $2, %ecx           # ecx was < 2.  Deal with it.
        jmp 4f
1:      movw (%esi), %bx
        addl $2, %esi
        addw %bx, %ax
        adcl $0, %eax
2:
        movl %ecx, %edx
        shrl $5, %ecx
        jz 2f
        testl %esi, %esi        # clear the carry flag (CF) for the adcl chain
1:      movl (%esi), %ebx
        adcl %ebx, %eax
        movl 4(%esi), %ebx
        adcl %ebx, %eax
        movl 8(%esi), %ebx
        adcl %ebx, %eax
        movl 12(%esi), %ebx
        adcl %ebx, %eax
        movl 16(%esi), %ebx
        adcl %ebx, %eax
        movl 20(%esi), %ebx
        adcl %ebx, %eax
        movl 24(%esi), %ebx
        adcl %ebx, %eax
        movl 28(%esi), %ebx
        adcl %ebx, %eax
        lea 32(%esi), %esi
        dec %ecx
        jne 1b
        adcl $0, %eax
2:      movl %edx, %ecx
        andl $0x1c, %edx
        je 4f
        shrl $2, %edx           # This clears CF
3:      adcl (%esi), %eax
        lea 4(%esi), %esi
        dec %edx
        jne 3b
        adcl $0, %eax
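        # Fold in the trailing 1-3 bytes: the 16-bit word goes into the
        # high half of %ecx when a final odd byte will occupy %cl; with
        # end-around carry, which 16-bit half a word lands in does not
        # change the folded result.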
4:      andl $3, %ecx
        jz 7f
        cmpl $2, %ecx
        jb 5f
        movw (%esi),%cx
        leal 2(%esi),%esi
        je 6f
        shll $16,%ecx
5:      movb (%esi),%cl
6:      addl %ecx,%eax
        adcl $0, %eax
7:
        testb $1, 12(%esp)      # was buff odd-aligned?
        jz 8f
        roll $8, %eax           # undo the initial byte rotation
8:
        popl %ebx
        popl %esi
        ret
ENDPROC(csum_partial)

#else

/* Version for PentiumII/PPro */

ENTRY(csum_partial)
        pushl %esi
        pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: const unsigned char *buf

        testl $3, %esi
        jnz 25f
10:
        movl %ecx, %edx
        movl %ecx, %ebx
        andl $0x7c, %ebx
        shrl $7, %ecx
        addl %ebx,%esi
        shrl $2, %ebx
        negl %ebx
        lea 45f(%ebx,%ebx,2), %ebx
        testl %esi, %esi        # clear the carry flag (CF)
        jmp *%ebx
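        # Each "adcl off(%esi), %eax" in the chain below assembles to
        # 3 bytes, so the lea above yields 45f - 3*((len & 0x7c) >> 2):
        # the indirect jump runs exactly one adcl per leftover dword
        # (%esi was already advanced past them).  The testl above only
        # clears CF so the first adcl starts with no carry.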

        # Handle 2-byte-aligned regions
20:     addw (%esi), %ax
        lea 2(%esi), %esi
        adcl $0, %eax
        jmp 10b
25:
        testl $1, %esi
        jz 30f
        # buf is odd
        dec %ecx
        jl 90f
        movzbl (%esi), %ebx
        addl %ebx, %eax
        adcl $0, %eax
        roll $8, %eax
        inc %esi
        testl $2, %esi
        jz 10b

30:     subl $2, %ecx
        ja 20b
        je 32f
        addl $2, %ecx
        jz 80f
        movzbl (%esi),%ebx      # csumming 1 byte, 2-aligned
        addl %ebx, %eax
        adcl $0, %eax
        jmp 80f
32:
        addw (%esi), %ax        # csumming 2 bytes, 2-aligned
        adcl $0, %eax
        jmp 80f

40:
        addl -128(%esi), %eax
        adcl -124(%esi), %eax
        adcl -120(%esi), %eax
        adcl -116(%esi), %eax
        adcl -112(%esi), %eax
        adcl -108(%esi), %eax
        adcl -104(%esi), %eax
        adcl -100(%esi), %eax
        adcl -96(%esi), %eax
        adcl -92(%esi), %eax
        adcl -88(%esi), %eax
        adcl -84(%esi), %eax
        adcl -80(%esi), %eax
        adcl -76(%esi), %eax
        adcl -72(%esi), %eax
        adcl -68(%esi), %eax
        adcl -64(%esi), %eax
        adcl -60(%esi), %eax
        adcl -56(%esi), %eax
        adcl -52(%esi), %eax
        adcl -48(%esi), %eax
        adcl -44(%esi), %eax
        adcl -40(%esi), %eax
        adcl -36(%esi), %eax
        adcl -32(%esi), %eax
        adcl -28(%esi), %eax
        adcl -24(%esi), %eax
        adcl -20(%esi), %eax
        adcl -16(%esi), %eax
        adcl -12(%esi), %eax
        adcl -8(%esi), %eax
        adcl -4(%esi), %eax
45:
        lea 128(%esi), %esi
        adcl $0, %eax
        dec %ecx
        jge 40b
        movl %edx, %ecx
50:     andl $3, %ecx
        jz 80f

        # Handle the last 1-3 bytes without jumping
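        # Worked example: len&3 == 1 shifts the mask right by 16 bits,
        # keeping 0x0000ff; len&3 == 2 keeps 0x00ffff; len&3 == 3 keeps
        # 0xffffff -- i.e. exactly the last len&3 bytes of the dword.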
        notl %ecx               # 1->2, 2->1, 3->0, higher bits are masked
        movl $0xffffff,%ebx     # by the shll and shrl instructions
        shll $3,%ecx
        shrl %cl,%ebx
        andl -128(%esi),%ebx    # esi is 4-aligned so should be ok
        addl %ebx,%eax
        adcl $0,%eax
80:
        testb $1, 12(%esp)      # was buff odd-aligned?
        jz 90f
        roll $8, %eax           # undo the initial byte rotation
90:
        popl %ebx
        popl %esi
        ret
ENDPROC(csum_partial)

#endif

/*
unsigned int csum_partial_copy_generic(const char *src, char *dst,
                                       int len, int sum,
                                       int *src_err_ptr, int *dst_err_ptr)
*/

/*
 * Copy from ds while checksumming, otherwise like csum_partial.
 *
 * The macros SRC and DST specify the type of access for the instruction,
 * so we can dispatch to a custom exception handler for each access type.
 *
 * FIXME: could someone double-check whether I haven't mixed up some SRC and
 *        DST definitions? It's damn hard to trigger all cases.  I hope I got
 *        them all but there's no guarantee.
 */
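
/*
 * The error contract, as a C-level sketch (illustrative, matching the
 * fixup code below; parameter names follow the prototype above):
 *
 *      if a read through src faults:
 *              *src_err_ptr = -EFAULT;  // and dst[0..len) is zeroed
 *      if a write through dst faults:
 *              *dst_err_ptr = -EFAULT;
 *
 * In both cases the function still returns a sum, which the caller
 * should treat as meaningless once an error has been flagged.
 */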

#define SRC(y...)                       \
        9999: y;                        \
        _ASM_EXTABLE(9999b, 6001f)

#define DST(y...)                       \
        9999: y;                        \
        _ASM_EXTABLE(9999b, 6002f)
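
/*
 * _ASM_EXTABLE(from, to) records an exception-table entry: if the
 * instruction at "from" faults, the kernel resumes execution at "to".
 * Here every SRC() load is fixed up at label 6001 (source error) and
 * every DST() store at label 6002 (destination error).
 */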
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

#define ARGBASE 16
#define FP 12

ENTRY(csum_partial_copy_generic)
        subl $4,%esp
        pushl %edi
        pushl %esi
        pushl %ebx
        movl ARGBASE+16(%esp),%eax      # sum
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+4(%esp),%esi       # src
        movl ARGBASE+8(%esp),%edi       # dst

        testl $2, %edi          # Check alignment.
        jz 2f                   # Jump if alignment is ok.
        subl $2, %ecx           # Alignment uses up two bytes.
        jae 1f                  # Jump if we had at least two bytes.
        addl $2, %ecx           # ecx was < 2.  Deal with it.
        jmp 4f
SRC(1:  movw (%esi), %bx        )
        addl $2, %esi
DST(    movw %bx, (%edi)        )
        addl $2, %edi
        addw %bx, %ax
        adcl $0, %eax
2:
        movl %ecx, FP(%esp)
        shrl $5, %ecx
        jz 2f
        testl %esi, %esi        # clear the carry flag (CF) for the adcl chain
SRC(1:  movl (%esi), %ebx       )
SRC(    movl 4(%esi), %edx      )
        adcl %ebx, %eax
DST(    movl %ebx, (%edi)       )
        adcl %edx, %eax
DST(    movl %edx, 4(%edi)      )

SRC(    movl 8(%esi), %ebx      )
SRC(    movl 12(%esi), %edx     )
        adcl %ebx, %eax
DST(    movl %ebx, 8(%edi)      )
        adcl %edx, %eax
DST(    movl %edx, 12(%edi)     )

SRC(    movl 16(%esi), %ebx     )
SRC(    movl 20(%esi), %edx     )
        adcl %ebx, %eax
DST(    movl %ebx, 16(%edi)     )
        adcl %edx, %eax
DST(    movl %edx, 20(%edi)     )

SRC(    movl 24(%esi), %ebx     )
SRC(    movl 28(%esi), %edx     )
        adcl %ebx, %eax
DST(    movl %ebx, 24(%edi)     )
        adcl %edx, %eax
DST(    movl %edx, 28(%edi)     )

        lea 32(%esi), %esi
        lea 32(%edi), %edi
        dec %ecx
        jne 1b
        adcl $0, %eax
2:      movl FP(%esp), %edx
        movl %edx, %ecx
        andl $0x1c, %edx
        je 4f
        shrl $2, %edx           # This clears CF
SRC(3:  movl (%esi), %ebx       )
        adcl %ebx, %eax
DST(    movl %ebx, (%edi)       )
        lea 4(%esi), %esi
        lea 4(%edi), %edi
        dec %edx
        jne 3b
        adcl $0, %eax
4:      andl $3, %ecx
        jz 7f
        cmpl $2, %ecx
        jb 5f
SRC(    movw (%esi), %cx        )
        leal 2(%esi), %esi
DST(    movw %cx, (%edi)        )
        leal 2(%edi), %edi
        je 6f
        shll $16,%ecx
SRC(5:  movb (%esi), %cl        )
DST(    movb %cl, (%edi)        )
6:      addl %ecx, %eax
        adcl $0, %eax
7:
5000:

# Exception handler:
.section .fixup, "ax"

6001:
        movl ARGBASE+20(%esp), %ebx     # src_err_ptr
        movl $-EFAULT, (%ebx)

        # zero the complete destination - computing the rest
        # is too much work
        movl ARGBASE+8(%esp), %edi      # dst
        movl ARGBASE+12(%esp), %ecx     # len
        xorl %eax,%eax
        rep ; stosb

        jmp 5000b

6002:
        movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
        movl $-EFAULT,(%ebx)
        jmp 5000b

.previous

        popl %ebx
        popl %esi
        popl %edi
        popl %ecx               # equivalent to addl $4,%esp
        ret
ENDPROC(csum_partial_copy_generic)

#else

/* Version for PentiumII/PPro */

#define ROUND1(x) \
        SRC(movl x(%esi), %ebx  )       ; \
        addl %ebx, %eax                 ; \
        DST(movl %ebx, x(%edi)  )       ;

#define ROUND(x) \
        SRC(movl x(%esi), %ebx  )       ; \
        adcl %ebx, %eax                 ; \
        DST(movl %ebx, x(%edi)  )       ;
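
/*
 * ROUND1 opens a carry chain with addl; every subsequent ROUND uses
 * adcl so the carry out of the previous add is folded in, and the
 * "adcl $0,%eax" at 3: below absorbs the final carry.
 */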

#define ARGBASE 12

ENTRY(csum_partial_copy_generic)
        pushl %ebx
        pushl %edi
        pushl %esi
        movl ARGBASE+4(%esp),%esi       # src
        movl ARGBASE+8(%esp),%edi       # dst
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+16(%esp),%eax      # sum
#       movl %ecx, %edx
        movl %ecx, %ebx
        movl %esi, %edx
        shrl $6, %ecx
        andl $0x3c, %ebx
        negl %ebx
        subl %ebx, %esi
        subl %ebx, %edi
        lea -1(%esi),%edx
        andl $-32,%edx
        lea 3f(%ebx,%ebx), %ebx
        testl %esi, %esi        # clear the carry flag (CF)
        jmp *%ebx
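        # Every ROUND/ROUND1 expands to 8 bytes of code (two 3-byte movs
        # plus a 2-byte add/adc), so the "lea 3f(%ebx,%ebx)" above, with
        # %ebx = -(len & 0x3c), enters the chain 8 bytes earlier per
        # leftover dword; %esi/%edi were pre-advanced to match.  The
        # testl above only clears CF.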
1:      addl $64,%esi
        addl $64,%edi
        SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)  # read ahead in src
        ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
        ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
        ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
        ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)
3:      adcl $0,%eax
        addl $64, %edx
        dec %ecx
        jge 1b
4:      movl ARGBASE+12(%esp),%edx      # len
        andl $3, %edx
        jz 7f
        cmpl $2, %edx
        jb 5f
SRC(    movw (%esi), %dx        )
        leal 2(%esi), %esi
DST(    movw %dx, (%edi)        )
        leal 2(%edi), %edi
        je 6f
        shll $16,%edx
5:
SRC(    movb (%esi), %dl        )
DST(    movb %dl, (%edi)        )
6:      addl %edx, %eax
        adcl $0, %eax
7:
.section .fixup, "ax"
6001:   movl ARGBASE+20(%esp), %ebx     # src_err_ptr
        movl $-EFAULT, (%ebx)
        # zero the complete destination (computing the rest is too much work)
        movl ARGBASE+8(%esp),%edi       # dst
        movl ARGBASE+12(%esp),%ecx      # len
        xorl %eax,%eax
        rep; stosb
        jmp 7b
6002:   movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
        movl $-EFAULT, (%ebx)
        jmp 7b
.previous

        popl %esi
        popl %edi
        popl %ebx
        ret
ENDPROC(csum_partial_copy_generic)

#undef ROUND
#undef ROUND1

#endif