/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Pentium Pro/II routines:
 *		Alexander Kjeldaas <astor@guardian.no>
 *		Finn Arne Gangstad <finnag@guardian.no>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * Changes:	Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *			     handling.
 *		Andi Kleen,  add zeroing on error
 *			     converted to pure assembler
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h>
#include <asm/asm.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 */
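
/*
 * For reference, a rough and unoptimized C sketch of the value this routine
 * produces.  It is illustrative only and not part of the build: the 32-bit
 * intermediate need not match the assembler version bit for bit, but it
 * folds to the same 16-bit ones' complement checksum.
 *
 *	unsigned int csum_sketch(const unsigned char *buff, int len,
 *				 unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len >= 2) {		(16-bit little-endian words)
 *			acc += buff[0] | (buff[1] << 8);
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len)			(trailing odd byte)
 *			acc += buff[0];
 *		while (acc >> 32)		(fold carries back in)
 *			acc = (acc & 0xffffffff) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */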

.text

#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

/*
 * Experiments with Ethernet and SLIP connections show that buff
 * is aligned on either a 2-byte or 4-byte boundary.  We get at
 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 * alignment for the unrolled loop.
 */
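/*
 * The code below peels off a single leading byte when buff is odd, then a
 * single leading 16-bit word when %esi is still only 2-byte aligned, so the
 * unrolled loop always runs on a 4-byte-aligned pointer.  The roll $8 in the
 * odd-byte path and the matching roll $8 at label 7: keep the checksum's
 * byte order correct for odd starting addresses.
 */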
ENTRY(csum_partial)
	CFI_STARTPROC
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: unsigned char *buff
	testl $3, %esi		# Check alignment.
	jz 2f			# Jump if alignment is ok.
	testl $1, %esi		# Check alignment.
	jz 10f			# Jump if 2-byte aligned.

	# buff is odd
	dec %ecx
	jl 8f
	movzbl (%esi), %ebx
	adcl %ebx, %eax
	roll $8, %eax
	inc %esi
	testl $2, %esi
	jz 2f
10:
	subl $2, %ecx		# Alignment uses up two bytes.
	jae 1f			# Jump if we had at least two bytes.
	addl $2, %ecx		# ecx was < 2.  Deal with it.
	jmp 4f
1:	movw (%esi), %bx
	addl $2, %esi
	addw %bx, %ax
	adcl $0, %eax
2:
	movl %ecx, %edx
	shrl $5, %ecx
	jz 2f
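	# The testl below is only here to clear CF (and OF) so that the
	# adcl chain in the unrolled loop starts without a stale carry.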
	testl %esi, %esi
1:	movl (%esi), %ebx
	adcl %ebx, %eax
	movl 4(%esi), %ebx
	adcl %ebx, %eax
	movl 8(%esi), %ebx
	adcl %ebx, %eax
	movl 12(%esi), %ebx
	adcl %ebx, %eax
	movl 16(%esi), %ebx
	adcl %ebx, %eax
	movl 20(%esi), %ebx
	adcl %ebx, %eax
	movl 24(%esi), %ebx
	adcl %ebx, %eax
	movl 28(%esi), %ebx
	adcl %ebx, %eax
	lea 32(%esi), %esi
	dec %ecx
	jne 1b
	adcl $0, %eax
2:	movl %edx, %ecx
	andl $0x1c, %edx
	je 4f
	shrl $2, %edx		# This clears CF
3:	adcl (%esi), %eax
	lea 4(%esi), %esi
	dec %edx
	jne 3b
	adcl $0, %eax
4:	andl $3, %ecx
	jz 7f
	cmpl $2, %ecx
	jb 5f
	movw (%esi),%cx
	leal 2(%esi),%esi
	je 6f
	shll $16,%ecx
5:	movb (%esi),%cl
6:	addl %ecx,%eax
	adcl $0, %eax
7:
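	# 12(%esp) is still the original buff argument; if it was odd, the
	# sum was accumulated rotated by 8 (see the "buff is odd" path
	# above), so rotate it back into the expected byte order.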
	testl $1, 12(%esp)
	jz 8f
	roll $8, %eax
8:
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %esi
	CFI_RESTORE esi
	ret
	CFI_ENDPROC
ENDPROC(csum_partial)

#else

/* Version for PentiumII/PPro */

ENTRY(csum_partial)
	CFI_STARTPROC
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: const unsigned char *buf

	testl $3, %esi
	jnz 25f
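	# The code below ends in an indirect jump into the middle of the
	# unrolled adcl block at 40:/45:.  Each "adcl off(%esi),%eax"
	# entry assembles to 3 bytes, so 45f - 3*((len & 0x7c) >> 2) lands
	# on the entry that sums exactly the leading (len & 0x7c) bytes;
	# the remaining full 128-byte chunks are then handled by looping
	# back to 40:.  The testl just before the jmp only clears CF so
	# that the first adcl starts without a stale carry.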
10:
	movl %ecx, %edx
	movl %ecx, %ebx
	andl $0x7c, %ebx
	shrl $7, %ecx
	addl %ebx,%esi
	shrl $2, %ebx
	negl %ebx
	lea 45f(%ebx,%ebx,2), %ebx
	testl %esi, %esi
	jmp *%ebx

	# Handle 2-byte-aligned regions
20:	addw (%esi), %ax
	lea 2(%esi), %esi
	adcl $0, %eax
	jmp 10b
25:
	testl $1, %esi
	jz 30f
	# buff is odd
	dec %ecx
	jl 90f
	movzbl (%esi), %ebx
	addl %ebx, %eax
	adcl $0, %eax
	roll $8, %eax
	inc %esi
	testl $2, %esi
	jz 10b

30:	subl $2, %ecx
	ja 20b
	je 32f
	addl $2, %ecx
	jz 80f
	movzbl (%esi),%ebx	# csumming 1 byte, 2-aligned
	addl %ebx, %eax
	adcl $0, %eax
	jmp 80f
32:
	addw (%esi), %ax	# csumming 2 bytes, 2-aligned
	adcl $0, %eax
	jmp 80f

40:
	addl -128(%esi), %eax
	adcl -124(%esi), %eax
	adcl -120(%esi), %eax
	adcl -116(%esi), %eax
	adcl -112(%esi), %eax
	adcl -108(%esi), %eax
	adcl -104(%esi), %eax
	adcl -100(%esi), %eax
	adcl -96(%esi), %eax
	adcl -92(%esi), %eax
	adcl -88(%esi), %eax
	adcl -84(%esi), %eax
	adcl -80(%esi), %eax
	adcl -76(%esi), %eax
	adcl -72(%esi), %eax
	adcl -68(%esi), %eax
	adcl -64(%esi), %eax
	adcl -60(%esi), %eax
	adcl -56(%esi), %eax
	adcl -52(%esi), %eax
	adcl -48(%esi), %eax
	adcl -44(%esi), %eax
	adcl -40(%esi), %eax
	adcl -36(%esi), %eax
	adcl -32(%esi), %eax
	adcl -28(%esi), %eax
	adcl -24(%esi), %eax
	adcl -20(%esi), %eax
	adcl -16(%esi), %eax
	adcl -12(%esi), %eax
	adcl -8(%esi), %eax
	adcl -4(%esi), %eax
45:
	lea 128(%esi), %esi
	adcl $0, %eax
	dec %ecx
	jge 40b
	movl %edx, %ecx
50:	andl $3, %ecx
	jz 80f

	# Handle the last 1-3 bytes without jumping
	notl %ecx		# 1->2, 2->1, 3->0, higher bits are masked
	movl $0xffffff,%ebx	# by the shll and shrl instructions
	shll $3,%ecx
	shrl %cl,%ebx
	andl -128(%esi),%ebx	# esi is 4-aligned so should be ok
	addl %ebx,%eax
	adcl $0,%eax
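	# Worked example for the mask trick above, with (len & 3) bytes
	# left: notl maps 1/2/3 to shift counts of 16/8/0 once shll $3
	# multiplies by eight (shrl only looks at the low 5 bits of %cl),
	# so 0xffffff is cut down to 0xff, 0xffff or 0xffffff.  The dword
	# at -128(%esi) holds those trailing bytes; because it is 4-byte
	# aligned, the load cannot cross a page boundary and so cannot
	# fault even though it reads up to 3 bytes past the buffer.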
80:
	testl $1, 12(%esp)
	jz 90f
	roll $8, %eax
90:
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %esi
	CFI_RESTORE esi
	ret
	CFI_ENDPROC
ENDPROC(csum_partial)

#endif

/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
				  int len, int sum, int *src_err_ptr, int *dst_err_ptr)
 */

/*
 * Copy from ds while checksumming, otherwise like csum_partial
 *
 * The macros SRC and DST specify the type of access for the instruction,
 * so we can use a custom exception handler for each type of access.
 *
 * FIXME: could someone double-check whether I haven't mixed up some SRC and
 *	  DST definitions? It's damn hard to trigger all cases.  I hope I got
 *	  them all but there's no guarantee.
 */
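
/*
 * A rough C sketch of the fault-free behaviour (illustrative only, not
 * part of the build; csum_sketch is the reference sketch further up):
 *
 *	unsigned int csum_copy_sketch(const unsigned char *src,
 *				      unsigned char *dst, int len,
 *				      unsigned int sum)
 *	{
 *		memcpy(dst, src, len);
 *		return csum_sketch(dst, len, sum);
 *	}
 *
 * On a faulting source read the fixup code below stores -EFAULT through
 * src_err_ptr, zeroes the whole destination and returns 0; on a faulting
 * destination write it stores -EFAULT through dst_err_ptr and returns the
 * sum accumulated so far.
 */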

#define SRC(y...)			\
	9999: y;			\
	_ASM_EXTABLE(9999b, 6001f)

#define DST(y...)			\
	9999: y;			\
	_ASM_EXTABLE(9999b, 6002f)
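
/*
 * _ASM_EXTABLE(from, to) emits an exception-table entry: if the wrapped
 * instruction at local label 9999 faults, the page-fault handler resumes
 * execution at the fixup label instead of treating the fault as fatal
 * (6001f handles failed source reads, 6002f failed destination writes).
 */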

#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

#define ARGBASE 16
#define FP	12

ENTRY(csum_partial_copy_generic)
	CFI_STARTPROC
	subl $4,%esp
	CFI_ADJUST_CFA_OFFSET 4
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl ARGBASE+16(%esp),%eax	# sum
	movl ARGBASE+12(%esp),%ecx	# len
	movl ARGBASE+4(%esp),%esi	# src
	movl ARGBASE+8(%esp),%edi	# dst

	testl $2, %edi			# Check alignment.
	jz 2f				# Jump if alignment is ok.
	subl $2, %ecx			# Alignment uses up two bytes.
	jae 1f				# Jump if we had at least two bytes.
	addl $2, %ecx			# ecx was < 2.  Deal with it.
	jmp 4f
SRC(1:	movw (%esi), %bx	)
	addl $2, %esi
DST(	movw %bx, (%edi)	)
	addl $2, %edi
	addw %bx, %ax
	adcl $0, %eax
2:
	movl %ecx, FP(%esp)
	shrl $5, %ecx
	jz 2f
	testl %esi, %esi
SRC(1:	movl (%esi), %ebx	)
SRC(	movl 4(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, (%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 4(%edi)	)

SRC(	movl 8(%esi), %ebx	)
SRC(	movl 12(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, 8(%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 12(%edi)	)

SRC(	movl 16(%esi), %ebx	)
SRC(	movl 20(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, 16(%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 20(%edi)	)

SRC(	movl 24(%esi), %ebx	)
SRC(	movl 28(%esi), %edx	)
	adcl %ebx, %eax
DST(	movl %ebx, 24(%edi)	)
	adcl %edx, %eax
DST(	movl %edx, 28(%edi)	)

	lea 32(%esi), %esi
	lea 32(%edi), %edi
	dec %ecx
	jne 1b
	adcl $0, %eax
2:	movl FP(%esp), %edx
	movl %edx, %ecx
	andl $0x1c, %edx
	je 4f
	shrl $2, %edx			# This clears CF
SRC(3:	movl (%esi), %ebx	)
	adcl %ebx, %eax
DST(	movl %ebx, (%edi)	)
	lea 4(%esi), %esi
	lea 4(%edi), %edi
	dec %edx
	jne 3b
	adcl $0, %eax
4:	andl $3, %ecx
	jz 7f
	cmpl $2, %ecx
	jb 5f
SRC(	movw (%esi), %cx	)
	leal 2(%esi), %esi
DST(	movw %cx, (%edi)	)
	leal 2(%edi), %edi
	je 6f
	shll $16,%ecx
SRC(5:	movb (%esi), %cl	)
DST(	movb %cl, (%edi)	)
6:	addl %ecx, %eax
	adcl $0, %eax
7:
5000:

# Exception handler:
.section .fixup, "ax"

6001:
	movl ARGBASE+20(%esp), %ebx	# src_err_ptr
	movl $-EFAULT, (%ebx)

	# zero the complete destination - computing the rest
	# is too much work
	movl ARGBASE+8(%esp), %edi	# dst
	movl ARGBASE+12(%esp), %ecx	# len
	xorl %eax,%eax
	rep ; stosb

	jmp 5000b

6002:
	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
	movl $-EFAULT,(%ebx)
	jmp 5000b

.previous

	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ecx			# equivalent to addl $4,%esp
	ret
	CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)

#else

/* Version for PentiumII/PPro */

#define ROUND1(x) \
	SRC(movl x(%esi), %ebx	) ;	\
	addl %ebx, %eax		  ;	\
	DST(movl %ebx, x(%edi)	) ;

#define ROUND(x) \
	SRC(movl x(%esi), %ebx	) ;	\
	adcl %ebx, %eax		  ;	\
	DST(movl %ebx, x(%edi)	) ;

#define ARGBASE 12

ENTRY(csum_partial_copy_generic)
	CFI_STARTPROC
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	movl ARGBASE+4(%esp),%esi	#src
	movl ARGBASE+8(%esp),%edi	#dst
	movl ARGBASE+12(%esp),%ecx	#len
	movl ARGBASE+16(%esp),%eax	#sum
#	movl %ecx, %edx
	movl %ecx, %ebx
	movl %esi, %edx
	shrl $6, %ecx
	andl $0x3c, %ebx
	negl %ebx
	subl %ebx, %esi
	subl %ebx, %edi
	lea -1(%esi),%edx
	andl $-32,%edx
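	# Each ROUND/ROUND1 expansion assembles to 8 bytes of code, so the
	# computed jump below (3f - 2*(len & 0x3c)) enters the unrolled
	# block at the entry that copies and sums the leading (len & 0x3c)
	# bytes; the remaining full 64-byte chunks are handled by looping
	# back to 1:.  The testl only clears CF so the first adcl starts
	# without a stale carry.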
	lea 3f(%ebx,%ebx), %ebx
	testl %esi, %esi
	jmp *%ebx
1:	addl $64,%esi
	addl $64,%edi
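	# The two byte loads below only clobber %bl; %edx tracks a 32-byte
	# aligned cursor, so they appear to serve as a software prefetch,
	# touching the source cache lines of the 64-byte chunk about to be
	# copied (and, being SRC-wrapped, taking any source fault early).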
	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)
	ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
	ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
	ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
	ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)
3:	adcl $0,%eax
	addl $64, %edx
	dec %ecx
	jge 1b
4:	movl ARGBASE+12(%esp),%edx	#len
	andl $3, %edx
	jz 7f
	cmpl $2, %edx
	jb 5f
SRC(	movw (%esi), %dx	)
	leal 2(%esi), %esi
DST(	movw %dx, (%edi)	)
	leal 2(%edi), %edi
	je 6f
	shll $16,%edx
5:
SRC(	movb (%esi), %dl	)
DST(	movb %dl, (%edi)	)
6:	addl %edx, %eax
	adcl $0, %eax
7:
.section .fixup, "ax"
6001:	movl ARGBASE+20(%esp), %ebx	# src_err_ptr
	movl $-EFAULT, (%ebx)
	# zero the complete destination (computing the rest is too much work)
	movl ARGBASE+8(%esp),%edi	# dst
	movl ARGBASE+12(%esp),%ecx	# len
	xorl %eax,%eax
	rep; stosb
	jmp 7b
6002:	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
	movl $-EFAULT, (%ebx)
	jmp 7b
.previous

	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebx
	CFI_RESTORE ebx
	ret
	CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)

#undef ROUND
#undef ROUND1

#endif