1da177e4 LT
/*
 * linux/arch/arm/lib/memzero.S
 *
 * Copyright (C) 1995-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

        .text
        .align  5
        .word   0
/*
 * Align the pointer in r0. r3 contains the number of bytes that we are
 * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we
 * don't bother; we use byte stores instead.
 */
1:      subs    r1, r1, #4              @ 1 do we have enough
        blt     5f                      @ 1 bytes to align with?
        cmp     r3, #2                  @ 1
        strltb  r2, [r0], #1            @ 1
        strleb  r2, [r0], #1            @ 1
        strb    r2, [r0], #1            @ 1
        add     r1, r1, r3              @ 1 (r1 = r1 - (4 - r3))
/*
 * The pointer is now aligned and the length is adjusted. Try doing the
 * memzero again.
 */
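The conditional byte stores above are compact but dense, so here is a minimal C sketch of the same prologue. It is a model for illustration, not kernel code: align_prologue is a hypothetical name, dst and len stand in for r0 and r1, and mis stands in for r3 = dst & 3.

    #include <stddef.h>

    /* Hypothetical C model of the alignment prologue at label 1:.
     * dst/len mirror r0/r1; mis mirrors r3 = dst & 3 and is 1..3 here. */
    static unsigned char *align_prologue(unsigned char *dst, size_t *len,
                                         unsigned mis)
    {
        if (*len < 4)            /* "blt 5f": too short, the byte path handles it */
            return dst;
        if (mis < 2)             /* strltb: runs only when mis == 1 */
            *dst++ = 0;
        if (mis <= 2)            /* strleb: runs when mis == 1 or 2 */
            *dst++ = 0;
        *dst++ = 0;              /* strb: always runs */
        *len -= 4 - mis;         /* subs #4 then add r3: r1 -= (4 - r3) */
        return dst;
    }

With mis in {1, 2, 3}, the strltb/strleb/strb trio stores exactly 4 - mis bytes, which is why the count can be fixed up with a single add of r3 after the earlier subtraction of 4.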

ENTRY(__memzero)
        mov     r2, #0                  @ 1
        ands    r3, r0, #3              @ 1 unaligned?
        bne     1b                      @ 1
/*
 * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary.
 */
        cmp     r1, #16                 @ 1 we can skip this chunk if we
        blt     4f                      @ 1 have < 16 bytes
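Taken together with the alignment prologue, the entry sequence is a small dispatcher: zero the scratch register, branch back to fix alignment when needed, then skip the block-store path entirely when fewer than 16 bytes remain. A hedged C model of that control flow; memzero_model is an illustrative name and memset() only stands in for the aligned bulk and tail loops that follow.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Rough C model of ENTRY(__memzero)'s dispatch; not kernel code. */
    void memzero_model(void *p, size_t n)
    {
        unsigned char *dst = p;
        unsigned mis = (uintptr_t)dst & 3;      /* ands r3, r0, #3      */

        if (mis && n >= 4) {                    /* bne 1b / subs, blt 5f */
            for (unsigned i = mis; i < 4; i++)
                *dst++ = 0;                     /* 1..3 byte stores      */
            n -= 4 - mis;
        }
        if (n >= 16) {                          /* cmp r1, #16 / blt 4f  */
            memset(dst, 0, n & ~(size_t)15);    /* 64/32/16-byte stores  */
            dst += n & ~(size_t)15;
            n &= 15;
        }
        memset(dst, 0, n);                      /* labels 4: and 5:      */
    }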
f91a8dcc NP

#if ! CALGN(1)+0

1da177e4 LT
/*
 * We need an extra register for this loop - save the return address and
 * use the LR
 */
        str     lr, [sp, #-4]!          @ 1
        mov     ip, r2                  @ 1
        mov     lr, r2                  @ 1

3:      subs    r1, r1, #64             @ 1 write 64 bytes out per loop
        stmgeia r0!, {r2, r3, ip, lr}   @ 4
        stmgeia r0!, {r2, r3, ip, lr}   @ 4
        stmgeia r0!, {r2, r3, ip, lr}   @ 4
        stmgeia r0!, {r2, r3, ip, lr}   @ 4
        bgt     3b                      @ 1
1b93a717
        ldmeqfd sp!, {pc}               @ 1/2 quick exit
1da177e4 LT
/*
 * No need to correct the count; we're only testing bits from now on
 */
        tst     r1, #32                 @ 1
        stmneia r0!, {r2, r3, ip, lr}   @ 4
        stmneia r0!, {r2, r3, ip, lr}   @ 4
        tst     r1, #16                 @ 1 16 bytes or more?
        stmneia r0!, {r2, r3, ip, lr}   @ 4
        ldr     lr, [sp], #4            @ 1

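In C terms, this path clears 64 bytes per loop iteration with four four-register stores, then uses single bit tests on the count instead of a second loop for the 32- and 16-byte remainders; only the low bits of r1 matter by then, so the count is never corrected after going negative. A sketch under those assumptions; bulk_zero is an illustrative name and it presumes a word-aligned dst with at least 16 bytes to clear.

    #include <stddef.h>
    #include <stdint.h>

    /* Rough C model of the "#if ! CALGN(1)+0" bulk path; not kernel code.
     * Each stmia of {r2, r3, ip, lr} stores four zero words (16 bytes),
     * so one loop iteration clears 64 bytes. */
    static unsigned char *bulk_zero(unsigned char *dst, size_t n)
    {
        uint32_t *w = (uint32_t *)dst;

        while (n >= 64) {               /* subs #64 / 4 x stmgeia / bgt */
            for (int i = 0; i < 16; i++)
                *w++ = 0;
            n -= 64;
        }
        if (n & 32)                     /* tst r1, #32 / 2 x stmneia */
            for (int i = 0; i < 8; i++)
                *w++ = 0;
        if (n & 16)                     /* tst r1, #16 / 1 x stmneia */
            for (int i = 0; i < 4; i++)
                *w++ = 0;
        return (unsigned char *)w;      /* n & 15 bytes are left for 4: */
    }

The assembly additionally returns early through ldmeqfd when the length was an exact multiple of 64, which this model omits.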
f91a8dcc NP
#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

        stmfd   sp!, {r4-r7, lr}
        mov     r4, r2
        mov     r5, r2
        mov     r6, r2
        mov     r7, r2
        mov     ip, r2
        mov     lr, r2

        cmp     r1, #96
        andgts  ip, r0, #31
        ble     3f

        rsb     ip, ip, #32
        sub     r1, r1, ip
        movs    ip, ip, lsl #(32 - 4)
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r4, r5}
        movs    ip, ip, lsl #2
        strcs   r2, [r0], #4

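The shift games with ip are the only non-obvious part of this preamble: after ip = 32 - (r0 & 31), the value is a multiple of 4 between 4 and 28, so bits 4, 3 and 2 describe it completely. The movs with lsl #(32 - 4) parks bit 4 in the carry flag and bit 3 in the N (sign) flag, selecting a 16-byte and an 8-byte store, and the second shift by 2 moves bit 2 into carry for the final 4-byte store. A hedged C model of the same bookkeeping with explicit bit tests; cacheline_align is an illustrative name, not kernel code.

    #include <stddef.h>
    #include <stdint.h>

    /* Rough C model of the CALGN preamble.  Assumes dst is word aligned,
     * not yet 32-byte aligned, and *len > 96 on entry. */
    static unsigned char *cacheline_align(unsigned char *dst, size_t *len)
    {
        uint32_t *w = (uint32_t *)dst;
        unsigned pad = 32 - ((uintptr_t)dst & 31);  /* rsb ip, ip, #32 */

        *len -= pad;                                /* sub r1, r1, ip  */
        if (pad & 16)                               /* C flag after lsl #28 */
            for (int i = 0; i < 4; i++)
                *w++ = 0;                           /* stmcsia {r4-r7}      */
        if (pad & 8)                                /* N flag after lsl #28 */
            for (int i = 0; i < 2; i++)
                *w++ = 0;                           /* stmmiia {r4, r5}     */
        if (pad & 4)                                /* C flag after lsl #2  */
            *w++ = 0;                               /* strcs                */
        return (unsigned char *)w;                  /* now 32-byte aligned  */
    }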
3:      subs    r1, r1, #64
        stmgeia r0!, {r2-r7, ip, lr}
        stmgeia r0!, {r2-r7, ip, lr}
        bgt     3b
        ldmeqfd sp!, {r4-r7, pc}

        tst     r1, #32
        stmneia r0!, {r2-r7, ip, lr}
        tst     r1, #16
        stmneia r0!, {r4-r7}
        ldmfd   sp!, {r4-r7, lr}

#endif

1da177e4 LT
4:      tst     r1, #8                  @ 1 8 bytes or more?
        stmneia r0!, {r2, r3}           @ 2
        tst     r1, #4                  @ 1 4 bytes or more?
        strne   r2, [r0], #4            @ 1
/*
 * When we get here, we've got less than 4 bytes to zero. We
 * may have an unaligned pointer as well.
 */
5:      tst     r1, #2                  @ 1 2 bytes or more?
        strneb  r2, [r0], #1            @ 1
        strneb  r2, [r0], #1            @ 1
        tst     r1, #1                  @ 1 a byte left over
        strneb  r2, [r0], #1            @ 1
6ebbf2ce
        ret     lr                      @ 1
93ed3970
ENDPROC(__memzero)
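For completeness, the trailing 8/4/2/1-byte handling at labels 4: and 5: maps to straightforward bit tests on the remaining length. The assembly uses word stores for the 8- and 4-byte cases, which is safe because the pointer is word aligned whenever label 4: is reached; the sketch below uses memset() for those cases to stay portable, and tail_zero is an illustrative name, not kernel code.

    #include <stddef.h>
    #include <string.h>

    /* Rough C model of labels 4: and 5:: rem is the remaining length,
     * at most 15 bytes once the bulk path has run. */
    static void tail_zero(unsigned char *dst, size_t rem)
    {
        if (rem & 8) {                   /* tst r1, #8 / stmneia {r2, r3} */
            memset(dst, 0, 8);
            dst += 8;
        }
        if (rem & 4) {                   /* tst r1, #4 / strne */
            memset(dst, 0, 4);
            dst += 4;
        }
        if (rem & 2) {                   /* tst r1, #2 / two strneb */
            *dst++ = 0;
            *dst++ = 0;
        }
        if (rem & 1)                     /* tst r1, #1 / strneb */
            *dst = 0;
    }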