/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Fill in the buffer with character c (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - buf
 *	x1 - c
 *	x2 - n
 * Returns:
 *	x0 - buf
 */

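/*
 * Register aliases: dstin, val and count carry the incoming arguments,
 * dst is the running store pointer, and A_l holds the fill byte
 * replicated across all eight bytes of the register.
 */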
dstin		.req	x0
val		.req	w1
count		.req	x2
tmp1		.req	x3
tmp1w		.req	w3
tmp2		.req	x4
tmp2w		.req	w4
zva_len_x	.req	x5
zva_len		.req	w5
zva_bits_x	.req	x6

A_l		.req	x7
A_lw		.req	w7
dst		.req	x8
tmp3w		.req	w9
tmp3		.req	x9

	.weak memset
ENTRY(__memset)
ENTRY(memset)
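	/*
	 * The and/orr sequence below replicates the low byte of c into
	 * every byte of A_l: 8 bits are folded out to 16, then 32, then 64.
	 */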
	mov	dst, dstin	/* Preserve return value. */
	and	A_lw, val, #255
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32
65 | ||
66 | cmp count, #15 | |
67 | b.hi .Lover16_proc | |
68 | /*All store maybe are non-aligned..*/ | |
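	/*
	 * count <= 15 here, so its low four bits describe the whole fill:
	 * test each bit in turn and emit an 8-, 4-, 2- or 1-byte store for
	 * every bit that is set.
	 */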
	tbz	count, #3, 1f
	str	A_l, [dst], #8
1:
	tbz	count, #2, 2f
	str	A_lw, [dst], #4
2:
	tbz	count, #1, 3f
	strh	A_lw, [dst], #2
3:
	tbz	count, #0, 4f
	strb	A_lw, [dst]
4:
	ret

.Lover16_proc:
	/* Is the start address 16-byte aligned? */
	neg	tmp2, dst
	ands	tmp2, tmp2, #15
	b.eq	.Laligned
	/*
	 * count is at least 16, so an stp can safely store the first 16
	 * bytes; then advance dst past the misaligned head so that it sits
	 * on a 16-byte boundary.
	 */
	stp	A_l, A_l, [dst]		/* Unaligned store. */
	/* Make dst 16-byte aligned. */
	sub	count, count, tmp2
	add	dst, dst, tmp2

.Laligned:
	cbz	A_l, .Lzero_mem		/* A zero fill can use DC ZVA. */

.Ltail_maybe_long:
	cmp	count, #64
	b.ge	.Lnot_short
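	/*
	 * The low six bits of count hold the remaining length (0-63 bytes).
	 * Bits 5:4 select how many 16-byte stp stores to execute (0 to 3)
	 * by branching into the chain below; the final 0-15 bytes are
	 * handled at label 3.
	 */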
.Ltail63:
	ands	tmp1, count, #0x30
	b.eq	3f
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	stp	A_l, A_l, [dst], #16
1:
	stp	A_l, A_l, [dst], #16
2:
	stp	A_l, A_l, [dst], #16
	/*
	 * Fewer than 16 bytes remain. Write the final 16 bytes of the
	 * buffer with a single stp ending at dst + count; some bytes are
	 * written twice and the store may be unaligned.
	 */
3:
	ands	count, count, #15
	cbz	count, 4f
	add	dst, dst, count
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
4:
	ret

	/*
	 * Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
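	/*
	 * Main fill loop: dst is pre-biased by 16 so the first three stp
	 * stores use offsets 16, 32 and 48, and the final pre-indexed stp
	 * advances dst by 64 while storing the last 16 bytes of the pass.
	 */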
.Lnot_short:
	sub	dst, dst, #16	/* Pre-bias. */
	sub	count, count, #64
1:
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
	b.ge	1b
	tst	count, #0x3f
	add	dst, dst, #16
	b.ne	.Ltail63
.Lexitfunc:
	ret

	/*
	 * For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines.
	 */
.Lzero_mem:
	cmp	count, #63
	b.le	.Ltail63
	/*
	 * For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code.
	 */
	cmp	count, #128
	b.lt	.Lnot_short	/* Fall through only if count >= 128. */

	mrs	tmp1, dczid_el0
	tbnz	tmp1, #4, .Lnot_short
	mov	tmp3w, #4
	and	zva_len, tmp1w, #15	/* Safety: other bits reserved. */
	lsl	zva_len, tmp3w, zva_len

	ands	tmp3w, zva_len, #63
	/*
	 * Ensure zva_len is at least 64: it is not worth using ZVA when the
	 * block size is smaller than 64 bytes.
	 */
	b.ne	.Lnot_short
.Lzero_by_line:
	/*
	 * Compute how far we need to go to become suitably aligned. We're
	 * already at quad-word alignment.
	 */
	cmp	count, zva_len_x
	b.lt	.Lnot_short		/* Not enough to reach alignment. */
	sub	zva_bits_x, zva_len_x, #1
	neg	tmp2, dst
	ands	tmp2, tmp2, zva_bits_x
	b.eq	2f			/* Already aligned. */
	/* Not aligned; check that there's enough to zero after alignment. */
	sub	tmp1, count, tmp2
	/*
	 * Make sure the length remaining after alignment is at least 64
	 * bytes and at least one ZVA block, so the loop at 2f cannot run
	 * past the end of the buffer.
	 */
	cmp	tmp1, #64
	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
	b.lt	.Lnot_short
	/*
	 * We know that there's at least 64 bytes to zero and that it's safe
	 * to overrun by 64 bytes.
	 */
	mov	count, tmp1
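	/*
	 * Zero the tmp2 bytes needed to reach ZVA alignment with ordinary
	 * stp stores, 64 bytes per pass. tmp2 goes negative by the amount
	 * of overrun, so adding it back to dst afterwards lands exactly on
	 * the ZVA boundary.
	 */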
1:
	stp	A_l, A_l, [dst]
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	subs	tmp2, tmp2, #64
	stp	A_l, A_l, [dst, #48]
	add	dst, dst, #64
	b.ge	1b
	/* We've overrun a bit, so adjust dst downwards. */
	add	dst, dst, tmp2
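	/*
	 * dst is now aligned to the ZVA block size. count is pre-decremented
	 * by one block so the loop keeps issuing dc zva (which zeroes one
	 * whole block) while at least one full block remains; the masked-out
	 * remainder is then handed back to the stp tail code.
	 */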
2:
	sub	count, count, zva_len_x
3:
	dc	zva, dst
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
	b.ge	3b
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long
	ret
ENDPIPROC(memset)
ENDPROC(__memset)