Commit | Line | Data |
---|---|---|
ad2fc2cd VM |
1 | /* |
2 | * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com> | |
3 | * Copyright 2002 Andi Kleen, SuSE Labs. | |
1da177e4 | 4 | * Subject to the GNU Public License v2. |
ad2fc2cd VM |
5 | * |
6 | * Functions to copy from and to user space. | |
7 | */ | |
1da177e4 | 8 | |
8d379dad JB |
9 | #include <linux/linkage.h> |
10 | #include <asm/dwarf2.h> | |
3022d734 AK |
11 | #include <asm/current.h> |
12 | #include <asm/asm-offsets.h> | |
13 | #include <asm/thread_info.h> | |
14 | #include <asm/cpufeature.h> | |
4307bec9 | 15 | #include <asm/alternative-asm.h> |
9732da8c | 16 | #include <asm/asm.h> |
63bcff2a | 17 | #include <asm/smap.h> |
3022d734 | 18 | |
4307bec9 FY |
19 | /* |
20 | * By placing feature2 after feature1 in altinstructions section, we logically | |
21 | * implement: | |
22 | * If CPU has feature2, jmp to alt2 is used | |
23 | * else if CPU has feature1, jmp to alt1 is used | |
24 | * else jmp to orig is used. | |
25 | */ | |
26 | .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2 | |
3022d734 | 27 | 0: |
48c7a250 | 28 | jmp \orig |
3022d734 AK |
29 | 1: |
30 | .section .altinstr_replacement,"ax" | |
48c7a250 BP |
31 | 2: |
32 | jmp \alt1 | |
33 | 3: | |
34 | jmp \alt2 | |
3022d734 | 35 | .previous |
4307bec9 | 36 | |
3022d734 | 37 | .section .altinstructions,"a" |
4332195c BP |
38 | altinstruction_entry 0b,2b,\feature1,5,5,0 |
39 | altinstruction_entry 0b,3b,\feature2,5,5,0 | |
3022d734 AK |
40 | .previous |
41 | .endm | |
1da177e4 | 42 | |
ad2fc2cd | 43 | .macro ALIGN_DESTINATION |
ad2fc2cd VM |
44 | /* check for bad alignment of destination */ |
45 | movl %edi,%ecx | |
46 | andl $7,%ecx | |
47 | jz 102f /* already aligned */ | |
48 | subl $8,%ecx | |
49 | negl %ecx | |
50 | subl %ecx,%edx | |
51 | 100: movb (%rsi),%al | |
52 | 101: movb %al,(%rdi) | |
53 | incq %rsi | |
54 | incq %rdi | |
55 | decl %ecx | |
56 | jnz 100b | |
57 | 102: | |
58 | .section .fixup,"ax" | |
afd962a9 | 59 | 103: addl %ecx,%edx /* ecx is zerorest also */ |
ad2fc2cd VM |
60 | jmp copy_user_handle_tail |
61 | .previous | |
62 | ||
9732da8c PA |
63 | _ASM_EXTABLE(100b,103b) |
64 | _ASM_EXTABLE(101b,103b) | |
ad2fc2cd VM |
65 | .endm |
66 | ||
67 | /* Standard copy_to_user with segment limit checking */ | |
3c93ca00 | 68 | ENTRY(_copy_to_user) |
8d379dad | 69 | CFI_STARTPROC |
1da177e4 LT |
70 | GET_THREAD_INFO(%rax) |
71 | movq %rdi,%rcx | |
72 | addq %rdx,%rcx | |
ad2fc2cd | 73 | jc bad_to_user |
26ccb8a7 | 74 | cmpq TI_addr_limit(%rax),%rcx |
26afb7c6 | 75 | ja bad_to_user |
4307bec9 FY |
76 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ |
77 | copy_user_generic_unrolled,copy_user_generic_string, \ | |
78 | copy_user_enhanced_fast_string | |
8d379dad | 79 | CFI_ENDPROC |
3c93ca00 | 80 | ENDPROC(_copy_to_user) |
7bcd3f34 | 81 | |
ad2fc2cd | 82 | /* Standard copy_from_user with segment limit checking */ |
9f0cf4ad | 83 | ENTRY(_copy_from_user) |
3022d734 | 84 | CFI_STARTPROC |
ad2fc2cd VM |
85 | GET_THREAD_INFO(%rax) |
86 | movq %rsi,%rcx | |
87 | addq %rdx,%rcx | |
88 | jc bad_from_user | |
89 | cmpq TI_addr_limit(%rax),%rcx | |
26afb7c6 | 90 | ja bad_from_user |
4307bec9 FY |
91 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ |
92 | copy_user_generic_unrolled,copy_user_generic_string, \ | |
93 | copy_user_enhanced_fast_string | |
3022d734 | 94 | CFI_ENDPROC |
9f0cf4ad | 95 | ENDPROC(_copy_from_user) |
3022d734 | 96 | |
1da177e4 LT |
97 | .section .fixup,"ax" |
98 | /* must zero dest */ | |
ad2fc2cd | 99 | ENTRY(bad_from_user) |
1da177e4 | 100 | bad_from_user: |
8d379dad | 101 | CFI_STARTPROC |
1da177e4 LT |
102 | movl %edx,%ecx |
103 | xorl %eax,%eax | |
104 | rep | |
105 | stosb | |
106 | bad_to_user: | |
ad2fc2cd | 107 | movl %edx,%eax |
1da177e4 | 108 | ret |
8d379dad | 109 | CFI_ENDPROC |
ad2fc2cd | 110 | ENDPROC(bad_from_user) |
1da177e4 | 111 | .previous |
ad2fc2cd | 112 | |
1da177e4 | 113 | /* |
3022d734 | 114 | * copy_user_generic_unrolled - memory copy with exception handling. |
ad2fc2cd VM |
115 | * This version is for CPUs like P4 that don't have efficient micro |
116 | * code for rep movsq | |
117 | * | |
118 | * Input: | |
1da177e4 LT |
119 | * rdi destination |
120 | * rsi source | |
121 | * rdx count | |
122 | * | |
ad2fc2cd | 123 | * Output: |
0d2eb44f | 124 | * eax uncopied bytes or 0 if successful. |
1da177e4 | 125 | */ |
3022d734 | 126 | ENTRY(copy_user_generic_unrolled) |
8d379dad | 127 | CFI_STARTPROC |
63bcff2a | 128 | ASM_STAC |
ad2fc2cd VM |
129 | cmpl $8,%edx |
130 | jb 20f /* less then 8 bytes, go to byte copy loop */ | |
131 | ALIGN_DESTINATION | |
132 | movl %edx,%ecx | |
133 | andl $63,%edx | |
134 | shrl $6,%ecx | |
135 | jz 17f | |
136 | 1: movq (%rsi),%r8 | |
137 | 2: movq 1*8(%rsi),%r9 | |
138 | 3: movq 2*8(%rsi),%r10 | |
139 | 4: movq 3*8(%rsi),%r11 | |
140 | 5: movq %r8,(%rdi) | |
141 | 6: movq %r9,1*8(%rdi) | |
142 | 7: movq %r10,2*8(%rdi) | |
143 | 8: movq %r11,3*8(%rdi) | |
144 | 9: movq 4*8(%rsi),%r8 | |
145 | 10: movq 5*8(%rsi),%r9 | |
146 | 11: movq 6*8(%rsi),%r10 | |
147 | 12: movq 7*8(%rsi),%r11 | |
148 | 13: movq %r8,4*8(%rdi) | |
149 | 14: movq %r9,5*8(%rdi) | |
150 | 15: movq %r10,6*8(%rdi) | |
151 | 16: movq %r11,7*8(%rdi) | |
7bcd3f34 AK |
152 | leaq 64(%rsi),%rsi |
153 | leaq 64(%rdi),%rdi | |
7bcd3f34 | 154 | decl %ecx |
ad2fc2cd VM |
155 | jnz 1b |
156 | 17: movl %edx,%ecx | |
157 | andl $7,%edx | |
158 | shrl $3,%ecx | |
159 | jz 20f | |
160 | 18: movq (%rsi),%r8 | |
161 | 19: movq %r8,(%rdi) | |
7bcd3f34 | 162 | leaq 8(%rsi),%rsi |
ad2fc2cd VM |
163 | leaq 8(%rdi),%rdi |
164 | decl %ecx | |
165 | jnz 18b | |
166 | 20: andl %edx,%edx | |
167 | jz 23f | |
7bcd3f34 | 168 | movl %edx,%ecx |
ad2fc2cd VM |
169 | 21: movb (%rsi),%al |
170 | 22: movb %al,(%rdi) | |
7bcd3f34 | 171 | incq %rsi |
ad2fc2cd | 172 | incq %rdi |
7bcd3f34 | 173 | decl %ecx |
ad2fc2cd VM |
174 | jnz 21b |
175 | 23: xor %eax,%eax | |
63bcff2a | 176 | ASM_CLAC |
7bcd3f34 AK |
177 | ret |
178 | ||
ad2fc2cd VM |
179 | .section .fixup,"ax" |
180 | 30: shll $6,%ecx | |
181 | addl %ecx,%edx | |
182 | jmp 60f | |
661c8019 | 183 | 40: leal (%rdx,%rcx,8),%edx |
ad2fc2cd VM |
184 | jmp 60f |
185 | 50: movl %ecx,%edx | |
186 | 60: jmp copy_user_handle_tail /* ecx is zerorest also */ | |
187 | .previous | |
7bcd3f34 | 188 | |
9732da8c PA |
189 | _ASM_EXTABLE(1b,30b) |
190 | _ASM_EXTABLE(2b,30b) | |
191 | _ASM_EXTABLE(3b,30b) | |
192 | _ASM_EXTABLE(4b,30b) | |
193 | _ASM_EXTABLE(5b,30b) | |
194 | _ASM_EXTABLE(6b,30b) | |
195 | _ASM_EXTABLE(7b,30b) | |
196 | _ASM_EXTABLE(8b,30b) | |
197 | _ASM_EXTABLE(9b,30b) | |
198 | _ASM_EXTABLE(10b,30b) | |
199 | _ASM_EXTABLE(11b,30b) | |
200 | _ASM_EXTABLE(12b,30b) | |
201 | _ASM_EXTABLE(13b,30b) | |
202 | _ASM_EXTABLE(14b,30b) | |
203 | _ASM_EXTABLE(15b,30b) | |
204 | _ASM_EXTABLE(16b,30b) | |
205 | _ASM_EXTABLE(18b,40b) | |
206 | _ASM_EXTABLE(19b,40b) | |
207 | _ASM_EXTABLE(21b,50b) | |
208 | _ASM_EXTABLE(22b,50b) | |
8d379dad | 209 | CFI_ENDPROC |
ad2fc2cd | 210 | ENDPROC(copy_user_generic_unrolled) |
8d379dad | 211 | |
ad2fc2cd VM |
212 | /* Some CPUs run faster using the string copy instructions. |
213 | * This is also a lot simpler. Use them when possible. | |
214 | * | |
215 | * Only 4GB of copy is supported. This shouldn't be a problem | |
216 | * because the kernel normally only writes from/to page sized chunks | |
217 | * even if user space passed a longer buffer. | |
218 | * And more would be dangerous because both Intel and AMD have | |
219 | * errata with rep movsq > 4GB. If someone feels the need to fix | |
220 | * this please consider this. | |
221 | * | |
222 | * Input: | |
223 | * rdi destination | |
224 | * rsi source | |
225 | * rdx count | |
226 | * | |
227 | * Output: | |
228 | * eax uncopied bytes or 0 if successful. | |
229 | */ | |
3022d734 | 230 | ENTRY(copy_user_generic_string) |
8d379dad | 231 | CFI_STARTPROC |
63bcff2a | 232 | ASM_STAC |
ad2fc2cd VM |
233 | cmpl $8,%edx |
234 | jb 2f /* less than 8 bytes, go to byte copy loop */ | |
235 | ALIGN_DESTINATION | |
1da177e4 LT |
236 | movl %edx,%ecx |
237 | shrl $3,%ecx | |
ad2fc2cd VM |
238 | andl $7,%edx |
239 | 1: rep | |
3022d734 | 240 | movsq |
ad2fc2cd VM |
241 | 2: movl %edx,%ecx |
242 | 3: rep | |
243 | movsb | |
f4cb1cc1 | 244 | xorl %eax,%eax |
63bcff2a | 245 | ASM_CLAC |
1da177e4 | 246 | ret |
3022d734 | 247 | |
ad2fc2cd | 248 | .section .fixup,"ax" |
661c8019 | 249 | 11: leal (%rdx,%rcx,8),%ecx |
ad2fc2cd VM |
250 | 12: movl %ecx,%edx /* ecx is zerorest also */ |
251 | jmp copy_user_handle_tail | |
252 | .previous | |
2cbc9ee3 | 253 | |
9732da8c PA |
254 | _ASM_EXTABLE(1b,11b) |
255 | _ASM_EXTABLE(3b,12b) | |
ad2fc2cd VM |
256 | CFI_ENDPROC |
257 | ENDPROC(copy_user_generic_string) | |
4307bec9 FY |
258 | |
259 | /* | |
260 | * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. | |
261 | * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. | |
262 | * | |
263 | * Input: | |
264 | * rdi destination | |
265 | * rsi source | |
266 | * rdx count | |
267 | * | |
268 | * Output: | |
269 | * eax uncopied bytes or 0 if successful. | |
270 | */ | |
271 | ENTRY(copy_user_enhanced_fast_string) | |
272 | CFI_STARTPROC | |
63bcff2a | 273 | ASM_STAC |
4307bec9 FY |
274 | movl %edx,%ecx |
275 | 1: rep | |
276 | movsb | |
f4cb1cc1 | 277 | xorl %eax,%eax |
63bcff2a | 278 | ASM_CLAC |
4307bec9 FY |
279 | ret |
280 | ||
281 | .section .fixup,"ax" | |
282 | 12: movl %ecx,%edx /* ecx is zerorest also */ | |
283 | jmp copy_user_handle_tail | |
284 | .previous | |
285 | ||
9732da8c | 286 | _ASM_EXTABLE(1b,12b) |
4307bec9 FY |
287 | CFI_ENDPROC |
288 | ENDPROC(copy_user_enhanced_fast_string) |