/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

/* Always defined here: enables the destination-alignment prologue
 * emitted by ALIGN_DESTINATION below. */
#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * ALIGN_DESTINATION: byte-copy up to 7 bytes so that the destination
 * (%rdi) becomes 8-byte aligned before the quadword loops run.
 *
 * In:	rdi = destination, rsi = source, edx = total byte count
 * Out:	rdi 8-byte aligned, rsi advanced by the same amount, edx reduced
 *	by the number of bytes copied here; ecx clobbered (zero on the
 *	fall-through path).
 *
 * If one of the byte moves at 100:/101: faults, the .fixup entry at
 * 103: adds the not-yet-copied alignment bytes (still in ecx) back
 * into edx and tails out via copy_user_handle_tail, which is expected
 * to take the remaining length in edx (defined elsewhere — confirm
 * against arch/x86/lib/usercopy_64.c).
 */
	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx			/* ecx = 8 - (rdi & 7) = bytes needed */
	subl %ecx,%edx			/* remove them from the running count */
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 100b,103b
	.quad 101b,103b
	.previous
#endif
	.endm

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 *
 * In:	rdi = destination, rsi = source, edx = byte count
 * Out:	eax = 0 on success (set just before ret).  On a fault, the
 *	.fixup code recomputes the number of uncopied bytes into edx and
 *	jumps to copy_user_handle_tail to finish/clean up byte-wise.
 *
 * The bulk stores use movnti (non-temporal, cache-bypassing); every
 * exit path — success and all fault fixups — issues sfence so those
 * weakly-ordered stores are globally visible before returning.
 */
ENTRY(__copy_user_nocache)
	CFI_STARTPROC
	cmpl $8,%edx
	jb 20f			/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION

	/* main loop: one 64-byte chunk per iteration, ecx = chunk count */
	movl %edx,%ecx
	andl $63,%edx		/* edx = leftover bytes (< 64) for 17:/20: */
	shrl $6,%ecx
	jz 17f
	/* load 4 quadwords, then store them non-temporally; twice per pass */
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b

	/* quadword loop for the 8..63 byte remainder */
17:	movl %edx,%ecx
	andl $7,%edx		/* edx = final byte remainder (< 8) */
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b

	/* byte loop for the final 0..7 bytes */
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xorl %eax,%eax		/* success: return 0 */
	sfence			/* drain the non-temporal stores */
	ret
	CFI_ENDPROC

	/*
	 * Fault fixups: rebuild the not-copied byte count in edx from the
	 * loop state at the fault site, fence the movnti stores already
	 * issued, then hand off to copy_user_handle_tail.
	 */
	.section .fixup,"ax"
30:	shll $6,%ecx		/* 64B loop: ecx chunks left * 64 ... */
	addl %ecx,%edx		/* ... plus the remainder still in edx */
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx	/* 8B loop: edx + ecx qwords left * 8 */
	jmp 60f
50:	movl %ecx,%edx		/* byte loop: ecx bytes left */
60:	sfence
	jmp copy_user_handle_tail
	.previous

	/* Any faulting load/store above maps to its loop's fixup entry. */
	.section __ex_table,"a"
	.quad 1b,30b
	.quad 2b,30b
	.quad 3b,30b
	.quad 4b,30b
	.quad 5b,30b
	.quad 6b,30b
	.quad 7b,30b
	.quad 8b,30b
	.quad 9b,30b
	.quad 10b,30b
	.quad 11b,30b
	.quad 12b,30b
	.quad 13b,30b
	.quad 14b,30b
	.quad 15b,30b
	.quad 16b,30b
	.quad 18b,40b
	.quad 19b,40b
	.quad 21b,50b
	.quad 22b,50b
	.previous
ENDPROC(__copy_user_nocache)