x86: wrong register was used in align macro
arch/x86/lib/copy_user_nocache_64.S

/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
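
/*
 * ALIGN_DESTINATION uses the register layout of the copy routine below:
 * %rdi = destination, %rsi = source, %edx = byte count.  It byte-copies
 * until %rdi is 8-byte aligned, subtracting the alignment bytes from
 * %edx up front.  On a fault, the fixup at 103 adds the not-yet-copied
 * alignment bytes in %ecx back into %edx (the register the commit title
 * above refers to) before jumping to copy_user_handle_tail.
 */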
        .macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
        /* check for bad alignment of destination */
        movl %edi,%ecx
        andl $7,%ecx
        jz 102f                         /* already aligned */
        subl $8,%ecx
        negl %ecx
        subl %ecx,%edx
100:    movb (%rsi),%al
101:    movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 100b
102:
        .section .fixup,"ax"
103:    addl %ecx,%edx                  /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        .section __ex_table,"a"
        .align 8
        .quad 100b,103b
        .quad 101b,103b
        .previous
#endif
        .endm

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 */
ENTRY(__copy_user_nocache)
        CFI_STARTPROC
        cmpl $8,%edx
        jb 20f                          /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        andl $63,%edx
        shrl $6,%ecx
        jz 17f
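        /*
         * Main loop: each iteration copies 64 bytes with four 8-byte
         * loads followed by four movnti non-temporal stores, done twice,
         * so the stores bypass the cache.  %ecx holds the number of
         * 64-byte blocks (count >> 6), %edx the bytes left over
         * (count & 63).
         */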
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movnti %r8,(%rdi)
6:      movnti %r9,1*8(%rdi)
7:      movnti %r10,2*8(%rdi)
8:      movnti %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movnti %r8,4*8(%rdi)
14:     movnti %r9,5*8(%rdi)
15:     movnti %r10,6*8(%rdi)
16:     movnti %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz 1b
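        /*
         * Fewer than 64 bytes remain: copy them 8 bytes at a time.
         * %ecx becomes the remaining quadword count, %edx the trailing
         * byte count.
         */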
17:     movl %edx,%ecx
        andl $7,%edx
        shrl $3,%ecx
        jz 20f
18:     movq (%rsi),%r8
19:     movnti %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz 18b
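        /* copy the final 1..7 bytes, if any, with plain byte moves */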
20:     andl %edx,%edx
        jz 23f
        movl %edx,%ecx
21:     movb (%rsi),%al
22:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 21b
23:     xorl %eax,%eax
        sfence
        ret

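        /*
         * Exception fixups: each handler recomputes the number of bytes
         * still to be copied and leaves it in %edx -- 30 for the 64-byte
         * loop, 40 for the 8-byte loop, 50 for the byte loop -- then
         * falls through to 60, which issues sfence for the non-temporal
         * stores and jumps to copy_user_handle_tail with that count.
         */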
        .section .fixup,"ax"
30:     shll $6,%ecx
        addl %ecx,%edx
        jmp 60f
40:     lea (%rdx,%rcx,8),%rdx
        jmp 60f
50:     movl %ecx,%edx
60:     sfence
        jmp copy_user_handle_tail
        .previous

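        /*
         * Exception table: pairs each instruction that may fault on a
         * user access with the fixup label that handles it.
         */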
        .section __ex_table,"a"
        .quad 1b,30b
        .quad 2b,30b
        .quad 3b,30b
        .quad 4b,30b
        .quad 5b,30b
        .quad 6b,30b
        .quad 7b,30b
        .quad 8b,30b
        .quad 9b,30b
        .quad 10b,30b
        .quad 11b,30b
        .quad 12b,30b
        .quad 13b,30b
        .quad 14b,30b
        .quad 15b,30b
        .quad 16b,30b
        .quad 18b,40b
        .quad 19b,40b
        .quad 21b,50b
        .quad 22b,50b
        .previous
        CFI_ENDPROC
ENDPROC(__copy_user_nocache)