arch/x86/lib/memcpy_64.S
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>

#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *	rdi destination
 *	rsi source
 *	rdx count
 *
 * Output:
 *	rax original destination
 */

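/*
 * Added note (not in the original source): a C-level call memcpy(dst, src,
 * len) reaches this code with the arguments in the SysV AMD64 argument
 * registers. Illustrative caller sketch only; dst, src and len are
 * placeholder symbols:
 *
 *	leaq	dst(%rip), %rdi		# destination buffer
 *	leaq	src(%rip), %rsi		# source buffer
 *	movq	$len, %rdx		# byte count
 *	call	memcpy			# returns original destination in %rax
 */
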
/*
 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c:
	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
.Lmemcpy_e:
	.previous

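/*
 * Added note (not in the original source): worked example of the quadword/
 * byte split in memcpy_c(), assuming a count of %rdx = 29 on entry:
 *
 *	movq %rdx, %rcx; shrq $3, %rcx	-> %rcx = 3 (8-byte words for REP MOVSQ)
 *	andl $7, %edx			-> %edx = 5 (leftover bytes for REP MOVSB)
 *
 * 3*8 + 5 = 29, so the two string operations together copy the full count.
 */
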
/*
 * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than
 * memcpy_c. Use memcpy_c_e when possible.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c_e:
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
.Lmemcpy_e_e:
	.previous

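/*
 * Added note (not in the original source): X86_FEATURE_ERMS corresponds to
 * the "Enhanced REP MOVSB/STOSB" CPUID bit, CPUID.(EAX=07H,ECX=0):EBX[9].
 * A minimal stand-alone check could look roughly like this (illustrative
 * sketch only; the kernel tests its cached cpufeature bits instead):
 *
 *	movl	$7, %eax
 *	xorl	%ecx, %ecx
 *	cpuid
 *	btl	$9, %ebx		# CF=1 -> ERMS available
 */
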
.weak memcpy

ENTRY(__memcpy)
ENTRY(memcpy)
	CFI_STARTPROC
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * Check whether a memory false dependence could occur,
	 * then jump to the corresponding copy mode.
	 */
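	/*
	 * Added note (not in the original source): only the low address
	 * bytes are compared below, which is a cheap heuristic. CPU memory
	 * disambiguation works on partial address bits, so when the source
	 * sits at a lower (aliasing) address than the destination, a
	 * forward copy can make later loads falsely appear to depend on
	 * earlier stores; copying backward in that case avoids the stall.
	 */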
	cmp %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

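	/*
	 * Added note (not in the original source): worked example of the
	 * count bookkeeping above for %rdx = 0x45 (69 bytes):
	 *
	 *	pre-loop subq:  0x45 - 0x20 = 0x25
	 *	1st iter subq:  0x25 - 0x20 = 0x05, no borrow -> copy 32, loop
	 *	2nd iter subq:  0x05 - 0x20 borrows          -> copy 32, exit
	 *	addl $0x20:     %edx = 0x05
	 *
	 * The loop copied 64 bytes; the remaining 5 go to .Lhandle_tail.
	 */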
.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPs in the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
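	/*
	 * Added note (not in the original source): the tail cases copy the
	 * first and the last words of the range and let them overlap, which
	 * handles any in-between length without a loop. Worked example for
	 * %edx = 20:
	 *
	 *	%r8/%r9   <- source bytes  0..15
	 *	%r10/%r11 <- source bytes  4..19 (addressed via %rdx - 16)
	 *
	 * All loads are issued before any store, so the 12-byte overlap in
	 * the destination is written consistently.
	 */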
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
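	/*
	 * Added note (not in the original source): the flags set by the
	 * subl above are still live here because movzbl does not modify
	 * them. CF set meant the count was 0 (handled by the jb), and ZF
	 * set means the count was exactly 1, so only one byte is stored.
	 */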
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
	CFI_ENDPROC
ENDPROC(memcpy)
ENDPROC(__memcpy)

	/*
	 * Some CPUs provide the enhanced REP MOVSB/STOSB (ERMS) feature.
	 * If that feature is supported, memcpy_c_e() is the first choice.
	 * If enhanced REP MOVSB is not available, use the fast string copy
	 * memcpy_c() when possible: it is faster and its code is simpler
	 * than the original memcpy(). Otherwise, the original memcpy() is
	 * used. In the .altinstructions section, the ERMS entry is placed
	 * after the REP_GOOD entry to get the right patch order.
	 *
	 * Replace only the beginning: memcpy itself is used while applying
	 * alternatives, so it would be silly for it to overwrite itself
	 * with NOPs - a reboot would be the only outcome...
	 */
	.section .altinstructions, "a"
	altinstruction_entry __memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
			     .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c,0
	altinstruction_entry __memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
			     .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e,0
	.previous
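
/*
 * Added note (not in the original source): later kernels express this same
 * three-way dispatch with the ALTERNATIVE_2 macro instead of open-coded
 * altinstruction_entry records. A rough, illustrative sketch of that form
 * (not taken from this tree) is:
 *
 *	ENTRY(memcpy)
 *		ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 *			      "jmp memcpy_erms", X86_FEATURE_ERMS
 *		movq %rdi, %rax
 *		movq %rdx, %rcx
 *		shrq $3, %rcx
 *		andl $7, %edx
 *		rep movsq
 *		movl %edx, %ecx
 *		rep movsb
 *		ret
 *	ENDPROC(memcpy)
 */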