/* memcpy.S: Sparc optimized memcpy and memmove code
 * Hand optimized from GNU libc's memcpy and memmove
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/export.h>

/* Both these macros have to start with exactly the same insn */
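/* MOVE_BIGCHUNK copies 0x20 bytes from src + offset to dst + offset using
 * four doubleword loads and eight word stores; MOVE_BIGALIGNCHUNK moves the
 * same 0x20 bytes but stores with std, so it needs a doubleword-aligned
 * destination.
 */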
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c]; \
	st	%t4, [%dst + (offset) + 0x10]; \
	st	%t5, [%dst + (offset) + 0x14]; \
	st	%t6, [%dst + (offset) + 0x18]; \
	st	%t7, [%dst + (offset) + 0x1c];

#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	std	%t0, [%dst + (offset) + 0x00]; \
	std	%t2, [%dst + (offset) + 0x08]; \
	std	%t4, [%dst + (offset) + 0x10]; \
	std	%t6, [%dst + (offset) + 0x18];
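/* MOVE_LASTCHUNK and MOVE_LASTALIGNCHUNK copy the 0x10 bytes that end at
 * src - offset over to dst - offset, again with word stores in the plain
 * variant and doubleword stores in the aligned one.
 */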
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	st	%t0, [%dst - (offset) - 0x10]; \
	st	%t1, [%dst - (offset) - 0x0c]; \
	st	%t2, [%dst - (offset) - 0x08]; \
	st	%t3, [%dst - (offset) - 0x04];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	std	%t0, [%dst - (offset) - 0x10]; \
	std	%t2, [%dst - (offset) - 0x08];

#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src - (offset) - 0x02], %t0; \
	ldub	[%src - (offset) - 0x01], %t1; \
	stb	%t0, [%dst - (offset) - 0x02]; \
	stb	%t1, [%dst - (offset) - 0x01];
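/* MOVE_SHORTCHUNK moves the two bytes ending at src - offset one byte at a
 * time, so it is safe for any alignment.
 */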
/* Both these macros have to start with exactly the same insn */
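/* The RMOVE_* macros mirror the MOVE_* macros above but address their chunks
 * downward from the current pointers; they belong to the descending-address
 * copy path that memmove takes for overlapping regions.
 */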
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	st	%t0, [%dst - (offset) - 0x20]; \
	st	%t1, [%dst - (offset) - 0x1c]; \
	st	%t2, [%dst - (offset) - 0x18]; \
	st	%t3, [%dst - (offset) - 0x14]; \
	st	%t4, [%dst - (offset) - 0x10]; \
	st	%t5, [%dst - (offset) - 0x0c]; \
	st	%t6, [%dst - (offset) - 0x08]; \
	st	%t7, [%dst - (offset) - 0x04];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	std	%t0, [%dst - (offset) - 0x20]; \
	std	%t2, [%dst - (offset) - 0x18]; \
	std	%t4, [%dst - (offset) - 0x10]; \
	std	%t6, [%dst - (offset) - 0x08];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c];

#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src + (offset) + 0x00], %t0; \
	ldub	[%src + (offset) + 0x01], %t1; \
	stb	%t0, [%dst + (offset) + 0x00]; \
	stb	%t1, [%dst + (offset) + 0x01];
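/* SMOVE_CHUNK and SMOVE_ALIGNCHUNK handle copies whose source and destination
 * are not mutually word aligned: each expansion loads 0x10 source bytes with
 * ldd, realigns them with the shil/shir shift pair, carries the spill bits
 * across expansions in %prev, and stores the merged words with std.
 */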
#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t5; \
	srl	%t1, shir, %t6; \
	sll	%t0, shil, %t0; \
	or	%t5, %prev, %t5; \
	sll	%t1, shil, %prev; \
	or	%t6, %t0, %t0; \
	srl	%t2, shir, %t1; \
	srl	%t3, shir, %t6; \
	sll	%t2, shil, %t2; \
	or	%t1, %prev, %t1; \
	std	%t4, [%dst + (offset) + (offset2) - 0x04]; \
	std	%t0, [%dst + (offset) + (offset2) + 0x04]; \
	sll	%t3, shil, %prev; \
	or	%t6, %t2, %t4;

#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t4; \
	srl	%t1, shir, %t5; \
	sll	%t0, shil, %t6; \
	or	%t4, %prev, %t0; \
	sll	%t1, shil, %prev; \
	or	%t5, %t6, %t1; \
	srl	%t2, shir, %t4; \
	srl	%t3, shir, %t5; \
	sll	%t2, shil, %t6; \
	or	%t4, %prev, %t2; \
	sll	%t3, shil, %prev; \
	or	%t5, %t6, %t3; \
	std	%t0, [%dst + (offset) + (offset2) + 0x00]; \
	std	%t2, [%dst + (offset) + (offset2) + 0x08];
	nop				! Only bcopy returns here and it returns void...

EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)
1:	/* reverse_bytes */

	/* NOTE: This code is executed only in the cases where
	 *       (%src & 3), i.e. (%o1 & 3), is != 0.
	 *       We need to align the source to 4, so for (%src & 3) ==
	 *         1 we need to do ldub, lduh
	 *         2 we need to do lduh
	 *         3 we need to do just ldub
	 *       i.e. 4 - (%src & 3) head bytes get copied, so even if it
	 *       looks weird, the branches are correct here.  -jj
	 */
78:	/* dword_align */
FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
EXPORT_SYMBOL(memcpy)

	andcc	%g1, 0xffffff80, %g0
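	/* %g1 presumably holds the remaining byte count here: the andcc result
	 * is discarded into %g0 and only the condition codes are kept; any bit
	 * set under 0xffffff80 means at least one 0x80-byte block is left.
	 */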
	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
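	/* The four expansions above form the unrolled main block: 0x80 bytes
	 * are moved per pass.
	 */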
	jmpl	%o5 + %lo(80f), %g0

79:	/* memcpy_table */

	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
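	/* The jmpl above enters this run of 0x10-byte moves at a computed
	 * offset, so only as many expansions execute as the remaining length
	 * still requires.
	 */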
80:	/* memcpy_table_end */

81:	/* memcpy_last7 */

	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
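	/* Doubleword-aligned main block: the same 0x80 bytes per pass as
	 * above, but stored with std.
	 */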
	jmpl	%o5 + %lo(84f), %g0

83:	/* amemcpy_table */

	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
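	/* As with 79: above, the jmpl enters this table at a computed offset so
	 * that only the 0x10-byte aligned moves still needed are executed.
	 */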
84:	/* amemcpy_table_end */

	std	%g2, [%o0 - 0x08]

85:	/* amemcpy_last7 */

86:	/* non_aligned */

	restore	%g7, %g0, %o0

	jmpl	%o5 + %lo(89f), %g0

	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
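	/* Short-copy table: entered through the computed jmpl above, it moves
	 * the remaining bytes two at a time with byte loads and stores.
	 */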
89:	/* short_table_end */

90:	/* short_aligned_end */