/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
.file "cast5-avx-x86_64-asm_64.S"
/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
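
/*
 * km: 16 32-bit masking subkeys, kr: 16 rotation subkeys (one byte
 * each), rr: flag for the reduced 12-round variant (key lengths of
 * 80 bits or less), in which case rounds 12-15 are not used.
 */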
/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
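
/*
 * lookup_32bit performs the four s-box lookups for one 32-bit word in
 * src: the low two bytes index s1 and s2, then src is shifted right by
 * 16 so the next two bytes index s3 and s4.  The partial results are
 * combined into dst with op1/op2/op3, the add/sub/xor pattern of the
 * current round function type.
 */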
#define lookup_32bit(src, dst, op1, op2, op3) \
	movb		src ## bl,     RID1b;    \
	movb		src ## bh,     RID2b;    \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	shrq $16,	src;                     \
	movb		src ## bl,     RID1b;    \
	movb		src ## bh,     RID2b;    \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;
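
/*
 * F computes the CAST5 round function on the four 32-bit words packed
 * in x: combine with the masking key in RKM (op0), rotate left by the
 * per-round amount (a left shift by RKRF merged with a right shift by
 * RKRR = 32 - kr[n]), then substitute through the s-boxes.  The s-box
 * lookups need general purpose registers, so the vector is extracted
 * to RGI1/RGI2 and the results reassembled with vmovq/vpinsrq.
 */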
#define F(a, x, op0, op1, op2, op3) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vpshufb	RMASK,	x,    x;                 \
	vmovq		x,    RGI1;              \
	vpsrldq $8,	x,    x;                 \
	vmovq		x,    RGI2;              \
	\
	lookup_32bit(RGI1, RFS1, op1, op2, op3); \
	shrq $16,	RGI1;                    \
	lookup_32bit(RGI1, RFS2, op1, op2, op3); \
	shlq $32,	RFS2;                    \
	orq		RFS1, RFS2;              \
	\
	lookup_32bit(RGI2, RFS1, op1, op2, op3); \
	shrq $16,	RGI2;                    \
	lookup_32bit(RGI2, RFS3, op1, op2, op3); \
	shlq $32,	RFS3;                    \
	orq		RFS1, RFS3;              \
	\
	vmovq		RFS2, x;                 \
	vpinsrq $1,	RFS3, x,    x;
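
/* The three CAST5 round function types (RFC 2144, section 2.2). */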
#define F1(b, x) F(b, x, vpaddd, xorl, subl, addl)
#define F2(b, x) F(b, x, vpxor,  subl, addl, xorl)
#define F3(b, x) F(b, x, vpsubd, addl, xorl, subl)
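
/*
 * subround applies round function f to one (l, r) register pair.  The
 * round macro broadcasts the 32-bit masking subkey km[n] into RKM,
 * inserts the rotation byte kr[n] into RKRF, derives the complementary
 * right-shift count RKRR = 32 - kr[n], and then runs the subround on
 * all four register pairs.
 */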
#define subround(a, b, x, n, f) \
	F ## f(b, x); \
	vpxor a, x, a;
#define round(l, r, n, f) \
	vbroadcastss	(km+(4*n))(CTX), RKM;        \
	vpinsrb $0,	(kr+n)(CTX),     RKRF, RKRF; \
	vpsubq		RKRF,            R32,  RKRR; \
	subround(l ## 1, r ## 1, RX, n, f);          \
	subround(l ## 2, r ## 2, RX, n, f);          \
	subround(l ## 3, r ## 3, RX, n, f);          \
	subround(l ## 4, r ## 4, RX, n, f);
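
/*
 * transpose_2x4 interleaves two vectors of four 32-bit words so that
 * the left halves of four blocks are gathered in x0 and the right
 * halves in x1 (in a permuted lane order; applying the macro a second
 * time restores the original layout).
 */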
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq	x1, x0, t0; \
	vpunpckhdq	x1, x0, t1; \
	\
	vpunpcklqdq	t1, t0, x0; \
	vpunpckhqdq	t1, t0, x1;
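
/*
 * Load four 8-byte blocks (32 bytes) from in, byte-swap each 32-bit
 * word (CAST5 is defined on big-endian words), and split the blocks so
 * that x0 holds the four left halves and x1 the four right halves;
 * outunpack_blocks reverses the process and stores the result to out.
 */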
#define inpack_blocks(in, x0, x1, t0, t1) \
	vmovdqu (0*4*4)(in),	x0; \
	vmovdqu (1*4*4)(in),	x1; \
	vpshufb RMASK,	x0,	x0; \
	vpshufb RMASK,	x1,	x1; \
	\
	transpose_2x4(x0, x1, t0, t1)
#define outunpack_blocks(out, x0, x1, t0, t1) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb RMASK,	x0, x0;       \
	vpshufb RMASK,	x1, x1;       \
	vmovdqu x0,	(0*4*4)(out); \
	vmovdqu x1,	(1*4*4)(out);
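
/*
 * As outunpack_blocks, but XOR the result into the bytes already at
 * out; this is the path taken when the xor-output flag is set.
 */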
#define outunpack_xor_blocks(out, x0, x1, t0, t1) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb RMASK,	x0, x0;         \
	vpshufb RMASK,	x1, x1;         \
	vpxor (0*4*4)(out),	x0, x0; \
	vmovdqu x0,	(0*4*4)(out);   \
	vpxor (1*4*4)(out),	x1, x1; \
	vmovdqu x1,	(1*4*4)(out);
.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.L32_mask:
	.byte 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
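
/*
 * Each xmm register holds four 32-bit block halves, so the four RL/RR
 * register pairs process 4 x 4 = 16 blocks per call.  If ctx->rr is
 * set, only the reduced 12-round key schedule is in use and rounds
 * 12-15 are skipped.
 */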
.text

.align 16
.global __cast5_enc_blk_16way
.type   __cast5_enc_blk_16way,@function;

__cast5_enc_blk_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: bool, if true: xor output
	 */

	pushq %rbx;
	pushq %rcx;
	vmovdqu .Lbswap_mask, RMASK;
	vmovdqu .L32_mask, R32;
	vpxor RKRF, RKRF, RKRF;

	inpack_blocks(%rdx, RL1, RR1, RTMP, RX);
	leaq (2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL2, RR2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL3, RR3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL4, RR4, RTMP, RX);
	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	movb rr(CTX), %al;
	testb %al, %al;
	jnz __skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

__skip_enc:
	popq %rcx;
	popq %rbx;

	testb %cl, %cl;
	jnz __enc_xor16;
	outunpack_blocks(%rsi, RR1, RL1, RTMP, RX);
	leaq (2*4*4)(%rsi), %rax;
	outunpack_blocks(%rax, RR2, RL2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR3, RL3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR4, RL4, RTMP, RX);

	ret;

__enc_xor16:
	outunpack_xor_blocks(%rsi, RR1, RL1, RTMP, RX);
	leaq (2*4*4)(%rsi), %rax;
	outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX);

	ret;
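
/*
 * Decryption uses the same round machinery with the subkeys applied in
 * reverse order; for the reduced 12-round variant, rounds 15-12 are
 * skipped up front instead.
 */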
.align 16
.global cast5_dec_blk_16way
.type   cast5_dec_blk_16way,@function;

cast5_dec_blk_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %rbx;

	vmovdqu .Lbswap_mask, RMASK;
	vmovdqu .L32_mask, R32;
	vpxor RKRF, RKRF, RKRF;
	inpack_blocks(%rdx, RL1, RR1, RTMP, RX);
	leaq (2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL2, RR2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL3, RR3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL4, RR4, RTMP, RX);
	movb rr(CTX), %al;
	testb %al, %al;
	jnz __skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

__skip_dec:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	popq %rbx;
	outunpack_blocks(%rsi, RR1, RL1, RTMP, RX);
	leaq (2*4*4)(%rsi), %rax;
	outunpack_blocks(%rax, RR2, RL2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR3, RL3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR4, RL4, RTMP, RX);

	ret;