/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4
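/*
 * The four CAST S-boxes (cast_s1..cast_s4) are not defined in this file;
 * they are the lookup tables shared with the other CAST implementations
 * (the tables were moved to a common module, hence the .extern references
 * above).
 */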
/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
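/*
 * These offsets are assumed to mirror the C-side cast5 context layout:
 * sixteen 32-bit masking keys (Km) at offset 0, sixteen 8-bit rotation
 * keys (Kr) packed right after them, and the reduced-rounds flag (rr)
 * following the rotation keys.
 */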
/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

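/*
 * lookup_32bit performs the four CAST5 s-box lookups for one 32-bit value
 * held in a general-purpose register: bh/bl index s1/s2, the register is
 * shifted right by 16, and the next two bytes index s3/s4.  op1..op3 are
 * the combining operations (xorl/subl/addl, depending on the round function
 * type).  Each GPR actually carries two 32-bit values, and interleave_op
 * lets the shift that exposes the second value be scheduled between the
 * table loads.
 */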
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	shrq $16, src; \
	movl s1(, RID1, 4), dst ## d; \
	op1 s2(, RID2, 4), dst ## d; \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	op2 s3(, RID1, 4), dst ## d; \
	op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;
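/*
 * F_head and F_tail together form the vectorized CAST5 f-function.  F_head
 * applies the masking key (op0 is vpaddd/vpxor/vpsubd), rotates every
 * 32-bit lane left by the round's rotation amount (left shift by RKRF
 * or'ed with right shift by RKRR = 32 - RKRF) and moves the four lanes
 * into two 64-bit GPRs.  F_tail runs the scalar s-box lookups on those
 * GPRs and packs the results back into an xmm register.
 */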
#define F_head(a, x, gi1, gi2, op0) \
	op0 a, RKM, x; \
	vpslld RKRF, x, RTMP; \
	vpsrld RKRR, x, x; \
	vpor RTMP, x, x; \
	\
	vmovq x, gi1; \
	vpextrq $1, x, gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32, RFS2; \
	orq RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32, RFS1; \
	orq RFS1, RFS3; \
	\
	vmovq RFS2, x; \
	vpinsrq $1, RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;
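/*
 * CAST5 cycles through three round function types which differ only in how
 * the masking key is applied and how the four s-box values are combined
 * (type 1: add then xor/sub/add, type 2: xor then sub/add/xor, type 3:
 * sub then add/xor/sub; see RFC 2144).  Each _2 variant processes two
 * register pairs, i.e. eight blocks, per invocation.
 */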
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);
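/*
 * One Feistel round over all 16 blocks: broadcast round n's 32-bit masking
 * key from the context, mask the low five bits of the current rotation-key
 * byte (via .Lfirst_mask in R1ST) to get the left-rotate count, derive the
 * complementary right-shift count (32 - count, via R32), and advance RKR
 * by one byte so the next round sees its own rotation key.
 */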
#define round(l, r, n, f) \
	vbroadcastss (km+(4*n))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
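/*
 * The per-round rotation keys are loaded once, byte-packed, into RKR.  The
 * xor with .L16_mask adds 16 (mod 32) to every rotation amount; this
 * appears to compensate for lookup_32bit consuming the low 16 bits of each
 * value first, so the extra rotation presents the bytes in the order the
 * s-box lookups expect.  Decryption walks the rounds in reverse, so its
 * variant also reverses the packed byte order.
 */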
#define enc_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR; \
	vpshufb .Lbswap128_mask, RKR, RKR;
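/*
 * inpack_blocks byte-swaps each 32-bit word (CAST5 is defined on
 * big-endian words) and transposes a register pair so that one register
 * ends up with the left halves of four blocks and the other with the right
 * halves, which is the layout the vectorized rounds expect.
 * outunpack_blocks is the inverse transformation.
 */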
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t1; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1;

.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text
.align 16
.type __cast5_enc_blk16,@function;

__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);
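	/*
	 * CAST5 uses only 12 rounds for keys of 80 bits or less; the rr flag
	 * in the context selects that variant, so rounds 12..15 are skipped
	 * when it is set.
	 */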
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz __skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

__skip_enc:
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
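	/*
	 * The register groups are handed to outunpack_blocks in swapped
	 * (RR, RL) order, which effectively performs the final half-swap of
	 * the Feistel network as part of the output rearrangement.
	 */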
	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

.align 16
.type __cast5_dec_blk16,@function;

__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;
	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz __skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);
	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %rbp;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;
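/*
 * Reduced-rounds (12 round) decryption skips rounds 15..12, so the four
 * rotation-key bytes belonging to those rounds are stepped over before
 * falling through to the common tail.
 */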
__skip_dec:
	vpsrldq $4, RKR, RKR;
	jmp __dec_tail;
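/*
 * The global entry points below use the SysV calling convention:
 * %rdi = context, %rsi = destination, %rdx = source (plus %rcx = IV for
 * the CTR variant).  A plausible C-side declaration - an illustrative
 * assumption, not taken from this file - would be:
 *
 *	asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst,
 *					    const u8 *src);
 */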
.align 16
.global cast5_ecb_enc_16way
.type cast5_ecb_enc_16way,@function;

cast5_ecb_enc_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	ret;

.align 16
.global cast5_ecb_dec_16way
.type cast5_ecb_dec_16way,@function;

cast5_ecb_dec_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	ret;

.align 16
.global cast5_cbc_dec_16way
.type cast5_cbc_dec_16way,@function;

cast5_cbc_dec_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
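	/*
	 * CBC decryption: after the 16 blocks are decrypted, each one is
	 * xored with the preceding ciphertext block re-read from src.  The
	 * first block is xored with zero here (the vpshufd below clears the
	 * low quadword of RX), so its IV xor is presumably left to the
	 * C glue code.
	 */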
	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src */
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	ret;

.align 16
.global cast5_ctr_16way
.type cast5_ctr_16way,@function;

cast5_ctr_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */
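	/*
	 * CTR mode: the 64-bit big-endian counter is loaded and converted to
	 * little-endian, then 16 consecutive counter values are generated
	 * with 64-bit additions (+1 for the first, then +2 per register, as
	 * each register holds two counters).  The counters are byte-swapped
	 * back to big-endian before encryption, and the counter following
	 * the last block (IV + 16) is written back through %rcx.
	 */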
	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	/* store last IV */
	vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);

	call __cast5_enc_blk16;

	/* dst = src ^ iv */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	ret;