/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H
#ifdef __ASSEMBLY__

/* Macros to generate vector instruction byte code */
#define REG_NUM_INVALID	255
/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store register number
 * @gr:		String designating the register in the format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = REG_NUM_INVALID
	/* ... one .ifc case per general-purpose register, %r0 through %r15 ... */
	.if \opd == REG_NUM_INVALID
		.error "Invalid general-purpose register designation: \gr"
	.endif
.endm
/* VX_R() - Macro to encode the VX_NUM into the instruction */
#define VX_R(v)		(v & 0x0F)
/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store register number
 * @vxr:	String designating the register in the format "%vN"
 *
 * The vector register number is used as an input to the instruction and
 * to compute the RXB field of the instruction.  To encode a particular
 * vector register number in the instruction, use the VX_R(v) macro to
 * extract its four low-order bits.
 */
.macro	VX_NUM	opd vxr
	\opd = REG_NUM_INVALID
	/* ... one .ifc case per vector register, %v0 through %v31 ... */
	.if \opd == REG_NUM_INVALID
		.error "Invalid vector register designation: \vxr"
	.endif
.endm
/* RXB - Compute the RXB field from the most significant bits of the
 *	 vector register operands
 *
 * @rxb:	Operand to store computed RXB value
 * @v1:		First vector register designated operand
 * @v2:		Second vector register designated operand
 * @v3:		Third vector register designated operand
 * @v4:		Fourth vector register designated operand
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	/* ... \rxb is cleared, then one bit is set for each operand designating %v16..%v31 ... */
.endm
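/*
 * Example (illustrative, assuming the conventional RXB bit assignment with
 * the first operand in the leftmost bit): RXB rxb, 17, 2 leaves rxb = 0x08,
 * since only the first operand (%v17) needs its fifth register-number bit
 * carried in the RXB field.
 */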
/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:		Element size control
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm
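/*
 * Example (illustrative): MRXB 3, 1, 2 emits the byte 0x30 (element size
 * control 3 in the high nibble, RXB = 0 because both registers are below
 * %v16).
 */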
/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:		Element size control
 * @opc:	Opcode
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
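/*
 * Example (illustrative): MRXBOPC 0, 0x36, 1, 3 emits the bytes 0x00 0x36,
 * i.e. the trailing RXB/opcode bytes of a VLM %v1,%v3,... encoding.
 */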
/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
	.word	(0xE700 | (VX_R(v1) << 4))
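/*
 * Usage sketch (illustrative): VECTOR GENERATE BYTE MASK sets each byte of
 * the target register to 0xFF or 0x00 according to the corresponding bit of
 * a 16-bit immediate mask, so 0xFFFF yields an all-ones vector and 0x0000 a
 * zeroed vector.
 */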
/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	.word	0xE700 | (VX_R(v1) << 4) | r3
	.word	(b2 << 12) | (\disp)
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
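/*
 * Usage sketch (illustrative): VLVGF %v16, %r2, 3 replaces word element 3
 * of %v16 with the low 32 bits of %r2; the B, H, F, and G variants select
 * byte, halfword, word, and doubleword element size.
 */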
/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	.word	0xE700 | (VX_R(v1) << 4) | x2
	.word	(b2 << 12) | (\disp)
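/*
 * Usage sketch (illustrative): VL %v16, 0, %r0, %r3 loads the 16 bytes at
 * 0(%r3) into %v16 (passing %r0 as the index register means "no index").
 */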
/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	.word	0xE700 | (VX_R(v1) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm
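/*
 * Usage sketch (illustrative): VLEF %v1, 0, %r0, %r3, 2 loads the word at
 * 0(%r3) into word element 2 of %v1 and leaves the other elements intact.
 */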
/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	.word	0xE700 | (VX_R(v1) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm
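/*
 * Usage sketch (illustrative): VLEIG %v9, 64, 0 places the sign-extended
 * immediate 64 into doubleword element 0 of %v9; the third operand is the
 * element index.
 */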
/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	.word	0xE700 | (r1 << 4) | VX_R(v3)
	.word	(b2 << 12) | (\disp)
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
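/*
 * Usage sketch (illustrative): VLGVF %r2, %v2, 3 copies word element 3 of
 * %v2, right-aligned, into %r2.
 */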
/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base
	GR_NUM	b2, \base	/* Base register */
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x36, v1, v3
.endm
/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base
	GR_NUM	b2, \base	/* Base register */
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x3E, v1, v3
.endm
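/*
 * Usage sketch (illustrative): VLM %v20, %v23, 0, %r5 loads the 64 bytes at
 * 0(%r5) into %v20-%v23; VSTM with the same operands stores them back.
 */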
/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	VX_R(v4), 0x8C, v1, v2, v3, v4
.endm
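/*
 * Usage sketch (illustrative): VPERM %v1, %v2, %v3, %v4 builds each byte of
 * %v1 by selecting a byte from the concatenation of %v2 and %v3, indexed by
 * the corresponding byte of %v4.
 */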
/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm
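/*
 * Usage sketch (illustrative): VUPLLF %v2, %v3 zero-extends the word
 * elements from the rightmost half of %v3 into doubleword elements of %v2.
 */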
/* Vector integer instructions */

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm
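/* Usage sketch (illustrative): VX %v1, %v2, %v3 sets %v1 = %v2 XOR %v3. */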
/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm
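/*
 * Usage sketch (illustrative): VGFMG %v1, %v2, %v3 carry-less multiplies the
 * doubleword elements of %v2 and %v3 and XORs the two 128-bit products
 * together into %v1.
 */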
/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12) | (\m5 << 8)
	MRXBOPC	VX_R(v4), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm
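/*
 * Usage sketch (illustrative): VGFMAG %v1, %v0, %v1, %v2 performs the
 * doubleword carry-less multiply-sum of %v0 and %v1, XORs %v2 into the
 * result, and leaves it in the first operand, e.g. as a CRC folding step.
 */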
/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm
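/*
 * Usage sketch (illustrative): VSRLB %v1, %v4, %v9 shifts the 128-bit value
 * in %v4 right by the number of bytes specified in byte element 7 of %v9
 * and stores the result in %v1.
 */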
#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_S390_VX_INSN_H */