/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
11 #ifndef __ASM_S390_VX_INSN_H
12 #define __ASM_S390_VX_INSN_H
17 /* Macros to generate vector instruction byte code */
/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd: Operand to store the register number
 * @r64: String designating the register in the format "%rN"
 */
/* VX_NUM - Retrieve vector register number
 *
 * @opd: Operand to store the register number
 * @vxr: String designating the register in the format "%vN"
 *
 * The vector register number is used as an input to the instruction,
 * as well as to compute the RXB field of the instruction.
 */
/* RXB - Compute most significant bit used vector registers
 *
 * @rxb: Operand to store computed RXB value
 * @v1:  First vector register designated operand
 * @v2:  Second vector register designated operand
 * @v3:  Third vector register designated operand
 * @v4:  Fourth vector register designated operand
 *
 * Each 4-bit register field of an E7xx instruction can only address
 * registers 0-15; the RXB nibble supplies the fifth (most significant)
 * bit of up to four vector register numbers so that all 32 vector
 * registers are reachable.
 *
 * NOTE(review): the macro body was missing from the damaged source and
 * has been reconstructed from the RXB definition in the z/Architecture
 * vector instruction format — verify against the original header.
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if \v1 & 0x10
	\rxb = \rxb | 0x08	/* MSB of first operand */
	.endif
	.if \v2 & 0x10
	\rxb = \rxb | 0x04	/* MSB of second operand */
	.endif
	.if \v3 & 0x10
	\rxb = \rxb | 0x02	/* MSB of third operand */
	.endif
	.if \v4 & 0x10
	\rxb = \rxb | 0x01	/* MSB of fourth operand */
	.endif
.endm
/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:  Element size control
 * @v1: First vector register designated operand (for RXB)
 * @v2: Second vector register designated operand (for RXB)
 * @v3: Third vector register designated operand (for RXB)
 * @v4: Fourth vector register designated operand (for RXB)
 *
 * Emits the byte holding the M field (bits 0-3) and the RXB
 * field (bits 4-7) of a vector instruction.
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm
/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:   Element size control
 * @opc: Opcode byte (least significant byte of the instruction)
 * @v1:  First vector register designated operand (for RXB)
 * @v2:  Second vector register designated operand (for RXB)
 * @v3:  Third vector register designated operand (for RXB)
 * @v4:  Fourth vector register designated operand (for RXB)
 *
 * Emits the final two bytes of an E7xx vector instruction:
 * the M/RXB byte followed by the opcode byte.
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	/* NOTE(review): immediate word and opcode reconstructed (VGBM = E7..44)
	 * from the z/Architecture spec; the lines were missing in the damaged
	 * source — verify against the original header. */
	.word	\imm
	MRXBOPC	0, 0x44, v1
.endm
/* VECTOR LOAD VR ELEMENT FROM GR
 *
 * @v:    Vector register designation (destination)
 * @gr:   General-purpose register designation (source value)
 * @disp: Displacement selecting the element
 * @m:    Element size control (0=byte, 1=halfword, 2=word, 3=doubleword)
 */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"		/* Base register */
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1		/* VLVG = E7..22 */
.endm
/* Element-size convenience wrappers for VLVG.
 *
 * Fix: VLVGB previously declared a fourth parameter (base) and passed
 * five arguments to the four-parameter VLVG macro, which gas rejects
 * ("too many arguments"); the stray argument has been dropped so the
 * wrapper matches its siblings.
 */
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm

.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm

.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm

.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
/* VECTOR LOAD
 *
 * @v:     Vector register designation (destination)
 * @disp:  Displacement
 * @index: Index register designation (default "%r0" = no index)
 * @base:  Base register designation
 */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x06, v1		/* VL = E7..06 */
.endm
/* VECTOR LOAD ELEMENT
 *
 * @vr1:   Vector register designation (destination)
 * @disp:  Displacement
 * @index: Index register designation (default "%r0" = no index)
 * @base:  Base register designation
 * @m3:    Element index within the vector register
 * @opc:   Opcode selecting the element size variant (VLEB/VLEH/VLEF/VLEG)
 */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
/* Element-size wrappers for VLEx: byte (0x00), halfword (0x01),
 * word (0x03), doubleword (0x02). */
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm

.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm

.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm

.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm
/* VECTOR LOAD ELEMENT IMMEDIATE
 *
 * @vr1:  Vector register designation (destination)
 * @imm2: 16-bit signed immediate value
 * @m3:   Element index within the vector register
 * @opc:  Opcode selecting the element size variant (VLEIB/VLEIH/VLEIF/VLEIG)
 */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2			/* immediate operand word */
	MRXBOPC	\m3, \opc, v1
.endm
/* Element-size wrappers for VLEIx: byte (0x40), halfword (0x41),
 * word (0x43), doubleword (0x42). The "index" parameter is the
 * element index (forwarded as m3). */
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm

.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm

.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm

.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm
/* VECTOR LOAD GR FROM VR ELEMENT
 *
 * @gr:   General-purpose register designation (destination)
 * @vr:   Vector register designation (source)
 * @disp: Displacement selecting the element
 * @base: Base register designation (default "%r0" = no base)
 * @m:    Element size control (0=byte, 1=halfword, 2=word, 3=doubleword)
 */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3		/* VLGV = E7..21 */
.endm
/* Element-size convenience wrappers for VLGV. */
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm

.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm

.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm

.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
/* VECTOR LOAD MULTIPLE
 *
 * @vfrom: First vector register designation of the range
 * @vto:   Last vector register designation of the range
 * @disp:  Displacement
 * @base:  Base register designation
 */
.macro	VLM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base		/* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x36, v1, v3		/* VLM = E7..36 */
.endm
/* VECTOR STORE MULTIPLE
 *
 * @vfrom: First vector register designation of the range
 * @vto:   Last vector register designation of the range
 * @disp:  Displacement
 * @base:  Base register designation
 */
.macro	VSTM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base		/* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x3E, v1, v3		/* VSTM = E7..3E */
.endm
/* VECTOR PERMUTE
 *
 * @vr1: Vector register designation (destination)
 * @vr2: First source vector register designation
 * @vr3: Second source vector register designation
 * @vr4: Vector register designation holding the permute pattern
 */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4	/* VPERM = E7..8C */
.endm
/* VECTOR UNPACK LOGICAL LOW
 *
 * @vr1: Vector register designation (destination)
 * @vr2: Vector register designation (source)
 * @m3:  Element size control of the source elements
 */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000			/* unused operand word of the VRR-a format */
	MRXBOPC	\m3, 0xD4, v1, v2	/* VUPLL = E7..D4 */
.endm

/* Element-size convenience wrappers for VUPLL. */
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm

.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm

.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm
/* Vector integer instructions */

/* VECTOR EXCLUSIVE OR
 *
 * @vr1: Vector register designation (destination)
 * @vr2: First source vector register designation
 * @vr3: Second source vector register designation
 */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3	/* VX = E7..6D */
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM
 *
 * @vr1: Vector register designation (destination)
 * @vr2: First source vector register designation
 * @vr3: Second source vector register designation
 * @m4:  Element size control
 */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3	/* VGFM = E7..B4 */
.endm

/* Element-size convenience wrappers for VGFM. */
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm

.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm

.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm

.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
 *
 * @vr1: Vector register designation (destination)
 * @vr2: First source vector register designation
 * @vr3: Second source vector register designation
 * @vr4: Vector register designation holding the accumulator
 * @m5:  Element size control
 */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4	/* VGFMA = E7..BC */
.endm

/* Element-size convenience wrappers for VGFMA. */
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm

.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm

.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm

.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm
/* VECTOR SHIFT RIGHT LOGICAL BY BYTE
 *
 * @vr1: Vector register designation (destination)
 * @vr2: Vector register designation (source to be shifted)
 * @vr3: Vector register designation holding the shift amount
 */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3	/* VSRLB = E7..7D */
.endm
473 #endif /* __ASSEMBLY__ */
474 #endif /* __ASM_S390_VX_INSN_H */