X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=arch%2Fs390%2Finclude%2Fasm%2Fvx-insn.h;h=b61846dff70fb7534db6e1b9c573cb880ef7ee5f;hb=0eab11c7e0d30de14a15ccd8269eef238321a8e1;hp=4a3135620f5e4c82f1416c0cd1c0735703ad01ac;hpb=05c78081d2d8eaf04bf60946fcc53380febf3376;p=mirror_ubuntu-artful-kernel.git

diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
index 4a3135620f5e..b61846dff70f 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/vx-insn.h
@@ -16,15 +16,13 @@
 
 /* Macros to generate vector instruction byte code */
 
-#define REG_NUM_INVALID	255
-
 /* GR_NUM - Retrieve general-purpose register number
  *
  * @opd:	Operand to store register number
  * @r64:	String designation register in the format "%rN"
  */
 .macro	GR_NUM	opd gr
-	\opd = REG_NUM_INVALID
+	\opd = 255
 	.ifc \gr,%r0
 		\opd = 0
 	.endif
@@ -73,14 +71,11 @@
 	.ifc \gr,%r15
 		\opd = 15
 	.endif
-	.if \opd == REG_NUM_INVALID
-	.error "Invalid general-purpose register designation: \gr"
+	.if \opd == 255
+		\opd = \gr
 	.endif
 .endm
 
-/* VX_R() - Macro to encode the VX_NUM into the instruction */
-#define VX_R(v)		(v & 0x0F)
-
 /* VX_NUM - Retrieve vector register number
  *
  * @opd:	Operand to store register number
@@ -88,11 +83,10 @@
  *
  * The vector register number is used for as input number to the
  * instruction and, as well as, to compute the RXB field of the
- * instruction.  To encode the particular vector register number,
- * use the VX_R(v) macro to extract the instruction opcode.
+ * instruction.
  */
 .macro	VX_NUM	opd vxr
-	\opd = REG_NUM_INVALID
+	\opd = 255
 	.ifc \vxr,%v0
 		\opd = 0
 	.endif
@@ -189,8 +183,8 @@
 	.ifc \vxr,%v31
 		\opd = 31
 	.endif
-	.if \opd == REG_NUM_INVALID
-	.error "Invalid vector register designation: \vxr"
+	.if \opd == 255
+		\opd = \vxr
 	.endif
 .endm
 
@@ -251,7 +245,7 @@
 /* VECTOR GENERATE BYTE MASK */
 .macro	VGBM	vr imm2
 	VX_NUM	v1, \vr
-	.word	(0xE700 | (VX_R(v1) << 4))
+	.word	(0xE700 | ((v1&15) << 4))
 	.word	\imm2
 	MRXBOPC	0, 0x44, v1
 .endm
@@ -267,7 +261,7 @@
 	VX_NUM	v1, \v
 	GR_NUM	b2, "%r0"
 	GR_NUM	r3, \gr
-	.word	0xE700 | (VX_R(v1) << 4) | r3
+	.word	0xE700 | ((v1&15) << 4) | r3
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	\m, 0x22, v1
 .endm
@@ -289,7 +283,7 @@
 	VX_NUM	v1, \v
 	GR_NUM	x2, \index
 	GR_NUM	b2, \base
-	.word	0xE700 | (VX_R(v1) << 4) | x2
+	.word	0xE700 | ((v1&15) << 4) | x2
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	0, 0x06, v1
 .endm
@@ -299,7 +293,7 @@
 	VX_NUM	v1, \vr1
 	GR_NUM	x2, \index
 	GR_NUM	b2, \base
-	.word	0xE700 | (VX_R(v1) << 4) | x2
+	.word	0xE700 | ((v1&15) << 4) | x2
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	\m3, \opc, v1
 .endm
@@ -319,7 +313,7 @@
 /* VECTOR LOAD ELEMENT IMMEDIATE */
 .macro	VLEIx	vr1, imm2, m3, opc
 	VX_NUM	v1, \vr1
-	.word	0xE700 | (VX_R(v1) << 4)
+	.word	0xE700 | ((v1&15) << 4)
 	.word	\imm2
 	MRXBOPC	\m3, \opc, v1
 .endm
@@ -341,7 +335,7 @@
 	GR_NUM	r1, \gr
 	GR_NUM	b2, \base
 	VX_NUM	v3, \vr
-	.word	0xE700 | (r1 << 4) | VX_R(v3)
+	.word	0xE700 | (r1 << 4) | (v3&15)
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	\m, 0x21, v3
 .endm
@@ -363,7 +357,7 @@
 	VX_NUM	v1, \vfrom
 	VX_NUM	v3, \vto
 	GR_NUM	b2, \base	    /* Base register */
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+	.word	0xE700 | ((v1&15) << 4) | (v3&15)
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	0, 0x36, v1, v3
 .endm
@@ -373,7 +367,7 @@
 	VX_NUM	v1, \vfrom
 	VX_NUM	v3, \vto
 	GR_NUM	b2, \base	    /* Base register */
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+	.word	0xE700 | ((v1&15) << 4) | (v3&15)
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	0, 0x3E, v1, v3
 .endm
@@ -384,16 +378,16 @@
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
 	VX_NUM	v4, \vr4
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
-	MRXBOPC	VX_R(v4), 0x8C, v1, v2, v3, v4
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
 .endm
 
 /* VECTOR UNPACK LOGICAL LOW */
 .macro	VUPLL	vr1, vr2, m3
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
 	.word	0x0000
 	MRXBOPC	\m3, 0xD4, v1, v2
 .endm
@@ -415,8 +409,8 @@
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
 	MRXBOPC	0, 0x6D, v1, v2, v3
 .endm
 
@@ -425,8 +419,8 @@
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
 	MRXBOPC	\m4, 0xB4, v1, v2, v3
 .endm
 .macro	VGFMB	vr1, vr2, vr3
@@ -448,9 +442,9 @@
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
 	VX_NUM	v4, \vr4
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12) | (\m5 << 8)
-	MRXBOPC	VX_R(v4), 0xBC, v1, v2, v3, v4
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12) | (\m5 << 8)
+	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
 .endm
 .macro	VGFMAB	vr1, vr2, vr3, vr4
 	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
@@ -470,8 +464,8 @@
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
 	MRXBOPC	0, 0x7D, v1, v2, v3
 .endm
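
Usage sketch (illustrative only, not part of the patch): after this change GR_NUM and VX_NUM fall back to the raw operand value when it is not a "%rN"/"%vN" designation (the new "\opd = \gr" / "\opd = \vxr" branches), so a caller may pass either a register name or a plain number and obtain the same byte code, for example with VGBM as defined above:

	VGBM	%v2, 0x00ff	/* register designation "%v2" -> v1 = 2 */
	VGBM	2, 0x00ff	/* plain number 2, resolved via the new fallback */

Both forms emit 0xE720 followed by 0x00ff, plus the trailing RXB/opcode word produced by MRXBOPC (that macro is outside the hunks shown here).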