/*
 * RISC-V Crypto Emulation Helpers for QEMU.
 *
 * Copyright (c) 2021 Ruibo Lu, luruibo2000@163.com
 * Copyright (c) 2021 Zewen Ye, lustrew@foxmail.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
/*
 * AES xtime(): multiply a field element by x (i.e. by 2) in GF(2^8) with
 * the AES reduction polynomial 0x11b.  NOTE: the result is NOT masked to
 * 8 bits here; callers (AES_GFMUL) mask the final value.
 */
#define AES_XTIME(a) \
    (((a) << 1) ^ (((a) & 0x80) ? 0x1b : 0))

/*
 * GF(2^8) multiply of 'a' by a small constant 'b' (only the low 4 bits of
 * 'b' are honoured -- enough for the MixColumns coefficients 1..0xe),
 * masked to a byte.
 */
#define AES_GFMUL(a, b) (( \
    (((b) & 0x1) ? (a) : 0) ^ \
    (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \
    (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \
    (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF)
/*
 * Expand one state byte into the 32-bit column contribution used by
 * MixColumns (fwd == true) or InvMixColumns (fwd == false): each output
 * byte is x multiplied by the matching matrix coefficient.
 */
static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd)
{
    uint32_t u;

    if (fwd) {
        /* Forward coefficients {3, 1, 1, 2}, MSB first. */
        u = (AES_GFMUL(x, 3) << 24) | (x << 16) | (x << 8) |
            (AES_GFMUL(x, 2) << 0);
    } else {
        /* Inverse coefficients {0xb, 0xd, 0x9, 0xe}, MSB first. */
        u = (AES_GFMUL(x, 0xb) << 24) | (AES_GFMUL(x, 0xd) << 16) |
            (AES_GFMUL(x, 0x9) << 8) | (AES_GFMUL(x, 0xe) << 0);
    }
    return u;
}
/* Sign-extend a 32-bit result to the guest register width (XLEN). */
#define sext32_xlen(x) (target_ulong)(int32_t)(x)
53 static inline target_ulong
aes32_operation(target_ulong shamt
,
54 target_ulong rs1
, target_ulong rs2
,
57 uint8_t si
= rs2
>> shamt
;
65 mixed
= aes_mixcolumn_byte(so
, true);
72 mixed
= aes_mixcolumn_byte(so
, false);
77 mixed
= rol32(mixed
, shamt
);
80 return sext32_xlen(res
);
83 target_ulong
HELPER(aes32esmi
)(target_ulong rs1
, target_ulong rs2
,
86 return aes32_operation(shamt
, rs1
, rs2
, true, true);
89 target_ulong
HELPER(aes32esi
)(target_ulong rs1
, target_ulong rs2
,
92 return aes32_operation(shamt
, rs1
, rs2
, true, false);
95 target_ulong
HELPER(aes32dsmi
)(target_ulong rs1
, target_ulong rs2
,
98 return aes32_operation(shamt
, rs1
, rs2
, false, true);
101 target_ulong
HELPER(aes32dsi
)(target_ulong rs1
, target_ulong rs2
,
104 return aes32_operation(shamt
, rs1
, rs2
, false, false);
/* Extract byte I (0 = least significant) of value X. */
#define BY(X, I) ((X >> (8 * I)) & 0xFF)

/*
 * AES ShiftRows / InvShiftRows applied to a 128-bit state split across two
 * 64-bit registers; each macro gathers the bytes that land in one 64-bit
 * result half from both source halves.
 */
#define AES_SHIFROWS_LO(RS1, RS2) ( \
    (((RS1 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \
    (((RS2 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \
    (((RS2 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \
    (((RS1 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0))

#define AES_INVSHIFROWS_LO(RS1, RS2) ( \
    (((RS2 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \
    (((RS1 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \
    (((RS1 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \
    (((RS2 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0))

/* One output byte of the MixColumns matrix product {2,3,1,1}. */
#define AES_MIXBYTE(COL, B0, B1, B2, B3) ( \
    BY(COL, B3) ^ BY(COL, B2) ^ AES_GFMUL(BY(COL, B1), 3) ^ \
    AES_GFMUL(BY(COL, B0), 2))

/* MixColumns on one 32-bit column. */
#define AES_MIXCOLUMN(COL) ( \
    AES_MIXBYTE(COL, 3, 0, 1, 2) << 24 | \
    AES_MIXBYTE(COL, 2, 3, 0, 1) << 16 | \
    AES_MIXBYTE(COL, 1, 2, 3, 0) << 8 | AES_MIXBYTE(COL, 0, 1, 2, 3) << 0)

/* One output byte of the InvMixColumns matrix product {0xe,0xb,0xd,0x9}. */
#define AES_INVMIXBYTE(COL, B0, B1, B2, B3) ( \
    AES_GFMUL(BY(COL, B3), 0x9) ^ AES_GFMUL(BY(COL, B2), 0xd) ^ \
    AES_GFMUL(BY(COL, B1), 0xb) ^ AES_GFMUL(BY(COL, B0), 0xe))

/* InvMixColumns on one 32-bit column. */
#define AES_INVMIXCOLUMN(COL) ( \
    AES_INVMIXBYTE(COL, 3, 0, 1, 2) << 24 | \
    AES_INVMIXBYTE(COL, 2, 3, 0, 1) << 16 | \
    AES_INVMIXBYTE(COL, 1, 2, 3, 0) << 8 | \
    AES_INVMIXBYTE(COL, 0, 1, 2, 3) << 0)
140 static const AESState aes_zero
= { };
142 static inline target_ulong
aes64_operation(target_ulong rs1
, target_ulong rs2
,
153 temp
= AES_SHIFROWS_LO(RS1
, RS2
);
154 temp
= (((uint64_t)AES_sbox
[(temp
>> 0) & 0xFF] << 0) |
155 ((uint64_t)AES_sbox
[(temp
>> 8) & 0xFF] << 8) |
156 ((uint64_t)AES_sbox
[(temp
>> 16) & 0xFF] << 16) |
157 ((uint64_t)AES_sbox
[(temp
>> 24) & 0xFF] << 24) |
158 ((uint64_t)AES_sbox
[(temp
>> 32) & 0xFF] << 32) |
159 ((uint64_t)AES_sbox
[(temp
>> 40) & 0xFF] << 40) |
160 ((uint64_t)AES_sbox
[(temp
>> 48) & 0xFF] << 48) |
161 ((uint64_t)AES_sbox
[(temp
>> 56) & 0xFF] << 56));
163 col_0
= temp
& 0xFFFFFFFF;
166 col_0
= AES_MIXCOLUMN(col_0
);
167 col_1
= AES_MIXCOLUMN(col_1
);
169 result
= ((uint64_t)col_1
<< 32) | col_0
;
174 temp
= AES_INVSHIFROWS_LO(RS1
, RS2
);
175 temp
= (((uint64_t)AES_isbox
[(temp
>> 0) & 0xFF] << 0) |
176 ((uint64_t)AES_isbox
[(temp
>> 8) & 0xFF] << 8) |
177 ((uint64_t)AES_isbox
[(temp
>> 16) & 0xFF] << 16) |
178 ((uint64_t)AES_isbox
[(temp
>> 24) & 0xFF] << 24) |
179 ((uint64_t)AES_isbox
[(temp
>> 32) & 0xFF] << 32) |
180 ((uint64_t)AES_isbox
[(temp
>> 40) & 0xFF] << 40) |
181 ((uint64_t)AES_isbox
[(temp
>> 48) & 0xFF] << 48) |
182 ((uint64_t)AES_isbox
[(temp
>> 56) & 0xFF] << 56));
184 col_0
= temp
& 0xFFFFFFFF;
187 col_0
= AES_INVMIXCOLUMN(col_0
);
188 col_1
= AES_INVMIXCOLUMN(col_1
);
190 result
= ((uint64_t)col_1
<< 32) | col_0
;
199 target_ulong
HELPER(aes64esm
)(target_ulong rs1
, target_ulong rs2
)
201 return aes64_operation(rs1
, rs2
, true, true);
204 target_ulong
HELPER(aes64es
)(target_ulong rs1
, target_ulong rs2
)
208 t
.d
[HOST_BIG_ENDIAN
] = rs1
;
209 t
.d
[!HOST_BIG_ENDIAN
] = rs2
;
210 aesenc_SB_SR_AK(&t
, &t
, &aes_zero
, false);
211 return t
.d
[HOST_BIG_ENDIAN
];
214 target_ulong
HELPER(aes64ds
)(target_ulong rs1
, target_ulong rs2
)
218 t
.d
[HOST_BIG_ENDIAN
] = rs1
;
219 t
.d
[!HOST_BIG_ENDIAN
] = rs2
;
220 aesdec_ISB_ISR_AK(&t
, &t
, &aes_zero
, false);
221 return t
.d
[HOST_BIG_ENDIAN
];
224 target_ulong
HELPER(aes64dsm
)(target_ulong rs1
, target_ulong rs2
)
226 return aes64_operation(rs1
, rs2
, false, true);
229 target_ulong
HELPER(aes64ks2
)(target_ulong rs1
, target_ulong rs2
)
233 uint32_t rs1_hi
= RS1
>> 32;
234 uint32_t rs2_lo
= RS2
;
235 uint32_t rs2_hi
= RS2
>> 32;
237 uint32_t r_lo
= (rs1_hi
^ rs2_lo
);
238 uint32_t r_hi
= (rs1_hi
^ rs2_lo
^ rs2_hi
);
239 target_ulong result
= ((uint64_t)r_hi
<< 32) | r_lo
;
244 target_ulong
HELPER(aes64ks1i
)(target_ulong rs1
, target_ulong rnum
)
247 static const uint8_t round_consts
[10] = {
248 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
251 uint8_t enc_rnum
= rnum
;
252 uint32_t temp
= (RS1
>> 32) & 0xFFFFFFFF;
256 if (enc_rnum
!= 0xA) {
257 temp
= ror32(temp
, 8); /* Rotate right by 8 */
258 rcon_
= round_consts
[enc_rnum
];
261 temp
= ((uint32_t)AES_sbox
[(temp
>> 24) & 0xFF] << 24) |
262 ((uint32_t)AES_sbox
[(temp
>> 16) & 0xFF] << 16) |
263 ((uint32_t)AES_sbox
[(temp
>> 8) & 0xFF] << 8) |
264 ((uint32_t)AES_sbox
[(temp
>> 0) & 0xFF] << 0);
268 result
= ((uint64_t)temp
<< 32) | temp
;
273 target_ulong
HELPER(aes64im
)(target_ulong rs1
)
277 t
.d
[HOST_BIG_ENDIAN
] = rs1
;
278 t
.d
[!HOST_BIG_ENDIAN
] = 0;
279 aesdec_IMC(&t
, &t
, false);
280 return t
.d
[HOST_BIG_ENDIAN
];
283 target_ulong
HELPER(sm4ed
)(target_ulong rs1
, target_ulong rs2
,
286 uint32_t sb_in
= (uint8_t)(rs2
>> shamt
);
287 uint32_t sb_out
= (uint32_t)sm4_sbox
[sb_in
];
289 uint32_t x
= sb_out
^ (sb_out
<< 8) ^ (sb_out
<< 2) ^ (sb_out
<< 18) ^
290 ((sb_out
& 0x3f) << 26) ^ ((sb_out
& 0xC0) << 10);
292 uint32_t rotl
= rol32(x
, shamt
);
294 return sext32_xlen(rotl
^ (uint32_t)rs1
);
297 target_ulong
HELPER(sm4ks
)(target_ulong rs1
, target_ulong rs2
,
300 uint32_t sb_in
= (uint8_t)(rs2
>> shamt
);
301 uint32_t sb_out
= sm4_sbox
[sb_in
];
303 uint32_t x
= sb_out
^ ((sb_out
& 0x07) << 29) ^ ((sb_out
& 0xFE) << 7) ^
304 ((sb_out
& 0x01) << 23) ^ ((sb_out
& 0xF8) << 13);
306 uint32_t rotl
= rol32(x
, shamt
);
308 return sext32_xlen(rotl
^ (uint32_t)rs1
);