]> git.proxmox.com Git - mirror_qemu.git/blob - target/riscv/crypto_helper.c
505166ce5a1487841d67a8f6d44cd55f08370090
[mirror_qemu.git] / target / riscv / crypto_helper.c
1 /*
2 * RISC-V Crypto Emulation Helpers for QEMU.
3 *
4 * Copyright (c) 2021 Ruibo Lu, luruibo2000@163.com
5 * Copyright (c) 2021 Zewen Ye, lustrew@foxmail.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "exec/helper-proto.h"
24 #include "crypto/aes.h"
25 #include "crypto/aes-round.h"
26 #include "crypto/sm4.h"
27
/*
 * Multiply a byte by x (i.e. by 2) in GF(2^8) with the AES reduction
 * polynomial x^8 + x^4 + x^3 + x + 1 (0x11b).  The result is NOT
 * masked to 8 bits; AES_GFMUL masks its final value instead.
 * Arguments are parenthesized to keep the macro expansion-safe;
 * note "a" is still evaluated more than once.
 */
#define AES_XTIME(a) \
    (((a) << 1) ^ (((a) & 0x80) ? 0x1b : 0))

/*
 * Multiply byte a by the 4-bit constant b in GF(2^8): XOR together
 * the doublings of a selected by the set bits of b, masked to 8 bits.
 */
#define AES_GFMUL(a, b) (( \
    (((b) & 0x1) ? (a) : 0) ^ \
    (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \
    (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \
    (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF)
36
/*
 * Contribution of one input byte x to a 32-bit AES (Inv)MixColumns
 * column.  fwd selects the forward matrix column {3,1,1,2} (MSB
 * first); otherwise the inverse column {0xb,0xd,0x9,0xe} is used.
 */
static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd)
{
    /* Successive GF(2^8) doublings of x (AES polynomial 0x11b). */
    uint32_t x1 = x;
    uint32_t x2 = ((x1 << 1) ^ ((x1 & 0x80) ? 0x1b : 0)) & 0xFF;
    uint32_t x4 = ((x2 << 1) ^ ((x2 & 0x80) ? 0x1b : 0)) & 0xFF;
    uint32_t x8 = ((x4 << 1) ^ ((x4 & 0x80) ? 0x1b : 0)) & 0xFF;

    if (fwd) {
        /* {3, 1, 1, 2} * x */
        return ((x1 ^ x2) << 24) | (x1 << 16) | (x1 << 8) | x2;
    }
    /* {0xb, 0xd, 0x9, 0xe} * x */
    return ((x1 ^ x2 ^ x8) << 24) | ((x1 ^ x4 ^ x8) << 16) |
           ((x1 ^ x8) << 8) | (x2 ^ x4 ^ x8);
}
50
/* Sign-extend the low 32 bits of x to the full target register width. */
#define sext32_xlen(x) (target_ulong)(int32_t)(x)
52
53 static inline target_ulong aes32_operation(target_ulong shamt,
54 target_ulong rs1, target_ulong rs2,
55 bool enc, bool mix)
56 {
57 uint8_t si = rs2 >> shamt;
58 uint8_t so;
59 uint32_t mixed;
60 target_ulong res;
61
62 if (enc) {
63 so = AES_sbox[si];
64 if (mix) {
65 mixed = aes_mixcolumn_byte(so, true);
66 } else {
67 mixed = so;
68 }
69 } else {
70 so = AES_isbox[si];
71 if (mix) {
72 mixed = aes_mixcolumn_byte(so, false);
73 } else {
74 mixed = so;
75 }
76 }
77 mixed = rol32(mixed, shamt);
78 res = rs1 ^ mixed;
79
80 return sext32_xlen(res);
81 }
82
/*
 * aes32esmi: RV32 AES encrypt middle-round instruction -- S-box
 * substitution plus MixColumns contribution (enc = true, mix = true).
 */
target_ulong HELPER(aes32esmi)(target_ulong rs1, target_ulong rs2,
                               target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, true, true);
}
88
/*
 * aes32esi: RV32 AES encrypt final-round instruction -- S-box
 * substitution only, no MixColumns (enc = true, mix = false).
 */
target_ulong HELPER(aes32esi)(target_ulong rs1, target_ulong rs2,
                              target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, true, false);
}
94
/*
 * aes32dsmi: RV32 AES decrypt middle-round instruction -- inverse
 * S-box plus InvMixColumns contribution (enc = false, mix = true).
 */
target_ulong HELPER(aes32dsmi)(target_ulong rs1, target_ulong rs2,
                               target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, false, true);
}
100
/*
 * aes32dsi: RV32 AES decrypt final-round instruction -- inverse
 * S-box only, no InvMixColumns (enc = false, mix = false).
 */
target_ulong HELPER(aes32dsi)(target_ulong rs1, target_ulong rs2,
                              target_ulong shamt)
{
    return aes32_operation(shamt, rs1, rs2, false, false);
}
106
/* Extract byte I (0 = least significant) of X. */
#define BY(X, I) (((X) >> (8 * (I))) & 0xFF)

/*
 * AES ShiftRows: produce the low 64 bits of the row-shifted state,
 * gathering bytes from the two 64-bit state halves RS1 (low) and
 * RS2 (high).  Arguments are parenthesized for expansion safety;
 * they are evaluated multiple times, so pass plain variables.
 */
#define AES_SHIFROWS_LO(RS1, RS2) ( \
    ((((RS1) >> 24) & 0xFF) << 56) | ((((RS2) >> 48) & 0xFF) << 48) | \
    ((((RS2) >> 8) & 0xFF) << 40) | ((((RS1) >> 32) & 0xFF) << 32) | \
    ((((RS2) >> 56) & 0xFF) << 24) | ((((RS2) >> 16) & 0xFF) << 16) | \
    ((((RS1) >> 40) & 0xFF) << 8) | ((((RS1) >> 0) & 0xFF) << 0))

/* AES InvShiftRows: low 64 bits, analogous to AES_SHIFROWS_LO. */
#define AES_INVSHIFROWS_LO(RS1, RS2) ( \
    ((((RS2) >> 24) & 0xFF) << 56) | ((((RS2) >> 48) & 0xFF) << 48) | \
    ((((RS1) >> 8) & 0xFF) << 40) | ((((RS1) >> 32) & 0xFF) << 32) | \
    ((((RS1) >> 56) & 0xFF) << 24) | ((((RS2) >> 16) & 0xFF) << 16) | \
    ((((RS2) >> 40) & 0xFF) << 8) | ((((RS1) >> 0) & 0xFF) << 0))
120
/*
 * One output byte of AES MixColumns: B0..B3 select which input bytes
 * of COL receive coefficients 2, 3, 1, 1 respectively.
 */
#define AES_MIXBYTE(COL, B0, B1, B2, B3) ( \
    BY((COL), (B3)) ^ BY((COL), (B2)) ^ AES_GFMUL(BY((COL), (B1)), 3) ^ \
    AES_GFMUL(BY((COL), (B0)), 2))

/*
 * AES MixColumns on one 32-bit column (byte 0 = least significant).
 * Each byte is cast to uint32_t before shifting: shifting a byte
 * >= 0x80 left by 24 as a plain int would shift into the sign bit,
 * which is undefined behavior.
 */
#define AES_MIXCOLUMN(COL) ( \
    (uint32_t)AES_MIXBYTE((COL), 3, 0, 1, 2) << 24 | \
    (uint32_t)AES_MIXBYTE((COL), 2, 3, 0, 1) << 16 | \
    (uint32_t)AES_MIXBYTE((COL), 1, 2, 3, 0) << 8 | \
    (uint32_t)AES_MIXBYTE((COL), 0, 1, 2, 3) << 0)

/*
 * One output byte of AES InvMixColumns: coefficients 0xe, 0xb, 0xd,
 * 0x9 applied to bytes B0..B3 of COL.
 */
#define AES_INVMIXBYTE(COL, B0, B1, B2, B3) ( \
    AES_GFMUL(BY((COL), (B3)), 0x9) ^ AES_GFMUL(BY((COL), (B2)), 0xd) ^ \
    AES_GFMUL(BY((COL), (B1)), 0xb) ^ AES_GFMUL(BY((COL), (B0)), 0xe))

/* AES InvMixColumns on one 32-bit column, with the same UB-safe casts. */
#define AES_INVMIXCOLUMN(COL) ( \
    (uint32_t)AES_INVMIXBYTE((COL), 3, 0, 1, 2) << 24 | \
    (uint32_t)AES_INVMIXBYTE((COL), 2, 3, 0, 1) << 16 | \
    (uint32_t)AES_INVMIXBYTE((COL), 1, 2, 3, 0) << 8 | \
    (uint32_t)AES_INVMIXBYTE((COL), 0, 1, 2, 3) << 0)
139
/* All-zero round key, passed where the primitive's AddRoundKey is a no-op. */
static const AESState aes_zero = { };
141
142 static inline target_ulong aes64_operation(target_ulong rs1, target_ulong rs2,
143 bool enc, bool mix)
144 {
145 uint64_t RS1 = rs1;
146 uint64_t RS2 = rs2;
147 uint64_t result;
148 uint64_t temp;
149 uint32_t col_0;
150 uint32_t col_1;
151
152 if (enc) {
153 temp = AES_SHIFROWS_LO(RS1, RS2);
154 temp = (((uint64_t)AES_sbox[(temp >> 0) & 0xFF] << 0) |
155 ((uint64_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
156 ((uint64_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
157 ((uint64_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
158 ((uint64_t)AES_sbox[(temp >> 32) & 0xFF] << 32) |
159 ((uint64_t)AES_sbox[(temp >> 40) & 0xFF] << 40) |
160 ((uint64_t)AES_sbox[(temp >> 48) & 0xFF] << 48) |
161 ((uint64_t)AES_sbox[(temp >> 56) & 0xFF] << 56));
162 if (mix) {
163 col_0 = temp & 0xFFFFFFFF;
164 col_1 = temp >> 32;
165
166 col_0 = AES_MIXCOLUMN(col_0);
167 col_1 = AES_MIXCOLUMN(col_1);
168
169 result = ((uint64_t)col_1 << 32) | col_0;
170 } else {
171 result = temp;
172 }
173 } else {
174 temp = AES_INVSHIFROWS_LO(RS1, RS2);
175 temp = (((uint64_t)AES_isbox[(temp >> 0) & 0xFF] << 0) |
176 ((uint64_t)AES_isbox[(temp >> 8) & 0xFF] << 8) |
177 ((uint64_t)AES_isbox[(temp >> 16) & 0xFF] << 16) |
178 ((uint64_t)AES_isbox[(temp >> 24) & 0xFF] << 24) |
179 ((uint64_t)AES_isbox[(temp >> 32) & 0xFF] << 32) |
180 ((uint64_t)AES_isbox[(temp >> 40) & 0xFF] << 40) |
181 ((uint64_t)AES_isbox[(temp >> 48) & 0xFF] << 48) |
182 ((uint64_t)AES_isbox[(temp >> 56) & 0xFF] << 56));
183 if (mix) {
184 col_0 = temp & 0xFFFFFFFF;
185 col_1 = temp >> 32;
186
187 col_0 = AES_INVMIXCOLUMN(col_0);
188 col_1 = AES_INVMIXCOLUMN(col_1);
189
190 result = ((uint64_t)col_1 << 32) | col_0;
191 } else {
192 result = temp;
193 }
194 }
195
196 return result;
197 }
198
/* aes64esm: AES encrypt middle round (ShiftRows, SubBytes, MixColumns). */
target_ulong HELPER(aes64esm)(target_ulong rs1, target_ulong rs2)
{
    return aes64_operation(rs1, rs2, true, true);
}
203
/*
 * aes64es: AES encrypt final round (ShiftRows + SubBytes, no
 * MixColumns), implemented via the generic aesenc primitive with an
 * all-zero round key so the AddRoundKey step has no effect.
 */
target_ulong HELPER(aes64es)(target_ulong rs1, target_ulong rs2)
{
    AESState t;

    /* Assemble the 128-bit state: rs1 low doubleword, rs2 high. */
    t.d[HOST_BIG_ENDIAN] = rs1;
    t.d[!HOST_BIG_ENDIAN] = rs2;
    aesenc_SB_SR_AK(&t, &t, &aes_zero, false);
    /* Only the low 64 bits of the result are architecturally produced. */
    return t.d[HOST_BIG_ENDIAN];
}
213
/*
 * aes64ds: AES decrypt final round (InvShiftRows + InvSubBytes, no
 * InvMixColumns), via the generic aesdec primitive with a zero key.
 */
target_ulong HELPER(aes64ds)(target_ulong rs1, target_ulong rs2)
{
    AESState t;

    /* Assemble the 128-bit state: rs1 low doubleword, rs2 high. */
    t.d[HOST_BIG_ENDIAN] = rs1;
    t.d[!HOST_BIG_ENDIAN] = rs2;
    aesdec_ISB_ISR_AK(&t, &t, &aes_zero, false);
    return t.d[HOST_BIG_ENDIAN];
}
223
/* aes64dsm: AES decrypt middle round (InvShiftRows, InvSubBytes, InvMixColumns). */
target_ulong HELPER(aes64dsm)(target_ulong rs1, target_ulong rs2)
{
    return aes64_operation(rs1, rs2, false, true);
}
228
229 target_ulong HELPER(aes64ks2)(target_ulong rs1, target_ulong rs2)
230 {
231 uint64_t RS1 = rs1;
232 uint64_t RS2 = rs2;
233 uint32_t rs1_hi = RS1 >> 32;
234 uint32_t rs2_lo = RS2;
235 uint32_t rs2_hi = RS2 >> 32;
236
237 uint32_t r_lo = (rs1_hi ^ rs2_lo);
238 uint32_t r_hi = (rs1_hi ^ rs2_lo ^ rs2_hi);
239 target_ulong result = ((uint64_t)r_hi << 32) | r_lo;
240
241 return result;
242 }
243
/*
 * aes64ks1i: AES key schedule, first instruction.  Takes the high
 * 32 bits of rs1, applies SubWord (and, for rnum 0..9, RotWord plus
 * the round constant), and replicates the 32-bit result into both
 * halves of the destination.
 */
target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
{
    uint64_t RS1 = rs1;
    /* AES key-expansion round constants rcon[1..10]. */
    static const uint8_t round_consts[10] = {
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
    };

    uint8_t enc_rnum = rnum;
    uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
    uint8_t rcon_ = 0;
    target_ulong result;

    /*
     * rnum == 0xA selects the plain SubWord form; smaller values also
     * rotate and XOR in the round constant.
     * NOTE(review): enc_rnum > 0xA would index past round_consts --
     * presumably the decoder rejects such encodings; confirm against
     * the translator.
     */
    if (enc_rnum != 0xA) {
        temp = ror32(temp, 8); /* Rotate right by 8 */
        rcon_ = round_consts[enc_rnum];
    }

    /* SubWord: forward S-box applied to each byte. */
    temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
           ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
           ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
           ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);

    temp ^= rcon_;

    result = ((uint64_t)temp << 32) | temp;

    return result;
}
272
/*
 * aes64im: apply AES InvMixColumns to the 64-bit value in rs1, using
 * the generic aesdec_IMC primitive with the other state half zeroed.
 */
target_ulong HELPER(aes64im)(target_ulong rs1)
{
    AESState t;

    t.d[HOST_BIG_ENDIAN] = rs1;
    t.d[!HOST_BIG_ENDIAN] = 0;
    aesdec_IMC(&t, &t, false);
    return t.d[HOST_BIG_ENDIAN];
}
282
/*
 * sm4ed: SM4 encrypt/decrypt round instruction.  Selects one byte of
 * rs2, substitutes it through the SM4 S-box, applies the data-path
 * linear transform, rotates the result back into byte position, and
 * XORs into rs1.  Result is sign-extended to XLEN.
 */
target_ulong HELPER(sm4ed)(target_ulong rs1, target_ulong rs2,
                           target_ulong shamt)
{
    uint32_t sb_in = (uint8_t)(rs2 >> shamt);
    uint32_t sb_out = (uint32_t)sm4_sbox[sb_in];

    /*
     * SM4 linear transform L applied to the single S-box byte in bits
     * 7:0; the masked terms fold in the bits that would rotate around
     * the 32-bit word boundary.
     */
    uint32_t x = sb_out ^ (sb_out << 8) ^ (sb_out << 2) ^ (sb_out << 18) ^
                 ((sb_out & 0x3f) << 26) ^ ((sb_out & 0xC0) << 10);

    /* Rotate the contribution to the selected byte lane and mix with rs1. */
    uint32_t rotl = rol32(x, shamt);

    return sext32_xlen(rotl ^ (uint32_t)rs1);
}
296
/*
 * sm4ks: SM4 key schedule instruction.  Same structure as sm4ed but
 * with the key-schedule variant of the linear transform (L').
 */
target_ulong HELPER(sm4ks)(target_ulong rs1, target_ulong rs2,
                           target_ulong shamt)
{
    uint32_t sb_in = (uint8_t)(rs2 >> shamt);
    uint32_t sb_out = sm4_sbox[sb_in];

    /*
     * SM4 key-schedule linear transform L' applied to the S-box byte
     * in bits 7:0, with wrap-around bits handled by the masked terms.
     */
    uint32_t x = sb_out ^ ((sb_out & 0x07) << 29) ^ ((sb_out & 0xFE) << 7) ^
                 ((sb_out & 0x01) << 23) ^ ((sb_out & 0xF8) << 13);

    /* Rotate the contribution to the selected byte lane and mix with rs1. */
    uint32_t rotl = rol32(x, shamt);

    return sext32_xlen(rotl ^ (uint32_t)rs1);
}
310 #undef sext32_xlen