use crate::core_arch::arm_shared::{uint32x4_t, uint8x16_t};

#[allow(improper_ctypes)]
extern "C" {
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aese")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")]
    fn vaeseq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesd")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")]
    fn vaesdq_u8_(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesmc")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")]
    fn vaesmcq_u8_(data: uint8x16_t) -> uint8x16_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.aesimc")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")]
    fn vaesimcq_u8_(data: uint8x16_t) -> uint8x16_t;

    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1h")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")]
    fn vsha1h_u32_(hash_e: u32) -> u32;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1su0")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")]
    fn vsha1su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1su1")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")]
    fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1c")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")]
    fn vsha1cq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1p")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")]
    fn vsha1pq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha1m")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")]
    fn vsha1mq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;

    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256h")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")]
    fn vsha256hq_u32_(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256h2")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")]
    fn vsha256h2q_u32_(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256su0")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")]
    fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
    #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sha256su1")]
    #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")]
    fn vsha256su1q_u32_(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
}

#[cfg(test)]
use stdarch_test::assert_instr;

// TODO: Use AES for ARM when the minimum LLVM version includes b8baa2a9132498ea286dbb0d03f005760ecc6fdb

/// AES single round encryption.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(aese))]
pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    vaeseq_u8_(data, key)
}

/// AES single round decryption.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(aesd))]
pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
    vaesdq_u8_(data, key)
}

/// AES mix columns.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(aesmc))]
pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
    vaesmcq_u8_(data)
}

/// AES inverse mix columns.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(aesimc))]
pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
    vaesimcq_u8_(data)
}
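
// Illustrative sketch, not part of the original source: one full AES
// encryption round on ARMv8 is `vaeseq_u8` (AddRoundKey + SubBytes +
// ShiftRows) followed by `vaesmcq_u8` (MixColumns); the final round skips
// the MixColumns step. The helper name below is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
unsafe fn aes_encrypt_round_example(state: uint8x16_t, round_key: uint8x16_t) -> uint8x16_t {
    // AESE xors the round key into the state, then applies SubBytes and
    // ShiftRows; AESMC finishes the round with MixColumns.
    vaesmcq_u8(vaeseq_u8(state, round_key))
}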

/// SHA1 fixed rotate.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha1h))]
pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
    vsha1h_u32_(hash_e)
}

/// SHA1 hash update accelerator, choose.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha1c))]
pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1cq_u32_(hash_abcd, hash_e, wk)
}

/// SHA1 hash update accelerator, majority.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha1m))]
pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1mq_u32_(hash_abcd, hash_e, wk)
}

/// SHA1 hash update accelerator, parity.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha1p))]
pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
    vsha1pq_u32_(hash_abcd, hash_e, wk)
}

/// SHA1 schedule update accelerator, first part.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha1su0))]
pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
    vsha1su0q_u32_(w0_3, w4_7, w8_11)
}

/// SHA1 schedule update accelerator, second part.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha1su1))]
pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
    vsha1su1q_u32_(tw0_3, w12_15)
}
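
// Illustrative sketch, not part of the original source: the SHA-1
// intrinsics are composed per 16-word block. `vsha1su0q_u32` plus
// `vsha1su1q_u32` extend the message schedule by four words, the round
// intrinsics (`vsha1cq_u32` here, for the "choose" rounds) process four
// rounds at a time, and `vsha1h_u32` produces the rotated E value used by
// the following group of rounds. The helper name, the tuple return, and the
// caller-prepared `wk` (schedule words with the round constant added) are
// assumptions; a full implementation feeds lane 0 of the pre-round `abcd`
// into `vsha1h_u32` rather than the scalar `e` shown here.
#[cfg(test)]
#[allow(dead_code)]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
unsafe fn sha1_step_example(
    abcd: uint32x4_t,
    e: u32,
    wk: uint32x4_t,
    w0_3: uint32x4_t,
    w4_7: uint32x4_t,
    w8_11: uint32x4_t,
    w12_15: uint32x4_t,
) -> (uint32x4_t, u32, uint32x4_t) {
    // Four "choose" rounds (the form used by SHA-1 rounds 0..19).
    let abcd_next = vsha1cq_u32(abcd, e, wk);
    // Fixed rotate of E for the next group of four rounds.
    let e_next = vsha1h_u32(e);
    // Extend the schedule: su0 mixes W[0..11], su1 folds in W[12..15].
    let w16_19 = vsha1su1q_u32(vsha1su0q_u32(w0_3, w4_7, w8_11), w12_15);
    (abcd_next, e_next, w16_19)
}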

/// SHA256 hash update accelerator.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha256h))]
pub unsafe fn vsha256hq_u32(
    hash_abcd: uint32x4_t,
    hash_efgh: uint32x4_t,
    wk: uint32x4_t,
) -> uint32x4_t {
    vsha256hq_u32_(hash_abcd, hash_efgh, wk)
}

/// SHA256 hash update accelerator, upper part.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha256h2))]
pub unsafe fn vsha256h2q_u32(
    hash_efgh: uint32x4_t,
    hash_abcd: uint32x4_t,
    wk: uint32x4_t,
) -> uint32x4_t {
    vsha256h2q_u32_(hash_efgh, hash_abcd, wk)
}

/// SHA256 schedule update accelerator, first part.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha256su0))]
pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
    vsha256su0q_u32_(w0_3, w4_7)
}

/// SHA256 schedule update accelerator, second part.
#[inline]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
#[cfg_attr(test, assert_instr(sha256su1))]
pub unsafe fn vsha256su1q_u32(
    tw0_3: uint32x4_t,
    w8_11: uint32x4_t,
    w12_15: uint32x4_t,
) -> uint32x4_t {
    vsha256su1q_u32_(tw0_3, w8_11, w12_15)
}
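
// Illustrative sketch, not part of the original source: a typical SHA-256
// step extends the message schedule with `vsha256su0q_u32` and
// `vsha256su1q_u32`, then runs four rounds of the compression function by
// passing the same `wk` vector (schedule words with the round constants
// already added, prepared by the caller) to `vsha256hq_u32` and
// `vsha256h2q_u32`. Note that `vsha256h2q_u32` takes the EFGH half first
// and the pre-round ABCD half second. The helper name and tuple return are
// assumptions.
#[cfg(test)]
#[allow(dead_code)]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
unsafe fn sha256_step_example(
    abcd: uint32x4_t,
    efgh: uint32x4_t,
    wk: uint32x4_t,
    w0_3: uint32x4_t,
    w4_7: uint32x4_t,
    w8_11: uint32x4_t,
    w12_15: uint32x4_t,
) -> (uint32x4_t, uint32x4_t, uint32x4_t) {
    // Four rounds of compression; both halves consume the same `wk`.
    let abcd_next = vsha256hq_u32(abcd, efgh, wk);
    let efgh_next = vsha256h2q_u32(efgh, abcd, wk);
    // Extend the schedule: su0 mixes W[0..7], su1 folds in W[8..15].
    let w16_19 = vsha256su1q_u32(vsha256su0q_u32(w0_3, w4_7), w8_11, w12_15);
    (abcd_next, efgh_next, w16_19)
}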

#[cfg(test)]
mod tests {
    use super::*;
    use crate::core_arch::{arm_shared::*, simd::*};
    use std::mem;
    use stdarch_test::simd_test;

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
    unsafe fn test_vaeseq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
        let r: u8x16 = mem::transmute(vaeseq_u8(data, key));
        assert_eq!(
            r,
            u8x16::new(
                124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118, 124, 123, 124, 197
            )
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
    unsafe fn test_vaesdq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
        let r: u8x16 = mem::transmute(vaesdq_u8(data, key));
        assert_eq!(
            r,
            u8x16::new(9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
    unsafe fn test_vaesmcq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let r: u8x16 = mem::transmute(vaesmcq_u8(data));
        assert_eq!(
            r,
            u8x16::new(3, 4, 9, 10, 15, 8, 21, 30, 3, 4, 9, 10, 15, 8, 21, 30)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
    unsafe fn test_vaesimcq_u8() {
        let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let r: u8x16 = mem::transmute(vaesimcq_u8(data));
        assert_eq!(
            r,
            u8x16::new(43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80, 125, 70)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha1h_u32() {
        assert_eq!(vsha1h_u32(0x1234), 0x048d);
        assert_eq!(vsha1h_u32(0x5678), 0x159e);
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha1su0q_u32() {
        let r: u32x4 = mem::transmute(vsha1su0q_u32(
            mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
            mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
            mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
        ));
        assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678));
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha1su1q_u32() {
        let r: u32x4 = mem::transmute(vsha1su1q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x00008898, 0x00019988, 0x00008898, 0x0000acd0)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha1cq_u32() {
        let r: u32x4 = mem::transmute(vsha1cq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            0x1234,
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x8a32cbd8, 0x0c518a96, 0x0018a081, 0x0000c168)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha1pq_u32() {
        let r: u32x4 = mem::transmute(vsha1pq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            0x1234,
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x469f0ba3, 0x0a326147, 0x80145d7f, 0x00009f47)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha1mq_u32() {
        let r: u32x4 = mem::transmute(vsha1mq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            0x1234,
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0xaa39693b, 0x0d51bf84, 0x001aa109, 0x0000d278)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha256hq_u32() {
        let r: u32x4 = mem::transmute(vsha256hq_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x05e9aaa8, 0xec5f4c02, 0x20a1ea61, 0x28738cef)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha256h2q_u32() {
        let r: u32x4 = mem::transmute(vsha256h2q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x3745362e, 0x2fb51d00, 0xbd4c529b, 0x968b8516)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha256su0q_u32() {
        let r: u32x4 = mem::transmute(vsha256su0q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0xe59e1c97, 0x5eaf68da, 0xd7bcb51f, 0x6c8de152)
        );
    }

    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
    unsafe fn test_vsha256su1q_u32() {
        let r: u32x4 = mem::transmute(vsha256su1q_u32(
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
            mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
        ));
        assert_eq!(
            r,
            u32x4::new(0x5e09e8d2, 0x74a6f16b, 0xc966606b, 0xa686ee9f)
        );
    }
}