/*
 *  Armv8-A Cryptographic Extension support functions for Aarch64
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#if defined(__aarch64__) && !defined(__ARM_FEATURE_CRYPTO) && \
    defined(__clang__) && __clang_major__ >= 4
/* TODO: Re-consider the above after https://reviews.llvm.org/D131064 is merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations without
 * requiring -march on the command line.
 *
 * `arm_neon.h` could be included by any header file, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES    1
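/* Defining the ACLE feature macros ourselves does not actually enable the
 * instructions at the -march level, so this flag records that a per-function
 * target pragma must still be pushed further down in this file. */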
#define MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG
#endif

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_ARCH_IS_ARM64)

/* Compiler version checks. */
#if defined(__clang__)
#   if __clang_major__ < 4
#       error "Minimum version of Clang for MBEDTLS_AESCE_C is 4.0."
#   endif
#elif defined(__GNUC__)
#   if __GNUC__ < 6
#       error "Minimum version of GCC for MBEDTLS_AESCE_C is 6.0."
#   endif
#elif defined(_MSC_VER)
/* TODO: We haven't verified MSVC from 1920 to 1928. If someone verifies that,
 *       please update this check and the documentation of `MBEDTLS_AESCE_C` in
 *       `mbedtls_config.h`. */
#   if _MSC_VER < 1929
#       error "Minimum version of MSVC for MBEDTLS_AESCE_C is 2019 version 16.11.2."
#   endif
#endif

#ifdef __ARM_NEON
#include <arm_neon.h>
#else
#error "Target does not support NEON instructions"
#endif

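/* If the crypto extensions were not enabled on the command line (or were only
 * enabled by the macro definitions at the top of this file), enable them
 * per-function via target pragmas where the compiler supports it. */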
#if !(defined(__ARM_FEATURE_CRYPTO) || defined(__ARM_FEATURE_AES)) || \
    defined(MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG)
#   if defined(__ARMCOMPILER_VERSION)
#       if __ARMCOMPILER_VERSION <= 6090000
#           error "Must use minimum -march=armv8-a+crypto for MBEDTLS_AESCE_C"
#       else
#           pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#           define MBEDTLS_POP_TARGET_PRAGMA
#       endif
#   elif defined(__clang__)
#       pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(__GNUC__)
#       pragma GCC push_options
#       pragma GCC target ("+crypto")
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(_MSC_VER)
#       error "Required feature(__ARM_FEATURE_AES) is not enabled."
#   endif
#endif /* !(__ARM_FEATURE_CRYPTO || __ARM_FEATURE_AES) ||
          MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG */

#if defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)

#include <asm/hwcap.h>
#include <sys/auxv.h>

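/* -1: not yet determined; 0: AES instructions not supported; 1: supported. */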
signed char mbedtls_aesce_has_support_result = -1;

#if !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)
/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support_impl(void)
{
    /* To avoid many calls to getauxval, cache the result. This is
     * thread-safe, because we store the result in a char, so it cannot
     * be vulnerable to non-atomic updates.
     * It is possible that we could end up setting the result more than
     * once, but that is harmless.
     */
    if (mbedtls_aesce_has_support_result == -1) {
        unsigned long auxval = getauxval(AT_HWCAP);
        if ((auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
            (HWCAP_ASIMD | HWCAP_AES)) {
            mbedtls_aesce_has_support_result = 1;
        } else {
            mbedtls_aesce_has_support_result = 0;
        }
    }
    return mbedtls_aesce_has_support_result;
}
#endif

#endif /* defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY) */

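/* The AESE instruction (vaeseq_u8) combines AddRoundKey, SubBytes and
 * ShiftRows, and the AESMC instruction (vaesmcq_u8) performs MixColumns,
 * so one AESE/AESMC pair executes one full AES encryption round. */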
/* Single round of AESCE encryption */
#define AESCE_ENCRYPT_ROUND                   \
    block = vaeseq_u8(block, vld1q_u8(keys)); \
    block = vaesmcq_u8(block);                \
    keys += 16
/* Two rounds of AESCE encryption */
#define AESCE_ENCRYPT_ROUND_X2        AESCE_ENCRYPT_ROUND; AESCE_ENCRYPT_ROUND

MBEDTLS_OPTIMIZE_FOR_PERFORMANCE
static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
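    /* Fall-through dispatch: entering at the top runs the 13 full rounds of
     * AES-256, jumping to rounds_12 runs the 11 of AES-192, and rounds_10
     * runs the 9 of AES-128; the final round (no MixColumns) follows below. */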
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_ENCRYPT_ROUND_X2;
rounds_12:
    AESCE_ENCRYPT_ROUND_X2;
rounds_10:
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND;

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round. */
    block = vaeseq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

/* Single round of AESCE decryption
 *
 * AES AddRoundKey, SubBytes, ShiftRows
 *
 *      block = vaesdq_u8(block, vld1q_u8(keys));
 *
 * AES inverse MixColumns for the next round.
 *
 * This means that we switch the order of the inverse AddRoundKey and
 * inverse MixColumns operations. We have to do this as AddRoundKey is
 * done in a single instruction together with the inverses of SubBytes
 * and ShiftRows.
 *
 * It works because MixColumns is a linear operation over GF(2^8) and
 * AddRoundKey is an exclusive or, which is equivalent to addition over
 * GF(2^8). (The inverse of MixColumns needs to be applied to the
 * affected round keys separately, which has been done when the
 * decryption round keys were calculated.)
 *
 *      block = vaesimcq_u8(block);
 */
#define AESCE_DECRYPT_ROUND                   \
    block = vaesdq_u8(block, vld1q_u8(keys)); \
    block = vaesimcq_u8(block);               \
    keys += 16
/* Two rounds of AESCE decryption */
#define AESCE_DECRYPT_ROUND_X2        AESCE_DECRYPT_ROUND; AESCE_DECRYPT_ROUND

static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_DECRYPT_ROUND_X2;
rounds_12:
    AESCE_DECRYPT_ROUND_X2;
rounds_10:
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND;

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

/*
 * AES-ECB block en(de)cryption
 */
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

    if (mode == MBEDTLS_AES_ENCRYPT) {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    } else {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}

/*
 * Compute decryption round keys from encryption round keys
 */
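/*
 * The encryption round keys are consumed in reverse order, and InvMixColumns
 * (vaesimcq_u8) is applied to every round key except the first and the last,
 * as required by the equivalent inverse cipher (FIPS 197 section 5.3.5).
 */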
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));
}

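/* RotWord from FIPS 197 section 5.2: bytes [a0,a1,a2,a3] become
 * [a1,a2,a3,a0]. With little-endian 32-bit words this is a rotate
 * right by 8 bits. */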
static inline uint32_t aes_rot_word(uint32_t word)
{
    return (word << (32 - 8)) | (word >> 8);
}

static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result as ShiftRows doesn't change the first row. */
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}

/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     *   - Section 5, Nr = Nk + 6
     *   - Section 5.2, the length of round keys is Nb*(Nr+1)
     */
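    /* For example, AES-128: Nk = 4, Nr = 10, 44 round-key words;
     * AES-256: Nk = 8, Nr = 14, 60 round-key words. */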
    const uint32_t key_len_in_words = key_bit_length / 32;  /* Nk */
    const size_t round_key_len_in_words = 4;                /* Nb */
    const size_t rounds_needed = key_len_in_words + 6;      /* Nr */
    const size_t round_keys_len_in_words =
        round_key_len_in_words * (rounds_needed + 1);       /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + round_keys_len_in_words;

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write overflow words. */
            continue;
        }
#if !defined(MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH)
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
#endif /* !MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH */
    }
}

/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}

#if defined(MBEDTLS_GCM_C)

#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 5
/* Some intrinsics are not available for GCC 5.X. */
#define vreinterpretq_p64_u8(a) ((poly64x2_t) a)
#define vreinterpretq_u8_p128(a) ((uint8x16_t) a)
static inline poly64_t vget_low_p64(poly64x2_t __a)
{
    uint64x2_t tmp = (uint64x2_t) (__a);
    uint64x1_t lo = vcreate_u64(vgetq_lane_u64(tmp, 0));
    return (poly64_t) (lo);
}
#endif /* !__clang__ && __GNUC__ && __GNUC__ == 5 */

/* vmull_p64/vmull_high_p64 wrappers.
 *
 * Older compilers miss some intrinsic functions for `poly*_t`. We use
 * uint8x16_t and uint8x16x3_t as input/output parameters.
 */
#if defined(__GNUC__) && !defined(__clang__)
/* GCC reports an incompatible type error without the cast: it considers
 * poly64_t and poly64x1_t to be different types, unlike MSVC and Clang. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64((poly64_t) a, (poly64_t) b)
#else
/* MSVC reports `error C2440: 'type cast'` with the cast, while Clang accepts
 * the code with or without it (it treats poly64_t and poly64x1_t as the same
 * type), so we omit the cast for both. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64(a, b)
#endif
static inline uint8x16_t pmull_low(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        MBEDTLS_VMULL_P64(
            vget_low_p64(vreinterpretq_p64_u8(a)),
            vget_low_p64(vreinterpretq_p64_u8(b))
            ));
}

static inline uint8x16_t pmull_high(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_high_p64(vreinterpretq_p64_u8(a),
                       vreinterpretq_p64_u8(b)));
}

/* GHASH does a 128-bit polynomial multiplication on blocks in GF(2^128)
 * defined by `x^128 + x^7 + x^2 + x + 1`.
 *
 * Arm64 only has 64b->128b polynomial multipliers, so we need four 64-bit
 * multiplies to implement the 128-bit multiplication.
 *
 * `poly_mult_128` executes the polynomial multiplication and outputs 256 bits
 * represented by three 128-bit vectors, as a code size optimization.
 *
 * Output layout:
 * |            |             |             |
 * |------------|-------------|-------------|
 * | ret.val[0] | h3:h2:00:00 | high   128b |
 * | ret.val[1] |   :m2:m1:00 | middle 128b |
 * | ret.val[2] |   :  :l1:l0 | low    128b |
 */
static inline uint8x16x3_t poly_mult_128(uint8x16_t a, uint8x16_t b)
{
    uint8x16x3_t ret;
    uint8x16_t h, m, l; /* retval high/middle/low */
    uint8x16_t c, d, e;

    h = pmull_high(a, b);                       /* h3:h2:00:00 = a1*b1 */
    l = pmull_low(a, b);                        /*   :  :l1:l0 = a0*b0 */
    c = vextq_u8(b, b, 8);                      /*      :c1:c0 = b0:b1 */
    d = pmull_high(a, c);                       /*   :d2:d1:00 = a1*b0 */
    e = pmull_low(a, c);                        /*   :e2:e1:00 = a0*b1 */
    m = veorq_u8(d, e);                         /*   :m2:m1:00 = d + e */

    ret.val[0] = h;
    ret.val[1] = m;
    ret.val[2] = l;
    return ret;
}

/*
 * Modulo reduction.
 *
 * See: https://www.researchgate.net/publication/285612706_Implementing_GCM_on_ARMv8
 *
 * Section 4.3
 *
 * Modular reduction is slightly more complex. Write the GCM modulus as
 * f(z) = z^128 + r(z), where r(z) = z^7 + z^2 + z + 1. The well-known approach
 * is to consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing us to write
 * the 256-bit operand to be reduced as a(z) = h(z)z^128 + l(z) ≡ h(z)r(z) + l(z).
 * That is, we simply multiply the higher part of the operand by r(z) and add
 * it to l(z). If the result is still larger than 128 bits, we reduce again.
 */
static inline uint8x16_t poly_mult_reduce(uint8x16x3_t input)
{
    uint8x16_t const ZERO = vdupq_n_u8(0);

    uint64x2_t r = vreinterpretq_u64_u8(vdupq_n_u8(0x87));
#if defined(__GNUC__)
    /* Use 'asm' as an optimisation barrier to prevent loading MODULO from
     * memory; this applies to GNUC-compatible compilers.
     */
    asm ("" : "+w" (r));
#endif
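    /* 0x87 represents r(z) = z^7 + z^2 + z + 1. Shifting each 64-bit lane of
     * the repeated 0x87 pattern right by 56 bits leaves r(z) in the low byte
     * of both lanes. */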
    uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8));
    uint8x16_t h, m, l; /* input high/middle/low 128b */
    uint8x16_t c, d, e, f, g, n, o;
    h = input.val[0];            /* h3:h2:00:00                          */
    m = input.val[1];            /*   :m2:m1:00                          */
    l = input.val[2];            /*   :  :l1:l0                          */
    c = pmull_high(h, MODULO);   /*   :c2:c1:00 = reduction of h3        */
    d = pmull_low(h, MODULO);    /*   :  :d1:d0 = reduction of h2        */
    e = veorq_u8(c, m);          /*   :e2:e1:00 = m2:m1:00 + c2:c1:00    */
    f = pmull_high(e, MODULO);   /*   :  :f1:f0 = reduction of e2        */
    g = vextq_u8(ZERO, e, 8);    /*   :  :g1:00 = e1:00                  */
    n = veorq_u8(d, l);          /*   :  :n1:n0 = d1:d0 + l1:l0          */
    o = veorq_u8(n, f);          /*       o1:o0 = f1:f0 + n1:n0          */
    return veorq_u8(o, g);       /*             = o1:o0 + g1:00          */
}

/*
 * GCM multiplication: c = a times b in GF(2^128)
 */
void mbedtls_aesce_gcm_mult(unsigned char c[16],
                            const unsigned char a[16],
                            const unsigned char b[16])
{
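    /* GCM's bit order within each byte is the reverse of what PMULL expects,
     * so reverse the bits of each byte (vrbitq_u8) on input and output; see
     * the paper referenced above. */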
    uint8x16_t va, vb, vc;
    va = vrbitq_u8(vld1q_u8(&a[0]));
    vb = vrbitq_u8(vld1q_u8(&b[0]));
    vc = vrbitq_u8(poly_mult_reduce(poly_mult_128(va, vb)));
    vst1q_u8(&c[0], vc);
}

#endif /* MBEDTLS_GCM_C */

#if defined(MBEDTLS_POP_TARGET_PRAGMA)
#if defined(__clang__)
#pragma clang attribute pop
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
#undef MBEDTLS_POP_TARGET_PRAGMA
#endif

#endif /* MBEDTLS_ARCH_IS_ARM64 */

#endif /* MBEDTLS_AESCE_C */