/*
 *  Armv8-A Cryptographic Extension support functions for Aarch64
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#if defined(__aarch64__) && !defined(__ARM_FEATURE_CRYPTO) && \
    defined(__clang__) && __clang_major__ >= 4
/* TODO: Re-consider above after https://reviews.llvm.org/D131064 is merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations without
 * requiring -march on the command line.
 *
 * `arm_neon.h` could be included by any header file, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES    1
#define MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG
#endif

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_HAVE_ARM64)

#if !defined(__ARM_FEATURE_AES) || defined(MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG)
#   if defined(__clang__)
#       if __clang_major__ < 4
#           error "A more recent Clang is required for MBEDTLS_AESCE_C"
#       endif
#       pragma clang attribute push (__attribute__((target("crypto"))), apply_to=function)
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(__GNUC__)
#       if __GNUC__ < 6
#           error "A more recent GCC is required for MBEDTLS_AESCE_C"
#       endif
#       pragma GCC push_options
#       pragma GCC target ("arch=armv8-a+crypto")
#       define MBEDTLS_POP_TARGET_PRAGMA
#   else
#       error "Only GCC and Clang supported for MBEDTLS_AESCE_C"
#   endif
#endif /* !__ARM_FEATURE_AES || MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG */

#include <arm_neon.h>

#if defined(__linux__)
#include <asm/hwcap.h>
#include <sys/auxv.h>
#endif

/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support(void)
{
#if defined(__linux__)
    unsigned long auxval = getauxval(AT_HWCAP);
    return (auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
           (HWCAP_ASIMD | HWCAP_AES);
#else
    /* Assume AES instructions are supported. */
    return 1;
#endif
}
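
/* A caller would typically gate use of these routines on the runtime check
 * above, e.g. (hypothetical dispatch, not part of this file):
 *
 *     if (mbedtls_aesce_has_support()) {
 *         return mbedtls_aesce_crypt_ecb(ctx, mode, input, output);
 *     }
 */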

static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    for (int i = 0; i < rounds - 1; i++) {
        /* AES AddRoundKey, SubBytes, ShiftRows (in this order).
         * AddRoundKey adds the round key for the previous round. */
        block = vaeseq_u8(block, vld1q_u8(keys + i * 16));
        /* AES mix columns */
        block = vaesmcq_u8(block);
    }

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round. */
    block = vaeseq_u8(block, vld1q_u8(keys + (rounds - 1) * 16));

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys + rounds * 16));

    return block;
}

static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    for (int i = 0; i < rounds - 1; i++) {
        /* AES AddRoundKey, SubBytes, ShiftRows */
        block = vaesdq_u8(block, vld1q_u8(keys + i * 16));
        /* AES inverse MixColumns for the next round.
         *
         * This means that we switch the order of the inverse AddRoundKey and
         * inverse MixColumns operations. We have to do this as AddRoundKey is
         * done in an atomic instruction together with the inverses of SubBytes
         * and ShiftRows.
         *
         * It works because MixColumns is a linear operation over GF(2^8) and
         * AddRoundKey is an exclusive or, which is equivalent to addition over
         * GF(2^8). (The inverse of MixColumns needs to be applied to the
         * affected round keys separately, which has been done when the
         * decryption round keys were calculated.) */
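        /* Concretely, linearity gives
         *     InvMixColumns(state ^ rk) == InvMixColumns(state) ^ InvMixColumns(rk),
         * so applying vaesimcq_u8() to the round keys in advance (see
         * mbedtls_aesce_inverse_key() below) lets us use this reordered form. */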
        block = vaesimcq_u8(block);
    }

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys + (rounds - 1) * 16));

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys + rounds * 16));

    return block;
}

/*
 * AES-ECB block en(de)cryption
 */
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

    if (mode == MBEDTLS_AES_ENCRYPT) {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    } else {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}

/*
 * Compute decryption round keys from encryption round keys
 */
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));
}

static inline uint32_t aes_rot_word(uint32_t word)
{
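    /* FIPS-197 RotWord maps the bytes [a0,a1,a2,a3] to [a1,a2,a3,a0]; with
     * the word held in a little-endian uint32_t this is a right rotation by
     * 8 bits, e.g. aes_rot_word(0x01020304) == 0x04010203. */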
    return (word << (32 - 8)) | (word >> 8);
}

static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result as ShiftRows doesn't change the first row. */
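    /* E.g. the AES S-box maps 0x00 to 0x63, so aes_sub_word(0) == 0x63636363. */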
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}

/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     *   - Section 5, Nr = Nk + 6
     *   - Section 5.2, the length of round keys is Nb*(Nr+1)
     */
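    /* For example: AES-128 has Nk = 4, so Nr = 10 and 4*(10+1) = 44 round-key
     * words; AES-192: Nk = 6, Nr = 12, 52 words; AES-256: Nk = 8, Nr = 14,
     * 60 words. */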
    const uint32_t key_len_in_words = key_bit_length / 32;  /* Nk */
    const size_t round_key_len_in_words = 4;                /* Nb */
    const size_t rounds_needed = key_len_in_words + 6;      /* Nr */
    const size_t round_keys_len_in_words =
        round_key_len_in_words * (rounds_needed + 1);       /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + round_keys_len_in_words;

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write overflow words. */
            continue;
        }
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
    }
}

/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}
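
/* A caller is expected to pass the context's round-key buffer here, e.g.
 * (hypothetical call site, mirroring mbedtls_aesce_crypt_ecb() above;
 * `keybits` is an assumed variable name):
 *
 *     unsigned char *rk = (unsigned char *) (ctx->buf + ctx->rk_offset);
 *     int ret = mbedtls_aesce_setkey_enc(rk, key, keybits);
 */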

#if defined(MBEDTLS_GCM_C)

#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 5
/* Some intrinsics are not available for GCC 5.X. */
#define vreinterpretq_p64_u8(a) ((poly64x2_t) a)
#define vreinterpretq_u8_p128(a) ((uint8x16_t) a)
static inline poly64_t vget_low_p64(poly64x2_t __a)
{
    uint64x2_t tmp = (uint64x2_t) (__a);
    uint64x1_t lo = vcreate_u64(vgetq_lane_u64(tmp, 0));
    return (poly64_t) (lo);
}
#endif /* !__clang__ && __GNUC__ && __GNUC__ == 5 */

/* vmull_p64/vmull_high_p64 wrappers.
 *
 * Older compilers miss some intrinsic functions for `poly*_t`. We use
 * uint8x16_t and uint8x16x3_t as input/output parameters.
 */
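/* Note: vmull_p64 performs a carry-less 64x64->128b multiply of the low
 * halves (the PMULL instruction); vmull_high_p64 does the same on the high
 * halves (PMULL2). */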
static inline uint8x16_t pmull_low(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_p64(
            (poly64_t) vget_low_p64(vreinterpretq_p64_u8(a)),
            (poly64_t) vget_low_p64(vreinterpretq_p64_u8(b))));
}

static inline uint8x16_t pmull_high(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_high_p64(vreinterpretq_p64_u8(a),
                       vreinterpretq_p64_u8(b)));
}

/* GHASH does 128b polynomial multiplication on blocks in GF(2^128) defined by
 * `x^128 + x^7 + x^2 + x + 1`.
 *
 * Arm64 only has 64b->128b polynomial multipliers, so we need four 64b
 * multiplies to generate the full 256b product.
 *
 * `poly_mult_128` performs the polynomial multiplication and outputs the
 * 256b result spread across three 128b vectors, a layout chosen to keep the
 * code small.
 *
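 * Writing a = a1:a0 and b = b1:b0 as 64b halves, the product decomposes as
 *     a*b = (a1*b1)*x^128 + (a1*b0 + a0*b1)*x^64 + (a0*b0),
 * which is exactly the h/m/l split below.
 *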
 * Output layout:
 * | vector     | contents    | part        |
 * |------------|-------------|-------------|
 * | ret.val[0] | h3:h2:00:00 | high   128b |
 * | ret.val[1] |   :m2:m1:00 | middle 128b |
 * | ret.val[2] |   :  :l1:l0 | low    128b |
 */
static inline uint8x16x3_t poly_mult_128(uint8x16_t a, uint8x16_t b)
{
    uint8x16x3_t ret;
    uint8x16_t h, m, l; /* retval high/middle/low */
    uint8x16_t c, d, e;

    h = pmull_high(a, b);                       /* h3:h2:00:00 = a1*b1 */
    l = pmull_low(a, b);                        /*   :  :l1:l0 = a0*b0 */
    c = vextq_u8(b, b, 8);                      /*      :c1:c0 = b0:b1 */
    d = pmull_high(a, c);                       /*   :d2:d1:00 = a1*b0 */
    e = pmull_low(a, c);                        /*   :e2:e1:00 = a0*b1 */
    m = veorq_u8(d, e);                         /*   :m2:m1:00 = d + e */

    ret.val[0] = h;
    ret.val[1] = m;
    ret.val[2] = l;
    return ret;
}

/*
 * Modulo reduction.
 *
 * See: https://www.researchgate.net/publication/285612706_Implementing_GCM_on_ARMv8
 *
 * Section 4.3
 *
 * Modular reduction is slightly more complex. Write the GCM modulus as
 * f(z) = z^128 + r(z), where r(z) = z^7 + z^2 + z + 1. The well-known
 * approach is to consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing us
 * to write the 256-bit operand to be reduced as
 * a(z) = h(z)*z^128 + l(z) ≡ h(z)*r(z) + l(z). That is, we simply multiply
 * the higher part of the operand by r(z) and add it to l(z). If the result
 * is still larger than 128 bits, we reduce again.
 */
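/* Note: r(z) = z^7 + z^2 + z + 1 corresponds to the byte 0x87 (0b10000111),
 * which is the value splatted into MODULO below. */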
static inline uint8x16_t poly_mult_reduce(uint8x16x3_t input)
{
    uint8x16_t const ZERO = vdupq_n_u8(0);
    /* use 'asm' as an optimisation barrier to prevent loading MODULO from memory */
    uint64x2_t r = vreinterpretq_u64_u8(vdupq_n_u8(0x87));
    asm ("" : "+w" (r));
    uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8));
    uint8x16_t h, m, l; /* input high/middle/low 128b */
    uint8x16_t c, d, e, f, g, n, o;
    h = input.val[0];            /* h3:h2:00:00                          */
    m = input.val[1];            /*   :m2:m1:00                          */
    l = input.val[2];            /*   :  :l1:l0                          */
    c = pmull_high(h, MODULO);   /*   :c2:c1:00 = reduction of h3        */
    d = pmull_low(h, MODULO);    /*   :  :d1:d0 = reduction of h2        */
    e = veorq_u8(c, m);          /*   :e2:e1:00 = m2:m1:00 + c2:c1:00    */
    f = pmull_high(e, MODULO);   /*   :  :f1:f0 = reduction of e2        */
    g = vextq_u8(ZERO, e, 8);    /*   :  :g1:00 = e1:00                  */
    n = veorq_u8(d, l);          /*   :  :n1:n0 = d1:d0 + l1:l0          */
    o = veorq_u8(n, f);          /*       o1:o0 = f1:f0 + n1:n0          */
    return veorq_u8(o, g);       /*             = o1:o0 + g1:00          */
}

/*
 * GCM multiplication: c = a times b in GF(2^128)
 */
void mbedtls_aesce_gcm_mult(unsigned char c[16],
                            const unsigned char a[16],
                            const unsigned char b[16])
{
    uint8x16_t va, vb, vc;
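    /* GCM's GF(2^128) uses a bit-reflected representation, so reverse the
     * bits within each byte (vrbitq_u8) before and after the carry-less
     * multiply. */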
    va = vrbitq_u8(vld1q_u8(&a[0]));
    vb = vrbitq_u8(vld1q_u8(&b[0]));
    vc = vrbitq_u8(poly_mult_reduce(poly_mult_128(va, vb)));
    vst1q_u8(&c[0], vc);
}

#endif /* MBEDTLS_GCM_C */

#if defined(MBEDTLS_POP_TARGET_PRAGMA)
#if defined(__clang__)
#pragma clang attribute pop
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
#undef MBEDTLS_POP_TARGET_PRAGMA
#endif

#endif /* MBEDTLS_HAVE_ARM64 */

#endif /* MBEDTLS_AESCE_C */