/*
 * GCM block cipher, ESP DMA hardware accelerated version
 * Based on mbedTLS FIPS-197 compliant version.
 *
 * SPDX-FileCopyrightText: The Mbed TLS Contributors
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * SPDX-FileContributor: 2016-2024 Espressif Systems (Shanghai) CO LTD
 */
/*
 * The AES block cipher was designed by Vincent Rijmen and Joan Daemen.
 *
 * http://csrc.nist.gov/encryption/aes/rijndael/Rijndael.pdf
 * http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
 */
#include <string.h>

#include "aes/esp_aes.h"
#include "aes/esp_aes_gcm.h"
#include "aes/esp_aes_internal.h"
#include "hal/aes_hal.h"

#include "mbedtls/aes.h"
#include "mbedtls/error.h"
#include "mbedtls/gcm.h"

#include "esp_heap_caps.h"
#include "esp_log.h"
#include "soc/soc_caps.h"
#include "soc/soc_memory_layout.h"

#include "sdkconfig.h"

#if SOC_AES_SUPPORT_DMA
#include "esp_aes_dma_priv.h"
#endif

#define ESP_PUT_BE64(a, val)                                     \
    do {                                                         \
        *(uint64_t*)(a) = __builtin_bswap64( (uint64_t)(val) );  \
    } while (0)
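
/* Example (informational): the ESP targets are little-endian, so the byte swap
   above stores `val` in big-endian byte order, e.g.
   ESP_PUT_BE64(buf, 0x0102030405060708ULL) leaves buf[0..7] = 01 02 03 04 05 06 07 08. */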

/* For simplicity, limit the maximum number of AAD bytes to a single DMA descriptor.
   This should cover all normal, e.g. mbedtls, use cases */
#define ESP_AES_GCM_AAD_MAX_BYTES 4080

static const char *TAG = "esp-aes-gcm";

static void esp_gcm_ghash(esp_gcm_context *ctx, const unsigned char *x, size_t x_len, uint8_t *z);

/*
 * Calculates the Initial Counter Block, J0,
 * and copies it to the esp_gcm_context
 */
static void esp_gcm_derive_J0(esp_gcm_context *ctx)
{
    uint8_t len_buf[16];

    memset(ctx->J0, 0, AES_BLOCK_BYTES);
    memset(len_buf, 0, AES_BLOCK_BYTES);

    /* If IV is 96 bits, J0 = ( IV || 0^31 || 1 ) */
    if (ctx->iv_len == 12) {
        memcpy(ctx->J0, ctx->iv, ctx->iv_len);
        ctx->J0[AES_BLOCK_BYTES - 1] |= 1;
    } else {
        /* For IV != 96 bits, J0 = GHASH(IV || 0^(s+64) || [len(IV)]_64) */
        /* First calculate GHASH on the IV */
        esp_gcm_ghash(ctx, ctx->iv, ctx->iv_len, ctx->J0);
        /* Next create a 128-bit block consisting of 64 zero bits
           followed by the 64-bit IV length in bits */
        ESP_PUT_BE64(len_buf + 8, ctx->iv_len * 8);
        /* Calculate GHASH on the last block */
        esp_gcm_ghash(ctx, len_buf, 16, ctx->J0);
    }
}
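
/* Illustration (informational): with the common 12-byte IV, e.g.
   IV = 00 01 02 03 04 05 06 07 08 09 0A 0B, the code above produces
   J0 = 00 01 02 03 04 05 06 07 08 09 0A 0B 00 00 00 01, i.e. the IV
   followed by a 32-bit counter initialised to 1 (see NIST SP 800-38D). */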


/*
 * Increment J0 as per the GCM spec, by applying the Standard Incrementing
 * Function INC_32 to it.
 * j is the counter to be incremented; it is copied back to ctx->J0
 * after incrementing
 */
static void increment32_j0(esp_gcm_context *ctx, uint8_t *j)
{
    uint8_t j_len = AES_BLOCK_BYTES;
    memcpy(j, ctx->J0, AES_BLOCK_BYTES);
    if (j) {
        for (uint32_t i = j_len; i > (j_len - 4); i--) {
            if (++j[i - 1] != 0) {
                break;
            }
        }
        memcpy(ctx->J0, j, AES_BLOCK_BYTES);
    }
}
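
/* Example (informational): only the last 32 bits of the counter block are
   incremented, so a counter ending in FF FF FF FF wraps to 00 00 00 00 while
   the upper 96 bits of J0 stay unchanged. */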

/* Function to xor two data blocks */
static void xor_data(uint8_t *d, const uint8_t *s)
{
    for (int i = 0; i < AES_BLOCK_BYTES; i++) {
        d[i] ^= s[i];
    }
}


/*
 * 32-bit integer manipulation macros (big endian)
 */
#ifndef GET_UINT32_BE
#define GET_UINT32_BE(n,b,i)                            \
    {                                                   \
        (n) = ( (uint32_t) (b)[(i)    ] << 24 )         \
            | ( (uint32_t) (b)[(i) + 1] << 16 )         \
            | ( (uint32_t) (b)[(i) + 2] <<  8 )         \
            | ( (uint32_t) (b)[(i) + 3]       );        \
    }
#endif

#ifndef PUT_UINT32_BE
#define PUT_UINT32_BE(n,b,i)                            \
    {                                                   \
        (b)[(i)    ] = (unsigned char) ( (n) >> 24 );   \
        (b)[(i) + 1] = (unsigned char) ( (n) >> 16 );   \
        (b)[(i) + 2] = (unsigned char) ( (n) >>  8 );   \
        (b)[(i) + 3] = (unsigned char) ( (n)       );   \
    }
#endif

/* Based on MbedTLS's implementation
 *
 * Precompute small multiples of H, that is set
 * HH[i] || HL[i] = H times i,
 * where i is seen as a field element as in [MGV], i.e. high-order bits
 * correspond to low powers of P. The result is stored in the same way, that
 * is the high-order bit of HH corresponds to P^0 and the low-order bit of HL
 * corresponds to P^127.
 */
static int gcm_gen_table( esp_gcm_context *ctx )
{
    int i, j;
    uint64_t hi, lo;
    uint64_t vl, vh;
    unsigned char *h;

    h = ctx->H;

    /* pack h as two 64-bit ints, big-endian */
    GET_UINT32_BE( hi, h, 0 );
    GET_UINT32_BE( lo, h, 4 );
    vh = (uint64_t) hi << 32 | lo;

    GET_UINT32_BE( hi, h, 8 );
    GET_UINT32_BE( lo, h, 12 );
    vl = (uint64_t) hi << 32 | lo;

    /* 8 = 1000 corresponds to 1 in GF(2^128) */
    ctx->HL[8] = vl;
    ctx->HH[8] = vh;

    /* 0 corresponds to 0 in GF(2^128) */
    ctx->HH[0] = 0;
    ctx->HL[0] = 0;

    for ( i = 4; i > 0; i >>= 1 ) {
        uint32_t T = ( vl & 1 ) * 0xe1000000U;
        vl = ( vh << 63 ) | ( vl >> 1 );
        vh = ( vh >> 1 ) ^ ( (uint64_t) T << 32);

        ctx->HL[i] = vl;
        ctx->HH[i] = vh;
    }

    for ( i = 2; i <= 8; i *= 2 ) {
        uint64_t *HiL = ctx->HL + i, *HiH = ctx->HH + i;
        vh = *HiH;
        vl = *HiL;
        for ( j = 1; j < i; j++ ) {
            HiH[j] = vh ^ ctx->HH[j];
            HiL[j] = vl ^ ctx->HL[j];
        }
    }

    return ( 0 );
}
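
/* Note (informational): after gcm_gen_table() runs, HH[i] || HL[i] holds the
   GF(2^128) product i * H for every 4-bit index i = 0..15. gcm_mult() below
   consumes its input one 4-bit nibble at a time against these tables (Shoup's
   method), using the last4[] table defined below for the modular reduction. */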
/*
 * Shoup's method for multiplication uses this table with
 * last4[x] = x times P^128
 * where x and last4[x] are seen as elements of GF(2^128) as in [MGV]
 */
static const uint32_t last4[16] = {
    0x00000000, 0x1c200000, 0x38400000, 0x24600000,
    0x70800000, 0x6ca00000, 0x48c00000, 0x54e00000,
    0xe1000000, 0xfd200000, 0xd9400000, 0xc5600000,
    0x91800000, 0x8da00000, 0xa9c00000, 0xb5e00000
};
/* Based on MbedTLS's implementation
 *
 * Sets output to x times H using the precomputed tables.
 * x and output are seen as elements of GF(2^128) as in [MGV].
 */
static void gcm_mult( esp_gcm_context *ctx, const unsigned char x[16],
                      unsigned char output[16] )
{
    int i = 0;
    unsigned char lo, hi, rem;
    uint64_t zh, zl;

    lo = x[15] & 0xf;
    hi = x[15] >> 4;

    zh = ctx->HH[lo];
    zl = ctx->HL[lo];

    rem = (unsigned char) zl & 0xf;
    zl = ( zh << 60 ) | ( zl >> 4 );
    zh = ( zh >> 4 );
    zh ^= (uint64_t) last4[rem] << 32;
    zh ^= ctx->HH[hi];
    zl ^= ctx->HL[hi];

    for ( i = 14; i >= 0; i-- ) {
        lo = x[i] & 0xf;
        hi = x[i] >> 4;

        rem = (unsigned char) zl & 0xf;
        zl = ( zh << 60 ) | ( zl >> 4 );
        zh = ( zh >> 4 );
        zh ^= (uint64_t) last4[rem] << 32;
        zh ^= ctx->HH[lo];
        zl ^= ctx->HL[lo];

        rem = (unsigned char) zl & 0xf;
        zl = ( zh << 60 ) | ( zl >> 4 );
        zh = ( zh >> 4 );
        zh ^= (uint64_t) last4[rem] << 32;
        zh ^= ctx->HH[hi];
        zl ^= ctx->HL[hi];
    }

    PUT_UINT32_BE( zh >> 32, output, 0 );
    PUT_UINT32_BE( zh, output, 4 );
    PUT_UINT32_BE( zl >> 32, output, 8 );
    PUT_UINT32_BE( zl, output, 12 );
}


/* Update the key value in the gcm context */
int esp_aes_gcm_setkey( esp_gcm_context *ctx,
                        mbedtls_cipher_id_t cipher,
                        const unsigned char *key,
                        unsigned int keybits )
{
    /* Fall back to the software implementation of the GCM operation when a
     * non-AES cipher is selected, as we support hardware acceleration only
     * for a GCM operation using the AES cipher.
     */
#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        mbedtls_gcm_free_soft(ctx->ctx_soft);
        free(ctx->ctx_soft);
        ctx->ctx_soft = NULL;
    }

    if (cipher != MBEDTLS_CIPHER_ID_AES) {
        ctx->ctx_soft = (mbedtls_gcm_context_soft*) malloc(sizeof(mbedtls_gcm_context_soft));
        if (ctx->ctx_soft == NULL) {
            return MBEDTLS_ERR_CIPHER_ALLOC_FAILED;
        }
        mbedtls_gcm_init_soft(ctx->ctx_soft);
        return mbedtls_gcm_setkey_soft(ctx->ctx_soft, cipher, key, keybits);
    }
#endif

#if !SOC_AES_SUPPORT_AES_192
    if (keybits == 192) {
        return MBEDTLS_ERR_PLATFORM_FEATURE_UNSUPPORTED;
    }
#endif
    if (keybits != 128 && keybits != 192 && keybits != 256) {
        return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    ctx->aes_ctx.key_bytes = keybits / 8;

    memcpy(ctx->aes_ctx.key, key, ctx->aes_ctx.key_bytes);

    return ( 0 );
}


/* AES-GCM GHASH calculation z = GHASH(x) using the hash key H
 */
static void esp_gcm_ghash(esp_gcm_context *ctx, const unsigned char *x, size_t x_len, uint8_t *z)
{

    uint8_t tmp[AES_BLOCK_BYTES];

    memset(tmp, 0, AES_BLOCK_BYTES);
    /* GHASH(X) is calculated on an input string whose length is a multiple of
     * 128 bits. If the input bit length is not a multiple of 128 bits it needs
     * to be padded with 0s.
     *
     * Steps:
     * 1. Let X1, X2, ... , Xm-1, Xm denote the unique sequence of blocks such
     *    that X = X1 || X2 || ... || Xm-1 || Xm.
     * 2. Let Y0 be the "zero block", 0^128.
     * 3. For i = 1, ..., m, let Yi = (Yi-1 ^ Xi) * H.
     * 4. Return Ym.
     */

    /* If the input bit string is >= 128 bits, process full 128-bit blocks */
    while (x_len >= AES_BLOCK_BYTES) {

        xor_data(z, x);
        gcm_mult(ctx, z, z);

        x += AES_BLOCK_BYTES;
        x_len -= AES_BLOCK_BYTES;
    }

    /* If the input bit string is not a multiple of 128 bits, create the last
     * 128-bit block by padding with the necessary 0s
     */
    if (x_len) {
        memcpy(tmp, x, x_len);
        xor_data(z, tmp);
        gcm_mult(ctx, z, z);
    }
}


/* Function to init AES GCM context to zero */
void esp_aes_gcm_init( esp_gcm_context *ctx)
{
    if (ctx == NULL) {
        return;
    }

    bzero(ctx, sizeof(esp_gcm_context));

#if SOC_AES_SUPPORT_DMA && CONFIG_MBEDTLS_AES_USE_INTERRUPT
    esp_aes_intr_alloc();
#endif

    ctx->gcm_state = ESP_AES_GCM_STATE_INIT;
}

/* Function to clear AES-GCM context */
void esp_aes_gcm_free( esp_gcm_context *ctx)
{
    if (ctx == NULL) {
        return;
    }
#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        mbedtls_gcm_free_soft(ctx->ctx_soft);
        free(ctx->ctx_soft);
        /* Note that ctx->ctx_soft should be NULL'ed out afterwards;
           here that is taken care of by the bzero call below */
    }
#endif
    bzero(ctx, sizeof(esp_gcm_context));
}

/* Setup AES-GCM */
int esp_aes_gcm_starts( esp_gcm_context *ctx,
                        int mode,
                        const unsigned char *iv,
                        size_t iv_len )
{
    if (!ctx) {
        ESP_LOGE(TAG, "No AES context supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        return mbedtls_gcm_starts_soft(ctx->ctx_soft, mode, iv, iv_len);
    }
#endif

    /* IV is limited to 2^32 bits, so 2^29 bytes */
    /* IV is not allowed to be zero length */
    if ( iv_len == 0 ||
            ( (uint32_t) iv_len ) >> 29 != 0 ) {
        return ( MBEDTLS_ERR_GCM_BAD_INPUT );
    }

    if (!iv) {
        ESP_LOGE(TAG, "No IV supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

    /* Initialize AES-GCM context */
    memset(ctx->ghash, 0, sizeof(ctx->ghash));
    ctx->data_len = 0;
    ctx->aad = NULL;
    ctx->aad_len = 0;

    ctx->iv = iv;
    ctx->iv_len = iv_len;
    ctx->mode = mode;

    /* H and the lookup table are only generated once per ctx */
    if (ctx->gcm_state == ESP_AES_GCM_STATE_INIT) {
        /* Lock the AES engine to calculate the ghash key H in hardware */
#if CONFIG_MBEDTLS_HARDWARE_GCM
        esp_aes_acquire_hardware();
        ctx->aes_ctx.key_in_hardware = aes_hal_setkey(ctx->aes_ctx.key, ctx->aes_ctx.key_bytes, mode);
        aes_hal_mode_init(ESP_AES_BLOCK_MODE_GCM);

        aes_hal_gcm_calc_hash(ctx->H);

        esp_aes_release_hardware();
#else
        memset(ctx->H, 0, sizeof(ctx->H));
        int ret = esp_aes_crypt_ecb(&ctx->aes_ctx, MBEDTLS_AES_ENCRYPT, ctx->H, ctx->H);
        if (ret != 0) {
            return ret;
        }
#endif
        gcm_gen_table(ctx);
    }

    /* Once H is obtained we need to derive J0 (Initial Counter Block) */
    esp_gcm_derive_J0(ctx);

    /* The initial counter block keeps updating during the esp_gcm_update calls;
     * however, to calculate the final authentication tag T we need the original J0,
     * so we make a copy here
     */
    memcpy(ctx->ori_j0, ctx->J0, 16);

    ctx->gcm_state = ESP_AES_GCM_STATE_START;

    return ( 0 );
}

int esp_aes_gcm_update_ad( esp_gcm_context *ctx,
                           const unsigned char *aad,
                           size_t aad_len )
{
    if (!ctx) {
        ESP_LOGE(TAG, "No AES context supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        return mbedtls_gcm_update_ad_soft(ctx->ctx_soft, aad, aad_len);
    }
#endif

    /* AD is limited to 2^32 bits, so 2^29 bytes */
    if ( ( (uint32_t) aad_len ) >> 29 != 0 ) {
        return ( MBEDTLS_ERR_GCM_BAD_INPUT );
    }

    if ( (aad_len > 0) && !aad) {
        ESP_LOGE(TAG, "No aad supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

    if (ctx->gcm_state != ESP_AES_GCM_STATE_START) {
        ESP_LOGE(TAG, "AES context in invalid state!");
        return -1;
    }

    /* Initialise associated data */
    ctx->aad = aad;
    ctx->aad_len = aad_len;

    esp_gcm_ghash(ctx, ctx->aad, ctx->aad_len, ctx->ghash);

    return ( 0 );
}

/* Perform AES-GCM operation */
int esp_aes_gcm_update( esp_gcm_context *ctx,
                        const unsigned char *input, size_t input_length,
                        unsigned char *output, size_t output_size,
                        size_t *output_length )
{
    if (!ctx) {
        ESP_LOGE(TAG, "No GCM context supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        return mbedtls_gcm_update_soft(ctx->ctx_soft, input, input_length, output, output_size, output_length);
    }
#endif

    size_t nc_off = 0;
    uint8_t nonce_counter[AES_BLOCK_BYTES] = {0};
    uint8_t stream[AES_BLOCK_BYTES] = {0};

    if (!output_length) {
        ESP_LOGE(TAG, "No output length supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }
    *output_length = input_length;

    if (!input) {
        ESP_LOGE(TAG, "No input supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }
    if (!output) {
        ESP_LOGE(TAG, "No output supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

    if ( output > input && (size_t) ( output - input ) < input_length ) {
        return ( MBEDTLS_ERR_GCM_BAD_INPUT );
    }
    /* If this is the first time esp_gcm_update is called, pre-increment the ICB
     * (the AAD GHASH has already been absorbed in esp_aes_gcm_update_ad)
     */
    if (ctx->gcm_state == ESP_AES_GCM_STATE_START) {
        /* J0 needs to be incremented the first time; afterwards the CTR
         * operation will auto-update it
         */
        increment32_j0(ctx, nonce_counter);
        ctx->gcm_state = ESP_AES_GCM_STATE_UPDATE;
    } else if (ctx->gcm_state == ESP_AES_GCM_STATE_UPDATE) {
        memcpy(nonce_counter, ctx->J0, AES_BLOCK_BYTES);
    }

    /* Perform intermediate GHASH on "encrypted" data during decryption */
    if (ctx->mode == ESP_AES_DECRYPT) {
        esp_gcm_ghash(ctx, input, input_length, ctx->ghash);
    }

    /* Output = GCTR(J0, Input): Encrypt/Decrypt the input */
    int ret = esp_aes_crypt_ctr(&ctx->aes_ctx, input_length, &nc_off, nonce_counter, stream, input, output);
    if (ret != 0) {
        return ret;
    }

    /* The ICB gets auto-incremented by the GCTR operation above, so update the context */
    memcpy(ctx->J0, nonce_counter, AES_BLOCK_BYTES);

    /* Keep updating the length counter for the final tag calculation */
    ctx->data_len += input_length;

    /* Perform intermediate GHASH on "encrypted" data during encryption */
    if (ctx->mode == ESP_AES_ENCRYPT) {
        esp_gcm_ghash(ctx, output, input_length, ctx->ghash);
    }

    return 0;
}

/* Function to read the tag value */
int esp_aes_gcm_finish( esp_gcm_context *ctx,
                        unsigned char *output, size_t output_size,
                        size_t *output_length,
                        unsigned char *tag, size_t tag_len )
{
#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        return mbedtls_gcm_finish_soft(ctx->ctx_soft, output, output_size, output_length, tag, tag_len);
    }
#endif
    size_t nc_off = 0;
    uint8_t len_block[AES_BLOCK_BYTES] = {0};
    uint8_t stream[AES_BLOCK_BYTES] = {0};

    if ( tag_len > 16 || tag_len < 4 ) {
        return ( MBEDTLS_ERR_GCM_BAD_INPUT );
    }

    /* Calculate the final GHASH on aad_len and data length */
    ESP_PUT_BE64(len_block, ctx->aad_len * 8);
    ESP_PUT_BE64(len_block + 8, ctx->data_len * 8);
    esp_gcm_ghash(ctx, len_block, AES_BLOCK_BYTES, ctx->ghash);

    /* Tag T = GCTR(J0, S), where S is the final GHASH value, truncated to tag_len */
    return esp_aes_crypt_ctr(&ctx->aes_ctx, tag_len, &nc_off, ctx->ori_j0, stream, ctx->ghash, tag);
}
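
/* Usage sketch (informational, not compiled): the streaming API above is driven
 * in the order init -> setkey -> starts -> update_ad -> update -> finish -> free.
 * The buffer names and sizes below are placeholders chosen only for illustration,
 * and error handling is reduced to early exits.
 */
#if 0
static int example_gcm_encrypt(const unsigned char key[16],
                               const unsigned char iv[12],
                               const unsigned char *aad, size_t aad_len,
                               const unsigned char *plaintext, size_t len,
                               unsigned char *ciphertext, unsigned char tag[16])
{
    esp_gcm_context gcm;
    size_t olen = 0;
    int ret;

    esp_aes_gcm_init(&gcm);

    ret = esp_aes_gcm_setkey(&gcm, MBEDTLS_CIPHER_ID_AES, key, 128);
    if (ret == 0) {
        ret = esp_aes_gcm_starts(&gcm, ESP_AES_ENCRYPT, iv, 12);
    }
    if (ret == 0) {
        ret = esp_aes_gcm_update_ad(&gcm, aad, aad_len);
    }
    if (ret == 0) {
        /* Single-part update shown here */
        ret = esp_aes_gcm_update(&gcm, plaintext, len, ciphertext, len, &olen);
    }
    if (ret == 0) {
        /* No further output is produced; only the tag is read back */
        ret = esp_aes_gcm_finish(&gcm, NULL, 0, &olen, tag, 16);
    }

    esp_aes_gcm_free(&gcm);
    return ret;
}
#endif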

#if CONFIG_MBEDTLS_HARDWARE_GCM
/* Due to restrictions in the hardware (e.g. the need to do the whole conversion
   in one go), some combinations of inputs are not supported */
static bool esp_aes_gcm_input_support_hw_accel(size_t length, const unsigned char *aad, size_t aad_len,
                                               const unsigned char *input, unsigned char *output, uint8_t *stream_in)
{
    bool support_hw_accel = true;

    if (aad_len > ESP_AES_GCM_AAD_MAX_BYTES) {
        support_hw_accel = false;
    } else if (!esp_ptr_dma_capable(aad) && aad_len > 0) {
        /* aad not in internal, DMA-capable memory */
        support_hw_accel = false;
    } else if (!esp_ptr_dma_capable(input) && length > 0) {
        /* input not in internal, DMA-capable memory */
        support_hw_accel = false;
    } else if (!esp_ptr_dma_capable(output) && length > 0) {
        /* output not in internal, DMA-capable memory */
        support_hw_accel = false;
    } else if (!esp_ptr_dma_capable(stream_in)) {
        /* Stream in (and therefore other descriptors and buffers that come from the stack)
           not in internal, DMA-capable memory */
        support_hw_accel = false;
    } else if (length == 0) {
        support_hw_accel = false;
    }

    return support_hw_accel;
}
#endif

static int esp_aes_gcm_crypt_and_tag_partial_hw( esp_gcm_context *ctx,
        int mode,
        size_t length,
        const unsigned char *iv,
        size_t iv_len,
        const unsigned char *aad,
        size_t aad_len,
        const unsigned char *input,
        unsigned char *output,
        size_t tag_len,
        unsigned char *tag )
{
    int ret = 0;
    size_t olen;

    if ( ( ret = esp_aes_gcm_starts( ctx, mode, iv, iv_len ) ) != 0 ) {
        return ( ret );
    }

    if ( ( ret = esp_aes_gcm_update_ad( ctx, aad, aad_len ) ) != 0 ) {
        return ( ret );
    }

    if ( ( ret = esp_aes_gcm_update( ctx, input, length, output, 0, &olen ) ) != 0 ) {
        return ( ret );
    }

    if ( ( ret = esp_aes_gcm_finish( ctx, output, 0, &olen, tag, tag_len ) ) != 0 ) {
        return ( ret );
    }

    return ret;
}

int esp_aes_gcm_crypt_and_tag( esp_gcm_context *ctx,
                               int mode,
                               size_t length,
                               const unsigned char *iv,
                               size_t iv_len,
                               const unsigned char *aad,
                               size_t aad_len,
                               const unsigned char *input,
                               unsigned char *output,
                               size_t tag_len,
                               unsigned char *tag )
{
    if (!ctx) {
        ESP_LOGE(TAG, "No AES context supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        return mbedtls_gcm_crypt_and_tag_soft(ctx->ctx_soft, mode, length, iv, iv_len, aad, aad_len, input, output, tag_len, tag);
    }
#endif
#if CONFIG_MBEDTLS_HARDWARE_GCM
    int ret;
    lldesc_t aad_desc[2] = {};
    lldesc_t *aad_head_desc = NULL;
    size_t remainder_bit;
    uint8_t stream_in[AES_BLOCK_BYTES] = {};
    unsigned stream_bytes = aad_len % AES_BLOCK_BYTES; // bytes which aren't in a full block
    unsigned block_bytes = aad_len - stream_bytes;     // bytes which are in a full block

    /* Due to hardware limitations only certain cases are fully supported in HW */
    if (!esp_aes_gcm_input_support_hw_accel(length, aad, aad_len, input, output, stream_in)) {
        return esp_aes_gcm_crypt_and_tag_partial_hw(ctx, mode, length, iv, iv_len, aad, aad_len, input, output, tag_len, tag);
    }

    /* Limit the aad length to a single DMA descriptor to simplify DMA handling.
       In practice, e.g. with mbedtls, the length of the aad will always be short.
     */
    if (aad_len > LLDESC_MAX_NUM_PER_DESC) {
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }
    /* IV and AD are limited to 2^32 bits, so 2^29 bytes */
    /* IV is not allowed to be zero length */
    if ( iv_len == 0 ||
            ( (uint32_t) iv_len ) >> 29 != 0 ||
            ( (uint32_t) aad_len ) >> 29 != 0 ) {
        return ( MBEDTLS_ERR_GCM_BAD_INPUT );
    }

    if (!iv) {
        ESP_LOGE(TAG, "No IV supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

    if ( (aad_len > 0) && !aad) {
        ESP_LOGE(TAG, "No aad supplied");
        return MBEDTLS_ERR_GCM_BAD_INPUT;
    }

    /* Initialize AES-GCM context */
    memset(ctx->ghash, 0, sizeof(ctx->ghash));
    ctx->data_len = 0;

    ctx->iv = iv;
    ctx->iv_len = iv_len;
    ctx->aad = aad;
    ctx->aad_len = aad_len;
    ctx->mode = mode;

    esp_aes_acquire_hardware();
    ctx->aes_ctx.key_in_hardware = 0;
    ctx->aes_ctx.key_in_hardware = aes_hal_setkey(ctx->aes_ctx.key, ctx->aes_ctx.key_bytes, mode);

    if (block_bytes > 0) {
        aad_desc[0].length = block_bytes;
        aad_desc[0].size = block_bytes;
        aad_desc[0].owner = 1;
        aad_desc[0].buf = aad;
    }

    if (stream_bytes > 0) {
        memcpy(stream_in, aad + block_bytes, stream_bytes);

        aad_desc[0].empty = (uint32_t)&aad_desc[1];
        aad_desc[1].length = AES_BLOCK_BYTES;
        aad_desc[1].size = AES_BLOCK_BYTES;
        aad_desc[1].owner = 1;
        aad_desc[1].buf = stream_in;
    }

    if (block_bytes > 0) {
        aad_head_desc = &aad_desc[0];
    } else if (stream_bytes > 0) {
        aad_head_desc = &aad_desc[1];
    }

    aes_hal_mode_init(ESP_AES_BLOCK_MODE_GCM);

    /* See the TRM GCM chapter for a description of this calculation */
    remainder_bit = (8 * length) % 128;
    aes_hal_gcm_init( (aad_len + AES_BLOCK_BYTES - 1) / AES_BLOCK_BYTES, remainder_bit);
    aes_hal_gcm_calc_hash(ctx->H);

    gcm_gen_table(ctx);
    esp_gcm_derive_J0(ctx);

    aes_hal_gcm_set_j0(ctx->J0);

    ret = esp_aes_process_dma_gcm(&ctx->aes_ctx, input, output, length, aad_head_desc, aad_len);
    if (ret != 0) {
        esp_aes_release_hardware();
        return ret;
    }

    aes_hal_gcm_read_tag(tag, tag_len);

    esp_aes_release_hardware();

    return ( ret );
#else
    return esp_aes_gcm_crypt_and_tag_partial_hw(ctx, mode, length, iv, iv_len, aad, aad_len, input, output, tag_len, tag);
#endif
}


int esp_aes_gcm_auth_decrypt( esp_gcm_context *ctx,
                              size_t length,
                              const unsigned char *iv,
                              size_t iv_len,
                              const unsigned char *aad,
                              size_t aad_len,
                              const unsigned char *tag,
                              size_t tag_len,
                              const unsigned char *input,
                              unsigned char *output )
{
#if defined(MBEDTLS_GCM_NON_AES_CIPHER_SOFT_FALLBACK)
    if (ctx->ctx_soft != NULL) {
        return mbedtls_gcm_auth_decrypt_soft(ctx->ctx_soft, length, iv, iv_len, aad, aad_len, tag, tag_len, input, output);
    }
#endif
    int ret;
    unsigned char check_tag[16];
    size_t i;
    int diff;

    if ( ( ret = esp_aes_gcm_crypt_and_tag( ctx, ESP_AES_DECRYPT, length,
                                            iv, iv_len, aad, aad_len,
                                            input, output, tag_len, check_tag ) ) != 0 ) {
        return ( ret );
    }

    /* Check tag in "constant-time" */
    for ( diff = 0, i = 0; i < tag_len; i++ ) {
        diff |= tag[i] ^ check_tag[i];
    }

    if ( diff != 0 ) {
        bzero( output, length );
        return ( MBEDTLS_ERR_GCM_AUTH_FAILED );
    }

    return ( 0 );
}
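
/* Usage sketch (informational, not compiled): one-shot encrypt-and-tag followed by
 * the matching authenticated decrypt. Key, IV, AAD and message contents are
 * placeholders chosen only for illustration.
 */
#if 0
static int example_gcm_one_shot(void)
{
    static const unsigned char key[16] = { 0 };   /* placeholder key */
    static const unsigned char iv[12]  = { 0 };   /* placeholder 96-bit IV */
    static const unsigned char aad[8]  = { 0 };   /* placeholder AAD */
    unsigned char plaintext[32] = "example payload";
    unsigned char ciphertext[32];
    unsigned char decrypted[32];
    unsigned char tag[16];
    esp_gcm_context gcm;
    int ret;

    esp_aes_gcm_init(&gcm);
    ret = esp_aes_gcm_setkey(&gcm, MBEDTLS_CIPHER_ID_AES, key, 128);

    if (ret == 0) {
        ret = esp_aes_gcm_crypt_and_tag(&gcm, ESP_AES_ENCRYPT, sizeof(plaintext),
                                        iv, sizeof(iv), aad, sizeof(aad),
                                        plaintext, ciphertext, sizeof(tag), tag);
    }
    if (ret == 0) {
        /* Returns MBEDTLS_ERR_GCM_AUTH_FAILED if the tag does not match */
        ret = esp_aes_gcm_auth_decrypt(&gcm, sizeof(ciphertext),
                                       iv, sizeof(iv), aad, sizeof(aad),
                                       tag, sizeof(tag), ciphertext, decrypted);
    }

    esp_aes_gcm_free(&gcm);
    return ret;
}
#endif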