1 /**
2  *  Constant-time functions
3  *
4  *  Copyright The Mbed TLS Contributors
5  *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
6  */
7 
8 /*
9  * The following functions are implemented without using comparison operators, as those
10  * might be translated to branches by some compilers on some platforms.
11  */
12 
13 #include <stdint.h>
14 #include <limits.h>
15 
16 #include "common.h"
17 #include "constant_time_internal.h"
18 #include "mbedtls/constant_time.h"
19 #include "mbedtls/error.h"
20 #include "mbedtls/platform_util.h"
21 
22 #include <string.h>
23 
24 #if defined(MBEDTLS_USE_PSA_CRYPTO) && defined(MBEDTLS_SSL_SOME_SUITES_USE_MAC)
25 #include "psa/crypto.h"
26 /* Define a local translating function to save code size by not using too many
27  * arguments in each translating place. */
/* Translate a PSA status code into an Mbed TLS error code, using the
 * psa_to_ssl_errors mapping table (presumably SSL-layer error codes —
 * verify against the table's definition).
 *
 * Wrapping the three-extra-argument call in a local function keeps each
 * translation site small, saving code size.
 *
 * \param status  PSA status to translate.
 * \return        The corresponding negative MBEDTLS_ERR_xxx value
 *                (as produced by psa_status_to_mbedtls()).
 */
static int local_err_translation(psa_status_t status)
{
    return psa_status_to_mbedtls(status, psa_to_ssl_errors,
                                 ARRAY_LENGTH(psa_to_ssl_errors),
                                 psa_generic_status_to_mbedtls);
}
/* All PSA-to-Mbed TLS translation in this file goes through the helper above. */
#define PSA_TO_MBEDTLS_ERR(status) local_err_translation(status)
35 #endif
36 
#if !defined(MBEDTLS_CT_ASM)
/*
 * Define an object with the value zero, such that the compiler cannot prove that it
 * has the value zero (because it is volatile, it "may be modified in ways unknown to
 * the implementation").
 *
 * Only needed when no constant-time assembly is available (per the guard
 * above): the pure-C fallbacks use this to hide values from the optimizer.
 */
volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;
#endif
45 
46 /*
47  * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
48  * perform fast unaligned access to volatile data.
49  *
50  * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile
51  * memory accesses.
52  *
53  * Some of these definitions could be moved into alignment.h but for now they are
54  * only used here.
55  */
56 #if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \
57     ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \
58     defined(MBEDTLS_CT_AARCH64_ASM))
59 /* We check pointer sizes to avoid issues with them not matching register size requirements */
60 #define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS
61 
/* Perform a 32-bit load from a possibly-unaligned address in volatile memory.
 *
 * Needed because mbedtls_get_unaligned_uint32() does not support volatile
 * accesses (see the comment block above). Only compiled on Arm/AArch64
 * targets where unaligned LDR is efficient.
 *
 * \param p  Address to read from; need not be 4-byte aligned.
 * \return   The 32 bits at p, in native byte order.
 */
static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error "No assembly defined for mbedtls_get_unaligned_volatile_uint32"
#endif
    return r;
}
78 #endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&
79           (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */
80 
/* Constant-time buffer comparison.
 *
 * Compares the n bytes at a and b. Every byte of both buffers is always
 * read, so the execution time depends only on n, not on the data — no
 * information about the position of a difference leaks through timing.
 *
 * \param a  First buffer.
 * \param b  Second buffer.
 * \param n  Number of bytes to compare.
 *
 * \return 0 if and only if the two buffers are equal; otherwise a non-zero
 *         value that carries no information about where they differ.
 */
int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    /* Fast path: fold in 4 bytes at a time while at least 4 remain. */
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    /* Byte-by-byte tail (or the whole comparison when the fast path is off). */
    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }


#if (INT_MAX < INT32_MAX)
    /* We don't support int smaller than 32-bits, but if someone tried to build
     * with this configuration, there is a risk that, for differing data, the
     * only bits set in diff are in the top 16-bits, and would be lost by a
     * simple cast from uint32 to int.
     * This would have significant security implications, so protect against it. */
#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"
#else
    /* The bit-twiddling ensures that when we cast uint32_t to int, we are casting
     * a value that is in the range 0..INT_MAX - a value larger than this would
     * result in implementation defined behaviour.
     *
     * This ensures that the value returned by the function is non-zero iff
     * diff is non-zero.
     *
     * (The OR of the two 16-bit halves fits in 16 bits, so it is at most
     * 0xffff, comfortably within INT_MAX.)
     */
    return (int) ((diff & 0xffff) | (diff >> 16));
#endif
}
131 
132 #if defined(MBEDTLS_NIST_KW_C)
133 
/* Constant-time comparison of a sub-range of two buffers.
 *
 * Compares the bytes of a and b at indices [skip_head, n - skip_tail),
 * i.e. ignoring the first skip_head and last skip_tail bytes. All n bytes
 * of both buffers are always read, and which bytes count is decided with
 * constant-time selection, so neither the data nor the skip amounts leak
 * through the memory access pattern.
 *
 * NOTE(review): assumes skip_head + skip_tail <= n (otherwise
 * n - skip_tail underflows) — confirm against the contract in
 * constant_time_internal.h.
 *
 * \param a          First buffer.
 * \param b          Second buffer.
 * \param n          Total size of each buffer in bytes.
 * \param skip_head  Number of leading bytes to exclude.
 * \param skip_tail  Number of trailing bytes to exclude.
 *
 * \return 0 iff the compared ranges are equal, non-zero otherwise.
 */
int mbedtls_ct_memcmp_partial(const void *a,
                              const void *b,
                              size_t n,
                              size_t skip_head,
                              size_t skip_tail)
{
    unsigned int diff = 0;

    /* volatile ensures both buffers are fully read (see mbedtls_ct_memcmp). */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;

    /* First index past the compared range. */
    size_t valid_end = n - skip_tail;

    for (size_t i = 0; i < n; i++) {
        unsigned char x = A[i], y = B[i];
        unsigned int d = x ^ y;
        /* valid is all-ones iff skip_head <= i < valid_end. */
        mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),
                                                           mbedtls_ct_uint_lt(i, valid_end));
        /* Accumulate d only for in-range indices, without branching. */
        diff |= mbedtls_ct_uint_if_else_0(valid, d);
    }

    /* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the
     * cast from uint to int is safe. */
    return (int) diff;
}
159 
160 #endif
161 
162 #if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
163 
/* Shift the `total` bytes at `start` towards lower addresses by `offset`
 * bytes, zero-filling the vacated tail — with a memory access pattern and
 * running time that depend only on `total`, never on the (secret) offset.
 *
 * Cost is O(total^2): each of the `total` outer passes walks the whole
 * buffer, and constant-time selection decides per pass whether anything
 * actually moves.
 *
 * \param start   Buffer to shift in place.
 * \param total   Size of the buffer in bytes.
 * \param offset  Number of bytes to shift left by (0 <= offset <= total).
 */
void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    volatile unsigned char *buf = start;
    for (size_t i = 0; i < total; i++) {
        /* no_op is all-ones for the first total - offset passes. */
        mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
        for (size_t n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next    = buf[n+1];
            /* Keep current on a no-op pass, otherwise take the next byte. */
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        /* On a shifting pass the last byte becomes zero. */
        buf[total-1] = mbedtls_ct_uint_if_else_0(no_op, buf[total-1]);
    }
}
180 
181 #endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */
182 
/* Constant-time conditional memcpy:
 * dest[i] = condition ? src1[i] : src2[i], for i in [0, len),
 * with no data-dependent branches.
 *
 * \param condition  Constant-time condition: all bits set (true) or all
 *                   bits clear (false), as produced by the mbedtls_ct_*
 *                   comparison helpers.
 * \param dest       Destination buffer; always written (len bytes).
 * \param src1       Source used when condition is true.
 * \param src2       Source used when condition is false, or NULL to leave
 *                   dest unchanged in that case (dest is then re-written
 *                   with its own contents).
 * \param len        Number of bytes to process.
 */
void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
    /* mask selects src1 bytes, not_mask selects src2 bytes; exactly one of
     * them is all-ones. mbedtls_ct_compiler_opaque stops the compiler from
     * reasoning about the condition and reintroducing a branch. */
#if defined(MBEDTLS_CT_SIZE_64)
    const uint64_t mask     = (uint64_t) condition;
    const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);
#else
    const uint32_t mask     = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);
#endif

    /* If src2 is NULL, setup src2 so that we read from the destination address.
     *
     * This means that if src2 == NULL && condition is false, the result will be a
     * no-op because we read from dest and write the same data back into dest.
     */
    if (src2 == NULL) {
        src2 = dest;
    }

    /* dest[i] = condition ? src1[i] : src2[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Word-at-a-time fast path; the byte loop below handles the tail. */
#if defined(MBEDTLS_CT_SIZE_64)
    for (; (i + 8) <= len; i += 8) {
        uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;
        uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint64(dest + i, a | b);
    }
#else
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* defined(MBEDTLS_CT_SIZE_64) */
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
227 
/* Copy len bytes from src + offset to dest, where offset is secret but
 * known to lie in [offset_min, offset_max].
 *
 * Iterates over every candidate offset in the range and performs a
 * constant-time conditional copy at each one, so the memory access pattern
 * is identical for every value of offset. Cost is proportional to
 * (offset_max - offset_min + 1) * len.
 *
 * \param dest        Destination buffer (len bytes, always written).
 * \param src         Source buffer; bytes [offset, offset + len) are copied.
 * \param offset      Secret offset to copy from.
 * \param offset_min  Smallest possible value of offset (public).
 * \param offset_max  Largest possible value of offset (public).
 * \param len         Number of bytes to copy.
 */
void mbedtls_ct_memcpy_offset(unsigned char *dest,
                              const unsigned char *src,
                              size_t offset,
                              size_t offset_min,
                              size_t offset_max,
                              size_t len)
{
    size_t offsetval;

    /* Exactly one iteration has a true condition and performs the real copy;
     * the others rewrite dest with its own contents (src2 == NULL). */
    for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
        mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL,
                             len);
    }
}
242 
243 #if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
244 
/* Set the len bytes at buf to zero if condition is true, leave them
 * unchanged if it is false — without branching on the condition.
 *
 * The mask is the bitwise complement of the condition: condition true
 * (all bits set) gives mask == 0, so ANDing clears every byte; condition
 * false gives mask == 0xffffffff, leaving the data intact. Truncating the
 * complement to 32 bits is safe because the condition is always all-ones
 * or all-zeros.
 *
 * \param condition  Constant-time condition (all bits set or all clear).
 * \param buf        Buffer to conditionally zeroize in place.
 * \param len        Number of bytes.
 */
void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
{
    uint32_t mask = (uint32_t) ~condition;
    uint8_t *p = (uint8_t *) buf;
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Word-at-a-time fast path; the byte loop below handles the tail. */
    for (; (i + 4) <= len; i += 4) {
        mbedtls_put_unaligned_uint32((void *) (p + i),
                                     mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
    }
#endif
    for (; i < len; i++) {
        p[i] = p[i] & mask;
    }
}
260 
261 #endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */
262