1 /*
2 * Message Processing Stack, Reader implementation
3 *
4 * Copyright The Mbed TLS Contributors
5 * SPDX-License-Identifier: Apache-2.0
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License"); you may
8 * not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
15 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 * This file is part of Mbed TLS (https://tls.mbed.org)
20 */
21
22 #include "common.h"
23
24 #if defined(MBEDTLS_SSL_PROTO_TLS1_3)
25
26 #include "mps_reader.h"
27 #include "mps_common.h"
28 #include "mps_trace.h"
29
30 #include <string.h>
31
32 #if defined(MBEDTLS_MPS_ENABLE_TRACE)
33 static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
34 #endif /* MBEDTLS_MPS_ENABLE_TRACE */
35
36 /*
37 * GENERAL NOTE ON CODING STYLE
38 *
39 * The following code intentionally separates memory loads
40 * and stores from other operations (arithmetic or branches).
41 * This leads to the introduction of many local variables
42 * and significantly increases the C-code line count, but
43 * should not increase the size of generated assembly.
44 *
45 * The reason for this is twofold:
46 * (1) It will ease verification efforts using the VST
47 * (Verified Software Toolchain)
48 * whose program logic cannot directly reason
49 * about instructions containing a load or store in
50 * addition to other operations (e.g. *p = *q or
51 * tmp = *p + 42).
52 * (2) Operating on local variables and writing the results
53 * back to the target contexts on success only
54 * allows to maintain structure invariants even
55 * on failure - this in turn has two benefits:
56 * (2.a) If for some reason an error code is not caught
57 * and operation continues, functions are nonetheless
58 * called with sane contexts, reducing the risk
59 * of dangerous behavior.
60 * (2.b) Randomized testing is easier if structures
61 * remain intact even in the face of failing
62 * and/or non-sensical calls.
63 * Moreover, it might even reduce code-size because
64 * the compiler need not write back temporary results
65 * to memory in case of failure.
66 *
67 */
68
/* A reader is accumulating if an accumulator is registered and there
 * are still bytes outstanding before the paused read request can be
 * satisfied. */
static inline int mps_reader_is_accumulating(
    mbedtls_mps_reader const *rd )
{
    mbedtls_mps_size_t bytes_outstanding;
    if( rd->acc == NULL )
        return( 0 );

    bytes_outstanding = rd->acc_share.acc_remaining;
    return( bytes_outstanding > 0 );
}
79
/* The reader is in producing mode exactly when no fragment is
 * currently registered. */
static inline int mps_reader_is_producing(
    mbedtls_mps_reader const *rd )
{
    unsigned char *current_frag = rd->frag;
    return( current_frag == NULL );
}
86
/* Consuming mode is the complement of producing mode. */
static inline int mps_reader_is_consuming(
    mbedtls_mps_reader const *rd )
{
    int producing = mps_reader_is_producing( rd );
    return( !producing );
}
92
mps_reader_get_fragment_offset(mbedtls_mps_reader const * rd)93 static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
94 mbedtls_mps_reader const *rd )
95 {
96 unsigned char *acc = rd->acc;
97 mbedtls_mps_size_t frag_offset;
98
99 if( acc == NULL )
100 return( 0 );
101
102 frag_offset = rd->acc_share.frag_offset;
103 return( frag_offset );
104 }
105
/* Check whether read requests are currently served from the
 * accumulator, i.e. whether the current read position still lies
 * before the start of the registered fragment.
 *
 * Returns a boolean (0/1).
 *
 * NOTE(review): Changed the return type from mbedtls_mps_size_t to
 * int for consistency with the other boolean predicates in this file
 * (mps_reader_is_accumulating(), mps_reader_is_producing(),
 * mps_reader_is_consuming()). The function is file-internal and all
 * callers use the result only in boolean context, so this is
 * backward compatible. */
static inline int mps_reader_serving_from_accumulator(
    mbedtls_mps_reader const *rd )
{
    mbedtls_mps_size_t frag_offset, end;

    frag_offset = mps_reader_get_fragment_offset( rd );
    end = rd->end;

    return( end < frag_offset );
}
116
/* Reset every field of the reader to its zero/NULL initial state. */
static inline void mps_reader_zero( mbedtls_mps_reader *rd )
{
    /* memset() would likely be cheaper, but the explicit
     * field-by-field initialization makes it obvious which members
     * get reset, flags any newly-added field that must not be
     * zero-initialized, and spares formal-verification tooling from
     * reasoning about the struct as an unstructured byte blob. */
    static mbedtls_mps_reader const cleared =
        { .frag          = NULL,
          .frag_len      = 0,
          .commit        = 0,
          .end           = 0,
          .pending       = 0,
          .acc           = NULL,
          .acc_len       = 0,
          .acc_available = 0,
          .acc_share     = { .acc_remaining = 0 }
        };
    *rd = cleared;
}
138
/* Initialize a reader, registering an optional accumulator buffer
 * of acc_len bytes used to pause/resume partial read requests. */
int mbedtls_mps_reader_init( mbedtls_mps_reader *rd,
                             unsigned char *acc,
                             mbedtls_mps_size_t acc_len )
{
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_init" );
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "* Accumulator size: %u bytes", (unsigned) acc_len );
    /* Start from a fully cleared state, then record the accumulator. */
    mps_reader_zero( rd );
    rd->acc_len = acc_len;
    rd->acc = acc;
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
151
/* Free a reader. The reader owns no heap memory, so this amounts to
 * clearing all fields back to the initial state. */
int mbedtls_mps_reader_free( mbedtls_mps_reader *rd )
{
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_free" );
    mps_reader_zero( rd );
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
158
/* Feed a new data fragment to the reader, switching it from producing
 * to consuming mode.
 *
 * If the reader was paused with an unsatisfied read request (it is
 * "accumulating"), the new fragment is first copied into the
 * accumulator. Only once the accumulator holds enough data to satisfy
 * the pending request does the reader enter consuming mode; the first
 * reads are then served from the accumulator.
 *
 * \param rd           The reader; must be in producing mode.
 * \param new_frag     The new fragment; must not be \c NULL.
 * \param new_frag_len The length of \p new_frag in bytes.
 *
 * \return 0 on success (reader is now in consuming mode);
 *         MBEDTLS_ERR_MPS_READER_NEED_MORE if the fragment was
 *         absorbed into the accumulator but more data is still
 *         required (reader stays in producing mode);
 *         MBEDTLS_ERR_MPS_READER_INVALID_ARG if \p new_frag is NULL.
 */
int mbedtls_mps_reader_feed( mbedtls_mps_reader *rd,
                             unsigned char *new_frag,
                             mbedtls_mps_size_t new_frag_len )
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_feed" );
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "* Fragment length: %u bytes", (unsigned) new_frag_len );

    if( new_frag == NULL )
        MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_INVALID_ARG );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_producing( rd ),
        "mbedtls_mps_reader_feed() requires reader to be in producing mode" );

    if( mps_reader_is_accumulating( rd ) )
    {
        unsigned char *acc    = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;

        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;

        /* Copy at most acc_remaining bytes: the accumulator was sized
         * for the pending request in mbedtls_mps_reader_reclaim(). */
        copy_to_acc = acc_remaining;
        if( copy_to_acc > new_frag_len )
            copy_to_acc = new_frag_len;

        /* Copy new contents to accumulator. */
        memcpy( acc, new_frag, copy_to_acc );

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
              "Copy new data of size %u of %u into accumulator at offset %u",
                (unsigned) copy_to_acc, (unsigned) new_frag_len, (unsigned) acc_available );

        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if( acc_remaining > 0 )
        {
            /* We need to accumulate more data. Stay in producing mode. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_NEED_MORE );
        }

        /* We have filled the accumulator: Move to consuming mode. */

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "Enough data available to serve user request" );

        /* Remember overlap of accumulator and fragment: the current
         * fragment logically starts at stream offset acc_available
         * (before the copy), so reads below that offset are served
         * from the accumulator. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    }
    else /* Not accumulating */
    {
        /* No accumulator data in use: the fragment starts the stream. */
        rd->acc_share.frag_offset = 0;
    }

    /* Register the fragment and reset the read/commit positions. */
    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
226
227
/* Request a contiguous buffer of \p desired bytes from the reader.
 *
 * The request is served from the accumulator while the read position
 * is still below the fragment offset, and from the current fragment
 * afterwards.
 *
 * \param rd      The reader; must be in consuming mode.
 * \param desired The number of bytes requested.
 * \param buffer  On success, set to the start of the returned data.
 * \param buflen  If not \c NULL, receives the number of bytes actually
 *                returned (which may be less than \p desired when the
 *                fragment runs short). If \c NULL, partial reads are
 *                not acceptable and the call fails with
 *                MBEDTLS_ERR_MPS_READER_OUT_OF_DATA instead, logging
 *                the shortfall in rd->pending for the next reclaim.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_OUT_OF_DATA if the fragment cannot
 *         serve the request and \p buflen is NULL;
 *         MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS if a resumed
 *         request doesn't match the request made before pausing.
 */
int mbedtls_mps_reader_get( mbedtls_mps_reader *rd,
                            mbedtls_mps_size_t desired,
                            unsigned char **buffer,
                            mbedtls_mps_size_t *buflen )
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_get" );
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "* Bytes requested: %u", (unsigned) desired );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_consuming( rd ),
          "mbedtls_mps_reader_get() requires reader to be in consuming mode" );

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset( rd );

    /* Check if we're still serving from the accumulator. */
    if( mps_reader_serving_from_accumulator( rd ) )
    {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end        end+desired
         *               |             |
         *        +------v-------------v------+
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end                 end + desired
         *               |                        |
         *        +------v--------------------+   v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "Serve the request from the accumulator" );
        if( frag_offset - end < desired )
        {
            /* The request extends beyond the accumulator part of the
             * stream; it is only allowed if it ends exactly at the
             * accumulator boundary (Allowed #2 above). */
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if( acc_available - end != desired )
            {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we don't attempt exactly the same `get` calls when
                 * resuming on a reader than what we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS );
            }
        }

        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if( buflen != NULL )
            *buflen = desired;

        /* Advance the read position; clear any logged shortfall. */
        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN( 0 );
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "Serve the request from the current fragment." );

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if( frag_remaining < desired )
    {
        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "There's not enough data in the current fragment "
                           "to serve the request." );
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if( buflen == NULL )
        {
            if( frag_remaining > 0 )
            {
                /* Log how many bytes are missing so that reclaim can
                 * size the accumulator for the paused request. */
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "Remember to collect %u bytes before re-opening",
                       (unsigned) rd->pending );
            }
            MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_OUT_OF_DATA );
        }

        /* Partial reads are acceptable: shrink the request to what's
         * available. */
        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if( buflen != NULL )
        *buflen = desired;

    /* Advance the read position; clear any logged shortfall. */
    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
398
/* Mark all data fetched so far as processed: the commit mark catches
 * up with the current read position, so reclaim will not preserve
 * those bytes. */
int mbedtls_mps_reader_commit( mbedtls_mps_reader *rd )
{
    mbedtls_mps_size_t new_commit;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_commit" );
    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_consuming( rd ),
        "mbedtls_mps_reader_commit() requires reader to be in consuming mode" );

    new_commit = rd->end;
    rd->commit = new_commit;

    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
411
/* Reclaim the current fragment from the reader, switching it back to
 * producing mode.
 *
 * If all data was fetched and committed, the reader simply detaches
 * the fragment. If there was an unsatisfied read request (rd->pending
 * is nonzero), the uncommitted tail of the accumulator and fragment is
 * backed up into the accumulator and the reader pauses: the next
 * feed() calls will top up the accumulator until the paused request
 * can be served.
 *
 * \param rd     The reader; must be in consuming mode.
 * \param paused If not \c NULL, set to 1 when the reader paused with
 *               an outstanding request, 0 otherwise.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_DATA_LEFT if uncommitted data remains
 *         and no read request failed;
 *         MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR if a backup is
 *         needed but no accumulator is present;
 *         MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL if the
 *         accumulator cannot hold the backup plus the pending bytes.
 */
int mbedtls_mps_reader_reclaim( mbedtls_mps_reader *rd,
                                int *paused )
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_reclaim" );

    if( paused != NULL )
        *paused = 0;

    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_consuming( rd ),
           "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode" );

    frag     = rd->frag;
    acc      = rd->acc;
    pending  = rd->pending;
    commit   = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset( rd );

    if( pending == 0 )
    {
        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "No unsatisfied read-request has been logged." );

        /* Check if there's data left to be consumed. */
        if( commit < frag_offset || commit - frag_offset < frag_len )
        {
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                               "There is data left to be consumed." );
            /* Roll the read position back to the commit mark and stay
             * in consuming mode. */
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_DATA_LEFT );
        }

        /* Everything was consumed and committed: the accumulator is
         * no longer needed. */
        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "Fragment has been fully processed and committed." );
    }
    else
    {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
               "There has been an unsatisfied read with %u bytes overhead.",
               (unsigned) pending );

        if( acc == NULL )
        {
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                               "No accumulator present" );
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR );
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if( commit < frag_offset )
        {
            /* No, accumulator is still being processed. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        }
        else
        {
            /* Yes, the accumulator is already processed. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

        /* Total uncommitted data to preserve, plus the bytes still
         * missing for the pending request. */
        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

        /* Guard the two additions above against wrap-around of the
         * unsigned size type. */
        overflow  = 0;
        overflow |= ( backup_len    < acc_backup_len );
        overflow |= ( acc_len_needed < backup_len );

        if( overflow || acc_len < acc_len_needed )
        {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_ERROR,
                               "The accumulator is too small to handle the backup." );
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_ERROR,
                               "* Size: %u", (unsigned) acc_len );
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_ERROR,
                               "* Needed: %u (%u + %u)",
                               (unsigned) acc_len_needed,
                               (unsigned) backup_len, (unsigned) pending );
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL );
        }

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                         "Fragment backup: %u", (unsigned) frag_backup_len );
        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                         "Accumulator backup: %u", (unsigned) acc_backup_len );

        /* Move uncommitted parts from the accumulator to the front
         * of the accumulator. memmove() because source and destination
         * may overlap within the accumulator. */
        memmove( acc, acc + acc_backup_offset, acc_backup_len );

        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy( acc + acc_backup_len,
                frag + frag_backup_offset, frag_backup_len );

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if( paused != NULL )
            *paused = 1;
    }

    /* Detach the fragment and reset positions: back to producing mode. */
    rd->frag     = NULL;
    rd->frag_len = 0;

    rd->commit  = 0;
    rd->end     = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "Final state: aa %u, al %u, ar %u",
                       (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                       (unsigned) rd->acc_share.acc_remaining );
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
558
559 #endif /* MBEDTLS_SSL_PROTO_TLS1_3 */
560