1 /*
2 * Message Processing Stack, Reader implementation
3 *
4 * Copyright The Mbed TLS Contributors
5 * SPDX-License-Identifier: Apache-2.0
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License"); you may
8 * not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
15 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 * This file is part of Mbed TLS (https://tls.mbed.org)
20 */
21
22 #include "common.h"
23
24 #if defined(MBEDTLS_SSL_PROTO_TLS1_3)
25
26 #include "mps_reader.h"
27 #include "mps_common.h"
28 #include "mps_trace.h"
29
30 #include <string.h>
31
32 #if defined(MBEDTLS_MPS_ENABLE_TRACE)
33 static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
34 #endif /* MBEDTLS_MPS_ENABLE_TRACE */
35
36 /*
37 * GENERAL NOTE ON CODING STYLE
38 *
39 * The following code intentionally separates memory loads
40 * and stores from other operations (arithmetic or branches).
41 * This leads to the introduction of many local variables
42 * and significantly increases the C-code line count, but
43 * should not increase the size of generated assembly.
44 *
45 * The reason for this is twofold:
46 * (1) It will ease verification efforts using the VST
47 * (Verified Software Toolchain)
48 * whose program logic cannot directly reason
49 * about instructions containing a load or store in
50 * addition to other operations (e.g. *p = *q or
51 * tmp = *p + 42).
52 * (2) Operating on local variables and writing the results
53 * back to the target contexts on success only
54 * allows to maintain structure invariants even
55 * on failure - this in turn has two benefits:
56 * (2.a) If for some reason an error code is not caught
57 * and operation continues, functions are nonetheless
58 * called with sane contexts, reducing the risk
59 * of dangerous behavior.
60 * (2.b) Randomized testing is easier if structures
61 * remain intact even in the face of failing
62 * and/or non-sensical calls.
63 * Moreover, it might even reduce code-size because
64 * the compiler need not write back temporary results
65 * to memory in case of failure.
66 *
67 */
68
/* Check whether the reader is currently gathering data into the
 * accumulator, i.e. an accumulator is present and a previously
 * logged read request has not yet been satisfied. */
static inline int mps_reader_is_accumulating(
    mbedtls_mps_reader const *rd)
{
    unsigned char *acc = rd->acc;
    mbedtls_mps_size_t remaining;

    if (acc == NULL) {
        return 0;
    }

    /* Loads are kept separate from comparisons, per the coding
     * style note at the top of this file. */
    remaining = rd->acc_share.acc_remaining;
    return remaining > 0;
}
80
/* Check whether the reader is in producing mode: no fragment is
 * currently registered, so it is waiting for mbedtls_mps_reader_feed(). */
static inline int mps_reader_is_producing(
    mbedtls_mps_reader const *rd)
{
    unsigned char *current_frag = rd->frag;

    if (current_frag == NULL) {
        return 1;
    }
    return 0;
}
87
/* Check whether the reader is in consuming mode — the exact
 * complement of producing mode. */
static inline int mps_reader_is_consuming(
    mbedtls_mps_reader const *rd)
{
    int producing = mps_reader_is_producing(rd);
    return producing == 0;
}
93
mps_reader_get_fragment_offset(mbedtls_mps_reader const * rd)94 static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
95 mbedtls_mps_reader const *rd)
96 {
97 unsigned char *acc = rd->acc;
98 mbedtls_mps_size_t frag_offset;
99
100 if (acc == NULL) {
101 return 0;
102 }
103
104 frag_offset = rd->acc_share.frag_offset;
105 return frag_offset;
106 }
107
mps_reader_serving_from_accumulator(mbedtls_mps_reader const * rd)108 static inline mbedtls_mps_size_t mps_reader_serving_from_accumulator(
109 mbedtls_mps_reader const *rd)
110 {
111 mbedtls_mps_size_t frag_offset, end;
112
113 frag_offset = mps_reader_get_fragment_offset(rd);
114 end = rd->end;
115
116 return end < frag_offset;
117 }
118
/* Reset every field of the reader to its zero/NULL state. */
static inline void mps_reader_zero(mbedtls_mps_reader *rd)
{
    /* A plain memset() would likely be more efficient, but zeroing
     * field-by-field makes it harder to overlook members that must
     * not be zero-initialized, and is friendlier to formal
     * verification, which would otherwise have to reason about the
     * struct as an unstructured binary blob. */
    static mbedtls_mps_reader const cleared = {
        .frag          = NULL,
        .frag_len      = 0,
        .commit        = 0,
        .end           = 0,
        .pending       = 0,
        .acc           = NULL,
        .acc_len       = 0,
        .acc_available = 0,
        .acc_share     = { .acc_remaining = 0 }
    };

    *rd = cleared;
}
139
/* Initialize a reader: clear all state, then register the (possibly
 * NULL) accumulator buffer used to pause/resume across fragments.
 * Always returns 0. */
int mbedtls_mps_reader_init(mbedtls_mps_reader *rd,
                            unsigned char *acc,
                            mbedtls_mps_size_t acc_len)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_init");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Accumulator size: %u bytes", (unsigned) acc_len);

    /* Start from a clean slate, then attach the accumulator. */
    mps_reader_zero(rd);
    rd->acc     = acc;
    rd->acc_len = acc_len;

    MBEDTLS_MPS_TRACE_RETURN(0);
}
152
/* Free a reader. The reader owns no heap memory, so this simply
 * wipes all state. Always returns 0. */
int mbedtls_mps_reader_free(mbedtls_mps_reader *rd)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_free");

    mps_reader_zero(rd);

    MBEDTLS_MPS_TRACE_RETURN(0);
}
159
/* Feed a new fragment of incoming data to the reader and switch it
 * from producing to consuming mode.
 *
 * If the reader was paused with an unsatisfied read request, the new
 * fragment is first copied into the accumulator. Should the fragment
 * still not provide enough data, the reader stays in producing mode
 * and reports MBEDTLS_ERR_MPS_READER_NEED_MORE.
 *
 * \param rd           The reader; must be in producing mode.
 * \param new_frag     The new data fragment. Must not be \c NULL.
 * \param new_frag_len The length of \p new_frag in bytes.
 *
 * \return 0 on success (reader is now in consuming mode);
 *         MBEDTLS_ERR_MPS_READER_INVALID_ARG if \p new_frag is NULL;
 *         MBEDTLS_ERR_MPS_READER_NEED_MORE if more data must be fed
 *         before the pending request can be served.
 */
int mbedtls_mps_reader_feed(mbedtls_mps_reader *rd,
                            unsigned char *new_frag,
                            mbedtls_mps_size_t new_frag_len)
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_feed");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Fragment length: %u bytes", (unsigned) new_frag_len);

    if (new_frag == NULL) {
        MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_INVALID_ARG);
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_producing(
                                       rd),
                                   "mbedtls_mps_reader_feed() requires reader to be in producing mode");

    if (mps_reader_is_accumulating(rd)) {
        /* A previous reclaim left a read request unsatisfied; the new
         * fragment must first go towards filling the accumulator. */
        unsigned char *acc = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;

        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;

        /* Copy at most as much as is still outstanding. */
        copy_to_acc = acc_remaining;
        if (copy_to_acc > new_frag_len) {
            copy_to_acc = new_frag_len;
        }

        /* Copy new contents to accumulator. */
        memcpy(acc, new_frag, copy_to_acc);

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Copy new data of size %u of %u into accumulator at offset %u",
                          (unsigned) copy_to_acc, (unsigned) new_frag_len,
                          (unsigned) acc_available);

        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if (acc_remaining > 0) {
            /* We need to accumulate more data. Stay in producing mode.
             * Note: state is written back only on this success-like
             * path, per the coding-style note at the top of the file. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_MORE);
        }

        /* We have filled the accumulator: Move to consuming mode. */

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Enough data available to serve user request");

        /* Remember overlap of accumulator and fragment: the fragment's
         * logical offset is where the accumulator content ended before
         * this feed; the copied prefix is shared between both. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    } else { /* Not accumulating */
        rd->acc_share.frag_offset = 0;
    }

    /* Register the fragment and reset the read/commit positions
     * (both are offsets into the logical accumulator+fragment stream). */
    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}
227
228
/* Request a contiguous buffer of \p desired bytes from the reader.
 *
 * The request is served either from the accumulator (data carried
 * over from a previous pause) or from the current fragment. On
 * success, \p *buffer points to the data and, if \p buflen is
 * non-NULL, \p *buflen receives the number of bytes actually
 * provided (which may be less than \p desired when \p buflen is
 * non-NULL and the fragment runs short).
 *
 * \param rd      The reader; must be in consuming mode.
 * \param desired The number of bytes requested.
 * \param buffer  Output: points into the accumulator or fragment.
 * \param buflen  If NULL, the request is all-or-nothing; otherwise
 *                a partial read may be returned.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_OUT_OF_DATA if \p buflen is NULL and
 *         the fragment cannot fully serve the request (the shortfall
 *         is remembered in rd->pending for the next reclaim/feed);
 *         MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS if the request
 *         pattern after resuming differs from the one before pausing
 *         (see the "Not allowed" diagrams below).
 */
int mbedtls_mps_reader_get(mbedtls_mps_reader *rd,
                           mbedtls_mps_size_t desired,
                           unsigned char **buffer,
                           mbedtls_mps_size_t *buflen)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_get");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Bytes requested: %u", (unsigned) desired);

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_get() requires reader to be in consuming mode");

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset(rd);

    /* Check if we're still serving from the accumulator. */
    if (mps_reader_serving_from_accumulator(rd)) {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |   |
         *        +-----v---v-------------+
         *        |          acc          |
         *        +-----------------------+
         *                          |
         *                    frag_offset
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end end+desired
         *                   |   |
         *        +----------v---v
         *        |          acc |
         *        +--------------+
         *                       |
         *                 frag_offset == acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *            end  end+desired
         *             |       |
         *        +----v-------v----+
         *        |       acc       |
         *        +-----------------+
         *                          |
         *                    frag_offset
         *
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end      end + desired
         *                   |           |
         *        +----------v-------+   v
         *        |        acc       |
         *        +------------------+
         *                           |
         *                     frag_offset
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Serve the request from the accumulator");
        if (frag_offset - end < desired) {
            /* The request extends past the fragment boundary inside
             * the accumulator: only the exact "Allowed #2" shape
             * (request ends exactly at acc_available) is accepted. */
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if (acc_available - end != desired) {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we don't attempt exactly the same `get` calls when
                 * resuming on a reader than what we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
            }
        }

        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if (buflen != NULL) {
            *buflen = desired;
        }

        /* Advance the read position; clear any pending shortfall. */
        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN(0);
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Serve the request from the current fragment.");

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if (frag_remaining < desired) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There's not enough data in the current fragment "
                          "to serve the request.");
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if (buflen == NULL) {
            if (frag_remaining > 0) {
                /* Record the shortfall so the next reclaim knows how
                 * much extra data to gather in the accumulator. */
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                                  "Remember to collect %u bytes before re-opening",
                                  (unsigned) rd->pending);
            }
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_OUT_OF_DATA);
        }

        /* Caller accepts partial reads: clamp to what's available. */
        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if (buflen != NULL) {
        *buflen = desired;
    }

    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}
396
/* Commit everything fetched so far: the caller guarantees it will not
 * need to re-read data up to the current read position, allowing the
 * next reclaim to discard it. Returns 0 on success. */
int mbedtls_mps_reader_commit(mbedtls_mps_reader *rd)
{
    mbedtls_mps_size_t new_commit;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_commit");
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_commit() requires reader to be in consuming mode");

    /* Load and store separately, per the file's coding-style note. */
    new_commit = rd->end;
    rd->commit = new_commit;

    MBEDTLS_MPS_TRACE_RETURN(0);
}
410
/* Reclaim the current fragment and switch the reader back to
 * producing mode.
 *
 * If the last mbedtls_mps_reader_get() failed with OUT_OF_DATA
 * (rd->pending != 0), the uncommitted tail of the accumulator and the
 * fragment are backed up into the accumulator so the request can be
 * resumed after the next feed; in that case \p *paused is set to 1.
 *
 * \param rd     The reader; must be in consuming mode.
 * \param paused Optional output: set to 1 if the reader was paused
 *               with outstanding data, 0 otherwise.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_DATA_LEFT if uncommitted data remains
 *         and no read request is pending (end is rolled back to commit);
 *         MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR if pausing is needed
 *         but no accumulator was configured;
 *         MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL if the backup
 *         plus pending data would not fit in the accumulator.
 */
int mbedtls_mps_reader_reclaim(mbedtls_mps_reader *rd,
                               int *paused)
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_reclaim");

    if (paused != NULL) {
        *paused = 0;
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode");

    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset(rd);

    if (pending == 0) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "No unsatisfied read-request has been logged.");

        /* Check if there's data left to be consumed.
         * (Either the commit position is still inside the accumulator,
         * or it has not reached the end of the fragment.) */
        if (commit < frag_offset || commit - frag_offset < frag_len) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "There is data left to be consumed.");
            /* Roll the read position back to the last commit. */
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_DATA_LEFT);
        }

        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment has been fully processed and committed.");
    } else {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There has been an unsatisfied read with %u bytes overhead.",
                          (unsigned) pending);

        if (acc == NULL) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "No accumulator present");
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR);
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if (commit < frag_offset) {
            /* No, accumulator is still being processed. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        } else {
            /* Yes, the accumulator is already processed. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

        /* Detect wrap-around in the unsigned additions above before
         * comparing against the accumulator capacity. */
        overflow = 0;
        overflow |= (backup_len < acc_backup_len);
        overflow |= (acc_len_needed < backup_len);

        if (overflow || acc_len < acc_len_needed) {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "The accumulator is too small to handle the backup.");
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Size: %u", (unsigned) acc_len);
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Needed: %u (%u + %u)",
                              (unsigned) acc_len_needed,
                              (unsigned) backup_len, (unsigned) pending);
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL);
        }

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment backup: %u", (unsigned) frag_backup_len);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Accumulator backup: %u", (unsigned) acc_backup_len);

        /* Move uncommitted parts from the accumulator to the front
         * of the accumulator. (memmove: source and destination may
         * overlap within the accumulator.) */
        memmove(acc, acc + acc_backup_offset, acc_backup_len);

        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy(acc + acc_backup_len,
               frag + frag_backup_offset, frag_backup_len);

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if (paused != NULL) {
            *paused = 1;
        }
    }

    /* Back to producing mode: drop the fragment and reset positions. */
    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Final state: aa %u, al %u, ar %u",
                      (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                      (unsigned) rd->acc_share.acc_remaining);
    MBEDTLS_MPS_TRACE_RETURN(0);
}
551
552 #endif /* MBEDTLS_SSL_PROTO_TLS1_3 */
553