mps_reader.c 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552
  1. /*
  2. * Message Processing Stack, Reader implementation
  3. *
  4. * Copyright The Mbed TLS Contributors
  5. * SPDX-License-Identifier: Apache-2.0
  6. *
  7. * Licensed under the Apache License, Version 2.0 (the "License"); you may
  8. * not use this file except in compliance with the License.
  9. * You may obtain a copy of the License at
  10. *
  11. * http://www.apache.org/licenses/LICENSE-2.0
  12. *
  13. * Unless required by applicable law or agreed to in writing, software
  14. * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  15. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  16. * See the License for the specific language governing permissions and
  17. * limitations under the License.
  18. *
  19. * This file is part of Mbed TLS (https://tls.mbed.org)
  20. */
  21. #include "common.h"
  22. #if defined(MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL)
  23. #include "mps_reader.h"
  24. #include "mps_common.h"
  25. #include "mps_trace.h"
  26. #include <string.h>
  27. #if defined(MBEDTLS_MPS_ENABLE_TRACE)
  28. static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
  29. #endif /* MBEDTLS_MPS_ENABLE_TRACE */
  30. /*
  31. * GENERAL NOTE ON CODING STYLE
  32. *
  33. * The following code intentionally separates memory loads
  34. * and stores from other operations (arithmetic or branches).
  35. * This leads to the introduction of many local variables
  36. * and significantly increases the C-code line count, but
  37. * should not increase the size of generated assembly.
  38. *
  39. * The reason for this is twofold:
  40. * (1) It will ease verification efforts using the VST
  41. * (Verified Software Toolchain)
  42. * whose program logic cannot directly reason
  43. * about instructions containing a load or store in
  44. * addition to other operations (e.g. *p = *q or
  45. * tmp = *p + 42).
  46. * (2) Operating on local variables and writing the results
  47. * back to the target contexts on success only
  48. * allows to maintain structure invariants even
  49. * on failure - this in turn has two benefits:
  50. * (2.a) If for some reason an error code is not caught
  51. * and operation continues, functions are nonetheless
  52. * called with sane contexts, reducing the risk
  53. * of dangerous behavior.
  54. * (2.b) Randomized testing is easier if structures
  55. * remain intact even in the face of failing
  56. * and/or non-sensical calls.
  57. * Moreover, it might even reduce code-size because
  58. * the compiler need not write back temporary results
  59. * to memory in case of failure.
  60. *
  61. */
  62. static inline int mps_reader_is_accumulating(
  63. mbedtls_mps_reader const *rd)
  64. {
  65. mbedtls_mps_size_t acc_remaining;
  66. if (rd->acc == NULL) {
  67. return 0;
  68. }
  69. acc_remaining = rd->acc_share.acc_remaining;
  70. return acc_remaining > 0;
  71. }
  72. static inline int mps_reader_is_producing(
  73. mbedtls_mps_reader const *rd)
  74. {
  75. unsigned char *frag = rd->frag;
  76. return frag == NULL;
  77. }
  78. static inline int mps_reader_is_consuming(
  79. mbedtls_mps_reader const *rd)
  80. {
  81. return !mps_reader_is_producing(rd);
  82. }
  83. static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
  84. mbedtls_mps_reader const *rd)
  85. {
  86. unsigned char *acc = rd->acc;
  87. mbedtls_mps_size_t frag_offset;
  88. if (acc == NULL) {
  89. return 0;
  90. }
  91. frag_offset = rd->acc_share.frag_offset;
  92. return frag_offset;
  93. }
  94. static inline mbedtls_mps_size_t mps_reader_serving_from_accumulator(
  95. mbedtls_mps_reader const *rd)
  96. {
  97. mbedtls_mps_size_t frag_offset, end;
  98. frag_offset = mps_reader_get_fragment_offset(rd);
  99. end = rd->end;
  100. return end < frag_offset;
  101. }
  102. static inline void mps_reader_zero(mbedtls_mps_reader *rd)
  103. {
  104. /* A plain memset() would likely be more efficient,
  105. * but the current way of zeroing makes it harder
  106. * to overlook fields which should not be zero-initialized.
  107. * It's also more suitable for FV efforts since it
  108. * doesn't require reasoning about structs being
  109. * interpreted as unstructured binary blobs. */
  110. static mbedtls_mps_reader const zero =
  111. { .frag = NULL,
  112. .frag_len = 0,
  113. .commit = 0,
  114. .end = 0,
  115. .pending = 0,
  116. .acc = NULL,
  117. .acc_len = 0,
  118. .acc_available = 0,
  119. .acc_share = { .acc_remaining = 0 } };
  120. *rd = zero;
  121. }
  122. int mbedtls_mps_reader_init(mbedtls_mps_reader *rd,
  123. unsigned char *acc,
  124. mbedtls_mps_size_t acc_len)
  125. {
  126. MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_init");
  127. MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
  128. "* Accumulator size: %u bytes", (unsigned) acc_len);
  129. mps_reader_zero(rd);
  130. rd->acc = acc;
  131. rd->acc_len = acc_len;
  132. MBEDTLS_MPS_TRACE_RETURN(0);
  133. }
  134. int mbedtls_mps_reader_free(mbedtls_mps_reader *rd)
  135. {
  136. MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_free");
  137. mps_reader_zero(rd);
  138. MBEDTLS_MPS_TRACE_RETURN(0);
  139. }
/* Feed a new data fragment to the reader, switching it from producing
 * to consuming mode.
 *
 * If the reader is accumulating (a previous read request could not be
 * fully served before the last reclaim), the head of the new fragment
 * is first copied into the accumulator. Should the accumulated data
 * still be insufficient for the pending request, the reader remains in
 * producing mode and MBEDTLS_ERR_MPS_READER_NEED_MORE is returned.
 *
 * \param rd           Reader context; must be in producing mode.
 * \param new_frag     New data fragment; must not be NULL.
 * \param new_frag_len Length of \p new_frag in bytes.
 *
 * \return 0 on success (reader now in consuming mode);
 *         MBEDTLS_ERR_MPS_READER_INVALID_ARG if \p new_frag is NULL;
 *         MBEDTLS_ERR_MPS_READER_NEED_MORE if more fragments are
 *         needed to serve the pending request.
 */
int mbedtls_mps_reader_feed(mbedtls_mps_reader *rd,
                            unsigned char *new_frag,
                            mbedtls_mps_size_t new_frag_len)
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_feed");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Fragment length: %u bytes", (unsigned) new_frag_len);
    if (new_frag == NULL) {
        MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_INVALID_ARG);
    }
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_producing(
                                       rd),
                                   "mbedtls_mps_reader_feed() requires reader to be in producing mode");
    if (mps_reader_is_accumulating(rd)) {
        unsigned char *acc = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;
        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;
        /* Copy at most acc_remaining bytes: the accumulator only needs
         * enough to satisfy the pending read request. */
        copy_to_acc = acc_remaining;
        if (copy_to_acc > new_frag_len) {
            copy_to_acc = new_frag_len;
        }
        /* Copy new contents to accumulator. */
        memcpy(acc, new_frag, copy_to_acc);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Copy new data of size %u of %u into accumulator at offset %u",
                          (unsigned) copy_to_acc, (unsigned) new_frag_len,
                          (unsigned) acc_available);
        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if (acc_remaining > 0) {
            /* We need to accumulate more data. Stay in producing mode. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_MORE);
        }
        /* We have filled the accumulator: Move to consuming mode. */
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Enough data available to serve user request");
        /* Remember overlap of accumulator and fragment.
         * Note: frag_offset is recorded BEFORE acc_available is bumped,
         * since the new fragment logically starts where the previously
         * accumulated data ended. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    } else { /* Not accumulating */
        rd->acc_share.frag_offset = 0;
    }
    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    /* Fresh fragment: nothing fetched or committed from it yet. */
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}
/* Request a contiguous buffer of \p desired bytes from the reader.
 *
 * The request is served from the accumulator as long as rd->end lies
 * before the fragment offset (data backed up before the last pause),
 * and from the current fragment otherwise.
 *
 * \param rd      Reader context; must be in consuming mode.
 * \param desired Number of bytes requested.
 * \param buffer  Output: points into the accumulator or fragment.
 * \param buflen  If NULL, exactly \p desired bytes are required; a
 *                shortfall is recorded in rd->pending and
 *                MBEDTLS_ERR_MPS_READER_OUT_OF_DATA is returned.
 *                If non-NULL, a shorter buffer may be returned, with
 *                its actual length stored in *buflen.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_OUT_OF_DATA if the fragment cannot
 *         satisfy an exact-size request;
 *         MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS if the request
 *         pattern after resuming differs from the one before pausing
 *         (only exact replays are supported, see below).
 */
int mbedtls_mps_reader_get(mbedtls_mps_reader *rd,
                           mbedtls_mps_size_t desired,
                           unsigned char **buffer,
                           mbedtls_mps_size_t *buflen)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_get");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Bytes requested: %u", (unsigned) desired);
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_get() requires reader to be in consuming mode");
    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset(rd);
    /* Check if we're still serving from the accumulator. */
    if (mps_reader_serving_from_accumulator(rd)) {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end       end+desired
         *               |            |
         *        +------v-------------v------+
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end                 end + desired
         *               |                        |
         *        +------v--------------------+   v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */
        unsigned char *acc;
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Serve the request from the accumulator");
        if (frag_offset - end < desired) {
            /* Request extends past the accumulated data: only the exact
             * "Allowed #2" case (request ends precisely at
             * acc_available) is supported. */
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if (acc_available - end != desired) {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we don't attempt exactly the same `get` calls when
                 * resuming on a reader than what we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
            }
        }
        acc = rd->acc;
        acc += end;
        *buffer = acc;
        if (buflen != NULL) {
            *buflen = desired;
        }
        end += desired;
        rd->end = end;
        rd->pending = 0;
        MBEDTLS_MPS_TRACE_RETURN(0);
    }
    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Serve the request from the current fragment.");
    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */
    /* Check if we can serve the read request from the fragment. */
    if (frag_remaining < desired) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There's not enough data in the current fragment "
                          "to serve the request.");
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if (buflen == NULL) {
            if (frag_remaining > 0) {
                /* Record the shortfall so that reclaim knows how much
                 * extra data the accumulator must gather. */
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                                  "Remember to collect %u bytes before re-opening",
                                  (unsigned) rd->pending);
            }
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_OUT_OF_DATA);
        }
        /* Partial serve allowed: shrink the request to what's left. */
        desired = frag_remaining;
    }
    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */
    frag = rd->frag;
    frag += frag_fetched;
    *buffer = frag;
    if (buflen != NULL) {
        *buflen = desired;
    }
    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}
  345. int mbedtls_mps_reader_commit(mbedtls_mps_reader *rd)
  346. {
  347. mbedtls_mps_size_t end;
  348. MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_commit");
  349. MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
  350. rd),
  351. "mbedtls_mps_reader_commit() requires reader to be in consuming mode");
  352. end = rd->end;
  353. rd->commit = end;
  354. MBEDTLS_MPS_TRACE_RETURN(0);
  355. }
/* Return the current fragment to the producer, switching the reader
 * back to producing mode.
 *
 * Three outcomes:
 * - No pending request and all data committed: the fragment is
 *   released and 0 is returned.
 * - No pending request but uncommitted data remains: the fetch
 *   position is rewound to the last commit and
 *   MBEDTLS_ERR_MPS_READER_DATA_LEFT is returned.
 * - A read request could not be served (rd->pending != 0): the
 *   uncommitted tail of accumulator and fragment is backed up into
 *   the accumulator, the reader enters accumulating mode and *paused
 *   is set to 1. Fails with
 *   MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR if no accumulator exists,
 *   or MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL if it cannot hold
 *   the backup plus the pending bytes.
 *
 * \param rd     Reader context; must be in consuming mode.
 * \param paused Optional output; set to 1 iff the reader was paused
 *               (backup taken), 0 otherwise. May be NULL.
 */
int mbedtls_mps_reader_reclaim(mbedtls_mps_reader *rd,
                               int *paused)
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_reclaim");
    if (paused != NULL) {
        *paused = 0;
    }
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode");
    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;
    frag_offset = mps_reader_get_fragment_offset(rd);
    if (pending == 0) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "No unsatisfied read-request has been logged.");
        /* Check if there's data left to be consumed.
         * Either the commit marker is still inside the accumulator part
         * (commit < frag_offset), or it hasn't reached the end of the
         * fragment (commit - frag_offset < frag_len). */
        if (commit < frag_offset || commit - frag_offset < frag_len) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "There is data left to be consumed.");
            /* Rewind to the last commit so uncommitted fetches are
             * re-served on the next get(). */
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_DATA_LEFT);
        }
        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment has been fully processed and committed.");
    } else {
        int overflow;
        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;
        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There has been an unsatisfied read with %u bytes overhead.",
                          (unsigned) pending);
        if (acc == NULL) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "No accumulator present");
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR);
        }
        acc_len = rd->acc_len;
        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if (commit < frag_offset) {
            /* No, accumulator is still being processed:
             * back up the whole fragment plus the uncommitted tail of
             * the accumulator. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        } else {
            /* Yes, the accumulator is already processed:
             * only the uncommitted tail of the fragment is backed up. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }
        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;
        /* Detect wrap-around in the two unsigned additions above. */
        overflow = 0;
        overflow |= (backup_len < acc_backup_len);
        overflow |= (acc_len_needed < backup_len);
        if (overflow || acc_len < acc_len_needed) {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "The accumulator is too small to handle the backup.");
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Size: %u", (unsigned) acc_len);
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Needed: %u (%u + %u)",
                              (unsigned) acc_len_needed,
                              (unsigned) backup_len, (unsigned) pending);
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL);
        }
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment backup: %u", (unsigned) frag_backup_len);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Accumulator backup: %u", (unsigned) acc_backup_len);
        /* Move uncommitted parts from the accumulator to the front
         * of the accumulator. (memmove: source and destination may
         * overlap within the accumulator.) */
        memmove(acc, acc + acc_backup_offset, acc_backup_len);
        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy(acc + acc_backup_len,
               frag + frag_backup_offset, frag_backup_len);
        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;
        if (paused != NULL) {
            *paused = 1;
        }
    }
    /* Detach the fragment and reset per-fragment state: the reader is
     * now back in producing mode. */
    rd->frag = NULL;
    rd->frag_len = 0;
    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Final state: aa %u, al %u, ar %u",
                      (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                      (unsigned) rd->acc_share.acc_remaining);
    MBEDTLS_MPS_TRACE_RETURN(0);
}
  472. #endif /* MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL */