author     AlexSm <alex@ydb.tech>  2024-03-05 10:40:59 +0100
committer  GitHub <noreply@github.com>  2024-03-05 12:40:59 +0300
commit     1ac13c847b5358faba44dbb638a828e24369467b (patch)
tree       07672b4dd3604ad3dee540a02c6494cb7d10dc3d /contrib/tools/python3/Modules/_hacl
parent     ffcca3e7f7958ddc6487b91d3df8c01054bd0638 (diff)
download   ydb-1ac13c847b5358faba44dbb638a828e24369467b.tar.gz
Library import 16 (#2433)
Co-authored-by: robot-piglet <robot-piglet@yandex-team.com>
Co-authored-by: deshevoy <deshevoy@yandex-team.com>
Co-authored-by: robot-contrib <robot-contrib@yandex-team.com>
Co-authored-by: thegeorg <thegeorg@yandex-team.com>
Co-authored-by: robot-ya-builder <robot-ya-builder@yandex-team.com>
Co-authored-by: svidyuk <svidyuk@yandex-team.com>
Co-authored-by: shadchin <shadchin@yandex-team.com>
Co-authored-by: robot-ratatosk <robot-ratatosk@yandex-team.com>
Co-authored-by: innokentii <innokentii@yandex-team.com>
Co-authored-by: arkady-e1ppa <arkady-e1ppa@yandex-team.com>
Co-authored-by: snermolaev <snermolaev@yandex-team.com>
Co-authored-by: dimdim11 <dimdim11@yandex-team.com>
Co-authored-by: kickbutt <kickbutt@yandex-team.com>
Co-authored-by: abdullinsaid <abdullinsaid@yandex-team.com>
Co-authored-by: korsunandrei <korsunandrei@yandex-team.com>
Co-authored-by: petrk <petrk@yandex-team.com>
Co-authored-by: miroslav2 <miroslav2@yandex-team.com>
Co-authored-by: serjflint <serjflint@yandex-team.com>
Co-authored-by: akhropov <akhropov@yandex-team.com>
Co-authored-by: prettyboy <prettyboy@yandex-team.com>
Co-authored-by: ilikepugs <ilikepugs@yandex-team.com>
Co-authored-by: hiddenpath <hiddenpath@yandex-team.com>
Co-authored-by: mikhnenko <mikhnenko@yandex-team.com>
Co-authored-by: spreis <spreis@yandex-team.com>
Co-authored-by: andreyshspb <andreyshspb@yandex-team.com>
Co-authored-by: dimaandreev <dimaandreev@yandex-team.com>
Co-authored-by: rashid <rashid@yandex-team.com>
Co-authored-by: robot-ydb-importer <robot-ydb-importer@yandex-team.com>
Co-authored-by: r-vetrov <r-vetrov@yandex-team.com>
Co-authored-by: ypodlesov <ypodlesov@yandex-team.com>
Co-authored-by: zaverden <zaverden@yandex-team.com>
Co-authored-by: vpozdyayev <vpozdyayev@yandex-team.com>
Co-authored-by: robot-cozmo <robot-cozmo@yandex-team.com>
Co-authored-by: v-korovin <v-korovin@yandex-team.com>
Co-authored-by: arikon <arikon@yandex-team.com>
Co-authored-by: khoden <khoden@yandex-team.com>
Co-authored-by: psydmm <psydmm@yandex-team.com>
Co-authored-by: robot-javacom <robot-javacom@yandex-team.com>
Co-authored-by: dtorilov <dtorilov@yandex-team.com>
Co-authored-by: sennikovmv <sennikovmv@yandex-team.com>
Co-authored-by: hcpp <hcpp@ydb.tech>
Diffstat (limited to 'contrib/tools/python3/Modules/_hacl')
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c | 1472
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h | 65
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c | 508
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h | 65
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c | 1345
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h | 204
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c | 824
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h | 130
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Streaming_Types.h | 83
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h | 346
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h | 107
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/fstar_uint128_struct_endianness.h | 68
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h | 266
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/lowstar_endianness.h | 231
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/types.h | 14
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h | 61
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h | 61
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h | 184
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h | 65
-rw-r--r--  contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h | 86
20 files changed, 6185 insertions(+), 0 deletions(-)
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c
new file mode 100644
index 0000000000..222ac824f0
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c
@@ -0,0 +1,1472 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_MD5.h"
+
+static uint32_t
+_h0[4U] =
+ { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+
+static uint32_t
+_t[64U] =
+ {
+ (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU,
+ (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U,
+ (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU,
+ (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U,
+ (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU,
+ (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U,
+ (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU,
+ (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU,
+ (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU,
+ (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U,
+ (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U,
+ (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U,
+ (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U,
+ (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U,
+ (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U,
+ (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U
+ };
+
+void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s)
+{
+ KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, s[i] = _h0[i];);
+}
+
+static void legacy_update(uint32_t *abcd, uint8_t *x)
+{
+ uint32_t aa = abcd[0U];
+ uint32_t bb = abcd[1U];
+ uint32_t cc = abcd[2U];
+ uint32_t dd = abcd[3U];
+ uint32_t va = abcd[0U];
+ uint32_t vb0 = abcd[1U];
+ uint32_t vc0 = abcd[2U];
+ uint32_t vd0 = abcd[3U];
+ uint8_t *b0 = x;
+ uint32_t u = load32_le(b0);
+ uint32_t xk = u;
+ uint32_t ti0 = _t[0U];
+ uint32_t
+ v =
+ vb0
+ +
+ ((va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0)
+ << (uint32_t)7U
+ | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> (uint32_t)25U);
+ abcd[0U] = v;
+ uint32_t va0 = abcd[3U];
+ uint32_t vb1 = abcd[0U];
+ uint32_t vc1 = abcd[1U];
+ uint32_t vd1 = abcd[2U];
+ uint8_t *b1 = x + (uint32_t)4U;
+ uint32_t u0 = load32_le(b1);
+ uint32_t xk0 = u0;
+ uint32_t ti1 = _t[1U];
+ uint32_t
+ v0 =
+ vb1
+ +
+ ((va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1)
+ << (uint32_t)12U
+ | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> (uint32_t)20U);
+ abcd[3U] = v0;
+ uint32_t va1 = abcd[2U];
+ uint32_t vb2 = abcd[3U];
+ uint32_t vc2 = abcd[0U];
+ uint32_t vd2 = abcd[1U];
+ uint8_t *b2 = x + (uint32_t)8U;
+ uint32_t u1 = load32_le(b2);
+ uint32_t xk1 = u1;
+ uint32_t ti2 = _t[2U];
+ uint32_t
+ v1 =
+ vb2
+ +
+ ((va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2)
+ << (uint32_t)17U
+ | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> (uint32_t)15U);
+ abcd[2U] = v1;
+ uint32_t va2 = abcd[1U];
+ uint32_t vb3 = abcd[2U];
+ uint32_t vc3 = abcd[3U];
+ uint32_t vd3 = abcd[0U];
+ uint8_t *b3 = x + (uint32_t)12U;
+ uint32_t u2 = load32_le(b3);
+ uint32_t xk2 = u2;
+ uint32_t ti3 = _t[3U];
+ uint32_t
+ v2 =
+ vb3
+ +
+ ((va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3)
+ << (uint32_t)22U
+ | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> (uint32_t)10U);
+ abcd[1U] = v2;
+ uint32_t va3 = abcd[0U];
+ uint32_t vb4 = abcd[1U];
+ uint32_t vc4 = abcd[2U];
+ uint32_t vd4 = abcd[3U];
+ uint8_t *b4 = x + (uint32_t)16U;
+ uint32_t u3 = load32_le(b4);
+ uint32_t xk3 = u3;
+ uint32_t ti4 = _t[4U];
+ uint32_t
+ v3 =
+ vb4
+ +
+ ((va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4)
+ << (uint32_t)7U
+ | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> (uint32_t)25U);
+ abcd[0U] = v3;
+ uint32_t va4 = abcd[3U];
+ uint32_t vb5 = abcd[0U];
+ uint32_t vc5 = abcd[1U];
+ uint32_t vd5 = abcd[2U];
+ uint8_t *b5 = x + (uint32_t)20U;
+ uint32_t u4 = load32_le(b5);
+ uint32_t xk4 = u4;
+ uint32_t ti5 = _t[5U];
+ uint32_t
+ v4 =
+ vb5
+ +
+ ((va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5)
+ << (uint32_t)12U
+ | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> (uint32_t)20U);
+ abcd[3U] = v4;
+ uint32_t va5 = abcd[2U];
+ uint32_t vb6 = abcd[3U];
+ uint32_t vc6 = abcd[0U];
+ uint32_t vd6 = abcd[1U];
+ uint8_t *b6 = x + (uint32_t)24U;
+ uint32_t u5 = load32_le(b6);
+ uint32_t xk5 = u5;
+ uint32_t ti6 = _t[6U];
+ uint32_t
+ v5 =
+ vb6
+ +
+ ((va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6)
+ << (uint32_t)17U
+ | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> (uint32_t)15U);
+ abcd[2U] = v5;
+ uint32_t va6 = abcd[1U];
+ uint32_t vb7 = abcd[2U];
+ uint32_t vc7 = abcd[3U];
+ uint32_t vd7 = abcd[0U];
+ uint8_t *b7 = x + (uint32_t)28U;
+ uint32_t u6 = load32_le(b7);
+ uint32_t xk6 = u6;
+ uint32_t ti7 = _t[7U];
+ uint32_t
+ v6 =
+ vb7
+ +
+ ((va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7)
+ << (uint32_t)22U
+ | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> (uint32_t)10U);
+ abcd[1U] = v6;
+ uint32_t va7 = abcd[0U];
+ uint32_t vb8 = abcd[1U];
+ uint32_t vc8 = abcd[2U];
+ uint32_t vd8 = abcd[3U];
+ uint8_t *b8 = x + (uint32_t)32U;
+ uint32_t u7 = load32_le(b8);
+ uint32_t xk7 = u7;
+ uint32_t ti8 = _t[8U];
+ uint32_t
+ v7 =
+ vb8
+ +
+ ((va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8)
+ << (uint32_t)7U
+ | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> (uint32_t)25U);
+ abcd[0U] = v7;
+ uint32_t va8 = abcd[3U];
+ uint32_t vb9 = abcd[0U];
+ uint32_t vc9 = abcd[1U];
+ uint32_t vd9 = abcd[2U];
+ uint8_t *b9 = x + (uint32_t)36U;
+ uint32_t u8 = load32_le(b9);
+ uint32_t xk8 = u8;
+ uint32_t ti9 = _t[9U];
+ uint32_t
+ v8 =
+ vb9
+ +
+ ((va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9)
+ << (uint32_t)12U
+ | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> (uint32_t)20U);
+ abcd[3U] = v8;
+ uint32_t va9 = abcd[2U];
+ uint32_t vb10 = abcd[3U];
+ uint32_t vc10 = abcd[0U];
+ uint32_t vd10 = abcd[1U];
+ uint8_t *b10 = x + (uint32_t)40U;
+ uint32_t u9 = load32_le(b10);
+ uint32_t xk9 = u9;
+ uint32_t ti10 = _t[10U];
+ uint32_t
+ v9 =
+ vb10
+ +
+ ((va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10)
+ << (uint32_t)17U
+ | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> (uint32_t)15U);
+ abcd[2U] = v9;
+ uint32_t va10 = abcd[1U];
+ uint32_t vb11 = abcd[2U];
+ uint32_t vc11 = abcd[3U];
+ uint32_t vd11 = abcd[0U];
+ uint8_t *b11 = x + (uint32_t)44U;
+ uint32_t u10 = load32_le(b11);
+ uint32_t xk10 = u10;
+ uint32_t ti11 = _t[11U];
+ uint32_t
+ v10 =
+ vb11
+ +
+ ((va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11)
+ << (uint32_t)22U
+ | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> (uint32_t)10U);
+ abcd[1U] = v10;
+ uint32_t va11 = abcd[0U];
+ uint32_t vb12 = abcd[1U];
+ uint32_t vc12 = abcd[2U];
+ uint32_t vd12 = abcd[3U];
+ uint8_t *b12 = x + (uint32_t)48U;
+ uint32_t u11 = load32_le(b12);
+ uint32_t xk11 = u11;
+ uint32_t ti12 = _t[12U];
+ uint32_t
+ v11 =
+ vb12
+ +
+ ((va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12)
+ << (uint32_t)7U
+ | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> (uint32_t)25U);
+ abcd[0U] = v11;
+ uint32_t va12 = abcd[3U];
+ uint32_t vb13 = abcd[0U];
+ uint32_t vc13 = abcd[1U];
+ uint32_t vd13 = abcd[2U];
+ uint8_t *b13 = x + (uint32_t)52U;
+ uint32_t u12 = load32_le(b13);
+ uint32_t xk12 = u12;
+ uint32_t ti13 = _t[13U];
+ uint32_t
+ v12 =
+ vb13
+ +
+ ((va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13)
+ << (uint32_t)12U
+ | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> (uint32_t)20U);
+ abcd[3U] = v12;
+ uint32_t va13 = abcd[2U];
+ uint32_t vb14 = abcd[3U];
+ uint32_t vc14 = abcd[0U];
+ uint32_t vd14 = abcd[1U];
+ uint8_t *b14 = x + (uint32_t)56U;
+ uint32_t u13 = load32_le(b14);
+ uint32_t xk13 = u13;
+ uint32_t ti14 = _t[14U];
+ uint32_t
+ v13 =
+ vb14
+ +
+ ((va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14)
+ << (uint32_t)17U
+ | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> (uint32_t)15U);
+ abcd[2U] = v13;
+ uint32_t va14 = abcd[1U];
+ uint32_t vb15 = abcd[2U];
+ uint32_t vc15 = abcd[3U];
+ uint32_t vd15 = abcd[0U];
+ uint8_t *b15 = x + (uint32_t)60U;
+ uint32_t u14 = load32_le(b15);
+ uint32_t xk14 = u14;
+ uint32_t ti15 = _t[15U];
+ uint32_t
+ v14 =
+ vb15
+ +
+ ((va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15)
+ << (uint32_t)22U
+ | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> (uint32_t)10U);
+ abcd[1U] = v14;
+ uint32_t va15 = abcd[0U];
+ uint32_t vb16 = abcd[1U];
+ uint32_t vc16 = abcd[2U];
+ uint32_t vd16 = abcd[3U];
+ uint8_t *b16 = x + (uint32_t)4U;
+ uint32_t u15 = load32_le(b16);
+ uint32_t xk15 = u15;
+ uint32_t ti16 = _t[16U];
+ uint32_t
+ v15 =
+ vb16
+ +
+ ((va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16)
+ << (uint32_t)5U
+ | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> (uint32_t)27U);
+ abcd[0U] = v15;
+ uint32_t va16 = abcd[3U];
+ uint32_t vb17 = abcd[0U];
+ uint32_t vc17 = abcd[1U];
+ uint32_t vd17 = abcd[2U];
+ uint8_t *b17 = x + (uint32_t)24U;
+ uint32_t u16 = load32_le(b17);
+ uint32_t xk16 = u16;
+ uint32_t ti17 = _t[17U];
+ uint32_t
+ v16 =
+ vb17
+ +
+ ((va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17)
+ << (uint32_t)9U
+ | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> (uint32_t)23U);
+ abcd[3U] = v16;
+ uint32_t va17 = abcd[2U];
+ uint32_t vb18 = abcd[3U];
+ uint32_t vc18 = abcd[0U];
+ uint32_t vd18 = abcd[1U];
+ uint8_t *b18 = x + (uint32_t)44U;
+ uint32_t u17 = load32_le(b18);
+ uint32_t xk17 = u17;
+ uint32_t ti18 = _t[18U];
+ uint32_t
+ v17 =
+ vb18
+ +
+ ((va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18)
+ << (uint32_t)14U
+ | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> (uint32_t)18U);
+ abcd[2U] = v17;
+ uint32_t va18 = abcd[1U];
+ uint32_t vb19 = abcd[2U];
+ uint32_t vc19 = abcd[3U];
+ uint32_t vd19 = abcd[0U];
+ uint8_t *b19 = x;
+ uint32_t u18 = load32_le(b19);
+ uint32_t xk18 = u18;
+ uint32_t ti19 = _t[19U];
+ uint32_t
+ v18 =
+ vb19
+ +
+ ((va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19)
+ << (uint32_t)20U
+ | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> (uint32_t)12U);
+ abcd[1U] = v18;
+ uint32_t va19 = abcd[0U];
+ uint32_t vb20 = abcd[1U];
+ uint32_t vc20 = abcd[2U];
+ uint32_t vd20 = abcd[3U];
+ uint8_t *b20 = x + (uint32_t)20U;
+ uint32_t u19 = load32_le(b20);
+ uint32_t xk19 = u19;
+ uint32_t ti20 = _t[20U];
+ uint32_t
+ v19 =
+ vb20
+ +
+ ((va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20)
+ << (uint32_t)5U
+ | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> (uint32_t)27U);
+ abcd[0U] = v19;
+ uint32_t va20 = abcd[3U];
+ uint32_t vb21 = abcd[0U];
+ uint32_t vc21 = abcd[1U];
+ uint32_t vd21 = abcd[2U];
+ uint8_t *b21 = x + (uint32_t)40U;
+ uint32_t u20 = load32_le(b21);
+ uint32_t xk20 = u20;
+ uint32_t ti21 = _t[21U];
+ uint32_t
+ v20 =
+ vb21
+ +
+ ((va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21)
+ << (uint32_t)9U
+ | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> (uint32_t)23U);
+ abcd[3U] = v20;
+ uint32_t va21 = abcd[2U];
+ uint32_t vb22 = abcd[3U];
+ uint32_t vc22 = abcd[0U];
+ uint32_t vd22 = abcd[1U];
+ uint8_t *b22 = x + (uint32_t)60U;
+ uint32_t u21 = load32_le(b22);
+ uint32_t xk21 = u21;
+ uint32_t ti22 = _t[22U];
+ uint32_t
+ v21 =
+ vb22
+ +
+ ((va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22)
+ << (uint32_t)14U
+ | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> (uint32_t)18U);
+ abcd[2U] = v21;
+ uint32_t va22 = abcd[1U];
+ uint32_t vb23 = abcd[2U];
+ uint32_t vc23 = abcd[3U];
+ uint32_t vd23 = abcd[0U];
+ uint8_t *b23 = x + (uint32_t)16U;
+ uint32_t u22 = load32_le(b23);
+ uint32_t xk22 = u22;
+ uint32_t ti23 = _t[23U];
+ uint32_t
+ v22 =
+ vb23
+ +
+ ((va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23)
+ << (uint32_t)20U
+ | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> (uint32_t)12U);
+ abcd[1U] = v22;
+ uint32_t va23 = abcd[0U];
+ uint32_t vb24 = abcd[1U];
+ uint32_t vc24 = abcd[2U];
+ uint32_t vd24 = abcd[3U];
+ uint8_t *b24 = x + (uint32_t)36U;
+ uint32_t u23 = load32_le(b24);
+ uint32_t xk23 = u23;
+ uint32_t ti24 = _t[24U];
+ uint32_t
+ v23 =
+ vb24
+ +
+ ((va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24)
+ << (uint32_t)5U
+ | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> (uint32_t)27U);
+ abcd[0U] = v23;
+ uint32_t va24 = abcd[3U];
+ uint32_t vb25 = abcd[0U];
+ uint32_t vc25 = abcd[1U];
+ uint32_t vd25 = abcd[2U];
+ uint8_t *b25 = x + (uint32_t)56U;
+ uint32_t u24 = load32_le(b25);
+ uint32_t xk24 = u24;
+ uint32_t ti25 = _t[25U];
+ uint32_t
+ v24 =
+ vb25
+ +
+ ((va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25)
+ << (uint32_t)9U
+ | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> (uint32_t)23U);
+ abcd[3U] = v24;
+ uint32_t va25 = abcd[2U];
+ uint32_t vb26 = abcd[3U];
+ uint32_t vc26 = abcd[0U];
+ uint32_t vd26 = abcd[1U];
+ uint8_t *b26 = x + (uint32_t)12U;
+ uint32_t u25 = load32_le(b26);
+ uint32_t xk25 = u25;
+ uint32_t ti26 = _t[26U];
+ uint32_t
+ v25 =
+ vb26
+ +
+ ((va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26)
+ << (uint32_t)14U
+ | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> (uint32_t)18U);
+ abcd[2U] = v25;
+ uint32_t va26 = abcd[1U];
+ uint32_t vb27 = abcd[2U];
+ uint32_t vc27 = abcd[3U];
+ uint32_t vd27 = abcd[0U];
+ uint8_t *b27 = x + (uint32_t)32U;
+ uint32_t u26 = load32_le(b27);
+ uint32_t xk26 = u26;
+ uint32_t ti27 = _t[27U];
+ uint32_t
+ v26 =
+ vb27
+ +
+ ((va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27)
+ << (uint32_t)20U
+ | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> (uint32_t)12U);
+ abcd[1U] = v26;
+ uint32_t va27 = abcd[0U];
+ uint32_t vb28 = abcd[1U];
+ uint32_t vc28 = abcd[2U];
+ uint32_t vd28 = abcd[3U];
+ uint8_t *b28 = x + (uint32_t)52U;
+ uint32_t u27 = load32_le(b28);
+ uint32_t xk27 = u27;
+ uint32_t ti28 = _t[28U];
+ uint32_t
+ v27 =
+ vb28
+ +
+ ((va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28)
+ << (uint32_t)5U
+ | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> (uint32_t)27U);
+ abcd[0U] = v27;
+ uint32_t va28 = abcd[3U];
+ uint32_t vb29 = abcd[0U];
+ uint32_t vc29 = abcd[1U];
+ uint32_t vd29 = abcd[2U];
+ uint8_t *b29 = x + (uint32_t)8U;
+ uint32_t u28 = load32_le(b29);
+ uint32_t xk28 = u28;
+ uint32_t ti29 = _t[29U];
+ uint32_t
+ v28 =
+ vb29
+ +
+ ((va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29)
+ << (uint32_t)9U
+ | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> (uint32_t)23U);
+ abcd[3U] = v28;
+ uint32_t va29 = abcd[2U];
+ uint32_t vb30 = abcd[3U];
+ uint32_t vc30 = abcd[0U];
+ uint32_t vd30 = abcd[1U];
+ uint8_t *b30 = x + (uint32_t)28U;
+ uint32_t u29 = load32_le(b30);
+ uint32_t xk29 = u29;
+ uint32_t ti30 = _t[30U];
+ uint32_t
+ v29 =
+ vb30
+ +
+ ((va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30)
+ << (uint32_t)14U
+ | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> (uint32_t)18U);
+ abcd[2U] = v29;
+ uint32_t va30 = abcd[1U];
+ uint32_t vb31 = abcd[2U];
+ uint32_t vc31 = abcd[3U];
+ uint32_t vd31 = abcd[0U];
+ uint8_t *b31 = x + (uint32_t)48U;
+ uint32_t u30 = load32_le(b31);
+ uint32_t xk30 = u30;
+ uint32_t ti31 = _t[31U];
+ uint32_t
+ v30 =
+ vb31
+ +
+ ((va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31)
+ << (uint32_t)20U
+ | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> (uint32_t)12U);
+ abcd[1U] = v30;
+ uint32_t va31 = abcd[0U];
+ uint32_t vb32 = abcd[1U];
+ uint32_t vc32 = abcd[2U];
+ uint32_t vd32 = abcd[3U];
+ uint8_t *b32 = x + (uint32_t)20U;
+ uint32_t u31 = load32_le(b32);
+ uint32_t xk31 = u31;
+ uint32_t ti32 = _t[32U];
+ uint32_t
+ v31 =
+ vb32
+ +
+ ((va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32)
+ << (uint32_t)4U
+ | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> (uint32_t)28U);
+ abcd[0U] = v31;
+ uint32_t va32 = abcd[3U];
+ uint32_t vb33 = abcd[0U];
+ uint32_t vc33 = abcd[1U];
+ uint32_t vd33 = abcd[2U];
+ uint8_t *b33 = x + (uint32_t)32U;
+ uint32_t u32 = load32_le(b33);
+ uint32_t xk32 = u32;
+ uint32_t ti33 = _t[33U];
+ uint32_t
+ v32 =
+ vb33
+ +
+ ((va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33)
+ << (uint32_t)11U
+ | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> (uint32_t)21U);
+ abcd[3U] = v32;
+ uint32_t va33 = abcd[2U];
+ uint32_t vb34 = abcd[3U];
+ uint32_t vc34 = abcd[0U];
+ uint32_t vd34 = abcd[1U];
+ uint8_t *b34 = x + (uint32_t)44U;
+ uint32_t u33 = load32_le(b34);
+ uint32_t xk33 = u33;
+ uint32_t ti34 = _t[34U];
+ uint32_t
+ v33 =
+ vb34
+ +
+ ((va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34)
+ << (uint32_t)16U
+ | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> (uint32_t)16U);
+ abcd[2U] = v33;
+ uint32_t va34 = abcd[1U];
+ uint32_t vb35 = abcd[2U];
+ uint32_t vc35 = abcd[3U];
+ uint32_t vd35 = abcd[0U];
+ uint8_t *b35 = x + (uint32_t)56U;
+ uint32_t u34 = load32_le(b35);
+ uint32_t xk34 = u34;
+ uint32_t ti35 = _t[35U];
+ uint32_t
+ v34 =
+ vb35
+ +
+ ((va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35)
+ << (uint32_t)23U
+ | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> (uint32_t)9U);
+ abcd[1U] = v34;
+ uint32_t va35 = abcd[0U];
+ uint32_t vb36 = abcd[1U];
+ uint32_t vc36 = abcd[2U];
+ uint32_t vd36 = abcd[3U];
+ uint8_t *b36 = x + (uint32_t)4U;
+ uint32_t u35 = load32_le(b36);
+ uint32_t xk35 = u35;
+ uint32_t ti36 = _t[36U];
+ uint32_t
+ v35 =
+ vb36
+ +
+ ((va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36)
+ << (uint32_t)4U
+ | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> (uint32_t)28U);
+ abcd[0U] = v35;
+ uint32_t va36 = abcd[3U];
+ uint32_t vb37 = abcd[0U];
+ uint32_t vc37 = abcd[1U];
+ uint32_t vd37 = abcd[2U];
+ uint8_t *b37 = x + (uint32_t)16U;
+ uint32_t u36 = load32_le(b37);
+ uint32_t xk36 = u36;
+ uint32_t ti37 = _t[37U];
+ uint32_t
+ v36 =
+ vb37
+ +
+ ((va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37)
+ << (uint32_t)11U
+ | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> (uint32_t)21U);
+ abcd[3U] = v36;
+ uint32_t va37 = abcd[2U];
+ uint32_t vb38 = abcd[3U];
+ uint32_t vc38 = abcd[0U];
+ uint32_t vd38 = abcd[1U];
+ uint8_t *b38 = x + (uint32_t)28U;
+ uint32_t u37 = load32_le(b38);
+ uint32_t xk37 = u37;
+ uint32_t ti38 = _t[38U];
+ uint32_t
+ v37 =
+ vb38
+ +
+ ((va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38)
+ << (uint32_t)16U
+ | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> (uint32_t)16U);
+ abcd[2U] = v37;
+ uint32_t va38 = abcd[1U];
+ uint32_t vb39 = abcd[2U];
+ uint32_t vc39 = abcd[3U];
+ uint32_t vd39 = abcd[0U];
+ uint8_t *b39 = x + (uint32_t)40U;
+ uint32_t u38 = load32_le(b39);
+ uint32_t xk38 = u38;
+ uint32_t ti39 = _t[39U];
+ uint32_t
+ v38 =
+ vb39
+ +
+ ((va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39)
+ << (uint32_t)23U
+ | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> (uint32_t)9U);
+ abcd[1U] = v38;
+ uint32_t va39 = abcd[0U];
+ uint32_t vb40 = abcd[1U];
+ uint32_t vc40 = abcd[2U];
+ uint32_t vd40 = abcd[3U];
+ uint8_t *b40 = x + (uint32_t)52U;
+ uint32_t u39 = load32_le(b40);
+ uint32_t xk39 = u39;
+ uint32_t ti40 = _t[40U];
+ uint32_t
+ v39 =
+ vb40
+ +
+ ((va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40)
+ << (uint32_t)4U
+ | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> (uint32_t)28U);
+ abcd[0U] = v39;
+ uint32_t va40 = abcd[3U];
+ uint32_t vb41 = abcd[0U];
+ uint32_t vc41 = abcd[1U];
+ uint32_t vd41 = abcd[2U];
+ uint8_t *b41 = x;
+ uint32_t u40 = load32_le(b41);
+ uint32_t xk40 = u40;
+ uint32_t ti41 = _t[41U];
+ uint32_t
+ v40 =
+ vb41
+ +
+ ((va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41)
+ << (uint32_t)11U
+ | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> (uint32_t)21U);
+ abcd[3U] = v40;
+ uint32_t va41 = abcd[2U];
+ uint32_t vb42 = abcd[3U];
+ uint32_t vc42 = abcd[0U];
+ uint32_t vd42 = abcd[1U];
+ uint8_t *b42 = x + (uint32_t)12U;
+ uint32_t u41 = load32_le(b42);
+ uint32_t xk41 = u41;
+ uint32_t ti42 = _t[42U];
+ uint32_t
+ v41 =
+ vb42
+ +
+ ((va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42)
+ << (uint32_t)16U
+ | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> (uint32_t)16U);
+ abcd[2U] = v41;
+ uint32_t va42 = abcd[1U];
+ uint32_t vb43 = abcd[2U];
+ uint32_t vc43 = abcd[3U];
+ uint32_t vd43 = abcd[0U];
+ uint8_t *b43 = x + (uint32_t)24U;
+ uint32_t u42 = load32_le(b43);
+ uint32_t xk42 = u42;
+ uint32_t ti43 = _t[43U];
+ uint32_t
+ v42 =
+ vb43
+ +
+ ((va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43)
+ << (uint32_t)23U
+ | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> (uint32_t)9U);
+ abcd[1U] = v42;
+ uint32_t va43 = abcd[0U];
+ uint32_t vb44 = abcd[1U];
+ uint32_t vc44 = abcd[2U];
+ uint32_t vd44 = abcd[3U];
+ uint8_t *b44 = x + (uint32_t)36U;
+ uint32_t u43 = load32_le(b44);
+ uint32_t xk43 = u43;
+ uint32_t ti44 = _t[44U];
+ uint32_t
+ v43 =
+ vb44
+ +
+ ((va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44)
+ << (uint32_t)4U
+ | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> (uint32_t)28U);
+ abcd[0U] = v43;
+ uint32_t va44 = abcd[3U];
+ uint32_t vb45 = abcd[0U];
+ uint32_t vc45 = abcd[1U];
+ uint32_t vd45 = abcd[2U];
+ uint8_t *b45 = x + (uint32_t)48U;
+ uint32_t u44 = load32_le(b45);
+ uint32_t xk44 = u44;
+ uint32_t ti45 = _t[45U];
+ uint32_t
+ v44 =
+ vb45
+ +
+ ((va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45)
+ << (uint32_t)11U
+ | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> (uint32_t)21U);
+ abcd[3U] = v44;
+ uint32_t va45 = abcd[2U];
+ uint32_t vb46 = abcd[3U];
+ uint32_t vc46 = abcd[0U];
+ uint32_t vd46 = abcd[1U];
+ uint8_t *b46 = x + (uint32_t)60U;
+ uint32_t u45 = load32_le(b46);
+ uint32_t xk45 = u45;
+ uint32_t ti46 = _t[46U];
+ uint32_t
+ v45 =
+ vb46
+ +
+ ((va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46)
+ << (uint32_t)16U
+ | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> (uint32_t)16U);
+ abcd[2U] = v45;
+ uint32_t va46 = abcd[1U];
+ uint32_t vb47 = abcd[2U];
+ uint32_t vc47 = abcd[3U];
+ uint32_t vd47 = abcd[0U];
+ uint8_t *b47 = x + (uint32_t)8U;
+ uint32_t u46 = load32_le(b47);
+ uint32_t xk46 = u46;
+ uint32_t ti47 = _t[47U];
+ uint32_t
+ v46 =
+ vb47
+ +
+ ((va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47)
+ << (uint32_t)23U
+ | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> (uint32_t)9U);
+ abcd[1U] = v46;
+ uint32_t va47 = abcd[0U];
+ uint32_t vb48 = abcd[1U];
+ uint32_t vc48 = abcd[2U];
+ uint32_t vd48 = abcd[3U];
+ uint8_t *b48 = x;
+ uint32_t u47 = load32_le(b48);
+ uint32_t xk47 = u47;
+ uint32_t ti48 = _t[48U];
+ uint32_t
+ v47 =
+ vb48
+ +
+ ((va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48)
+ << (uint32_t)6U
+ | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> (uint32_t)26U);
+ abcd[0U] = v47;
+ uint32_t va48 = abcd[3U];
+ uint32_t vb49 = abcd[0U];
+ uint32_t vc49 = abcd[1U];
+ uint32_t vd49 = abcd[2U];
+ uint8_t *b49 = x + (uint32_t)28U;
+ uint32_t u48 = load32_le(b49);
+ uint32_t xk48 = u48;
+ uint32_t ti49 = _t[49U];
+ uint32_t
+ v48 =
+ vb49
+ +
+ ((va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49)
+ << (uint32_t)10U
+ | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> (uint32_t)22U);
+ abcd[3U] = v48;
+ uint32_t va49 = abcd[2U];
+ uint32_t vb50 = abcd[3U];
+ uint32_t vc50 = abcd[0U];
+ uint32_t vd50 = abcd[1U];
+ uint8_t *b50 = x + (uint32_t)56U;
+ uint32_t u49 = load32_le(b50);
+ uint32_t xk49 = u49;
+ uint32_t ti50 = _t[50U];
+ uint32_t
+ v49 =
+ vb50
+ +
+ ((va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50)
+ << (uint32_t)15U
+ | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> (uint32_t)17U);
+ abcd[2U] = v49;
+ uint32_t va50 = abcd[1U];
+ uint32_t vb51 = abcd[2U];
+ uint32_t vc51 = abcd[3U];
+ uint32_t vd51 = abcd[0U];
+ uint8_t *b51 = x + (uint32_t)20U;
+ uint32_t u50 = load32_le(b51);
+ uint32_t xk50 = u50;
+ uint32_t ti51 = _t[51U];
+ uint32_t
+ v50 =
+ vb51
+ +
+ ((va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51)
+ << (uint32_t)21U
+ | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> (uint32_t)11U);
+ abcd[1U] = v50;
+ uint32_t va51 = abcd[0U];
+ uint32_t vb52 = abcd[1U];
+ uint32_t vc52 = abcd[2U];
+ uint32_t vd52 = abcd[3U];
+ uint8_t *b52 = x + (uint32_t)48U;
+ uint32_t u51 = load32_le(b52);
+ uint32_t xk51 = u51;
+ uint32_t ti52 = _t[52U];
+ uint32_t
+ v51 =
+ vb52
+ +
+ ((va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52)
+ << (uint32_t)6U
+ | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> (uint32_t)26U);
+ abcd[0U] = v51;
+ uint32_t va52 = abcd[3U];
+ uint32_t vb53 = abcd[0U];
+ uint32_t vc53 = abcd[1U];
+ uint32_t vd53 = abcd[2U];
+ uint8_t *b53 = x + (uint32_t)12U;
+ uint32_t u52 = load32_le(b53);
+ uint32_t xk52 = u52;
+ uint32_t ti53 = _t[53U];
+ uint32_t
+ v52 =
+ vb53
+ +
+ ((va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53)
+ << (uint32_t)10U
+ | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> (uint32_t)22U);
+ abcd[3U] = v52;
+ uint32_t va53 = abcd[2U];
+ uint32_t vb54 = abcd[3U];
+ uint32_t vc54 = abcd[0U];
+ uint32_t vd54 = abcd[1U];
+ uint8_t *b54 = x + (uint32_t)40U;
+ uint32_t u53 = load32_le(b54);
+ uint32_t xk53 = u53;
+ uint32_t ti54 = _t[54U];
+ uint32_t
+ v53 =
+ vb54
+ +
+ ((va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54)
+ << (uint32_t)15U
+ | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> (uint32_t)17U);
+ abcd[2U] = v53;
+ uint32_t va54 = abcd[1U];
+ uint32_t vb55 = abcd[2U];
+ uint32_t vc55 = abcd[3U];
+ uint32_t vd55 = abcd[0U];
+ uint8_t *b55 = x + (uint32_t)4U;
+ uint32_t u54 = load32_le(b55);
+ uint32_t xk54 = u54;
+ uint32_t ti55 = _t[55U];
+ uint32_t
+ v54 =
+ vb55
+ +
+ ((va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55)
+ << (uint32_t)21U
+ | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> (uint32_t)11U);
+ abcd[1U] = v54;
+ uint32_t va55 = abcd[0U];
+ uint32_t vb56 = abcd[1U];
+ uint32_t vc56 = abcd[2U];
+ uint32_t vd56 = abcd[3U];
+ uint8_t *b56 = x + (uint32_t)32U;
+ uint32_t u55 = load32_le(b56);
+ uint32_t xk55 = u55;
+ uint32_t ti56 = _t[56U];
+ uint32_t
+ v55 =
+ vb56
+ +
+ ((va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56)
+ << (uint32_t)6U
+ | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> (uint32_t)26U);
+ abcd[0U] = v55;
+ uint32_t va56 = abcd[3U];
+ uint32_t vb57 = abcd[0U];
+ uint32_t vc57 = abcd[1U];
+ uint32_t vd57 = abcd[2U];
+ uint8_t *b57 = x + (uint32_t)60U;
+ uint32_t u56 = load32_le(b57);
+ uint32_t xk56 = u56;
+ uint32_t ti57 = _t[57U];
+ uint32_t
+ v56 =
+ vb57
+ +
+ ((va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57)
+ << (uint32_t)10U
+ | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> (uint32_t)22U);
+ abcd[3U] = v56;
+ uint32_t va57 = abcd[2U];
+ uint32_t vb58 = abcd[3U];
+ uint32_t vc58 = abcd[0U];
+ uint32_t vd58 = abcd[1U];
+ uint8_t *b58 = x + (uint32_t)24U;
+ uint32_t u57 = load32_le(b58);
+ uint32_t xk57 = u57;
+ uint32_t ti58 = _t[58U];
+ uint32_t
+ v57 =
+ vb58
+ +
+ ((va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58)
+ << (uint32_t)15U
+ | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> (uint32_t)17U);
+ abcd[2U] = v57;
+ uint32_t va58 = abcd[1U];
+ uint32_t vb59 = abcd[2U];
+ uint32_t vc59 = abcd[3U];
+ uint32_t vd59 = abcd[0U];
+ uint8_t *b59 = x + (uint32_t)52U;
+ uint32_t u58 = load32_le(b59);
+ uint32_t xk58 = u58;
+ uint32_t ti59 = _t[59U];
+ uint32_t
+ v58 =
+ vb59
+ +
+ ((va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59)
+ << (uint32_t)21U
+ | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> (uint32_t)11U);
+ abcd[1U] = v58;
+ uint32_t va59 = abcd[0U];
+ uint32_t vb60 = abcd[1U];
+ uint32_t vc60 = abcd[2U];
+ uint32_t vd60 = abcd[3U];
+ uint8_t *b60 = x + (uint32_t)16U;
+ uint32_t u59 = load32_le(b60);
+ uint32_t xk59 = u59;
+ uint32_t ti60 = _t[60U];
+ uint32_t
+ v59 =
+ vb60
+ +
+ ((va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60)
+ << (uint32_t)6U
+ | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> (uint32_t)26U);
+ abcd[0U] = v59;
+ uint32_t va60 = abcd[3U];
+ uint32_t vb61 = abcd[0U];
+ uint32_t vc61 = abcd[1U];
+ uint32_t vd61 = abcd[2U];
+ uint8_t *b61 = x + (uint32_t)44U;
+ uint32_t u60 = load32_le(b61);
+ uint32_t xk60 = u60;
+ uint32_t ti61 = _t[61U];
+ uint32_t
+ v60 =
+ vb61
+ +
+ ((va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61)
+ << (uint32_t)10U
+ | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> (uint32_t)22U);
+ abcd[3U] = v60;
+ uint32_t va61 = abcd[2U];
+ uint32_t vb62 = abcd[3U];
+ uint32_t vc62 = abcd[0U];
+ uint32_t vd62 = abcd[1U];
+ uint8_t *b62 = x + (uint32_t)8U;
+ uint32_t u61 = load32_le(b62);
+ uint32_t xk61 = u61;
+ uint32_t ti62 = _t[62U];
+ uint32_t
+ v61 =
+ vb62
+ +
+ ((va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62)
+ << (uint32_t)15U
+ | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> (uint32_t)17U);
+ abcd[2U] = v61;
+ uint32_t va62 = abcd[1U];
+ uint32_t vb = abcd[2U];
+ uint32_t vc = abcd[3U];
+ uint32_t vd = abcd[0U];
+ uint8_t *b63 = x + (uint32_t)36U;
+ uint32_t u62 = load32_le(b63);
+ uint32_t xk62 = u62;
+ uint32_t ti = _t[63U];
+ uint32_t
+ v62 =
+ vb
+ +
+ ((va62 + (vc ^ (vb | ~vd)) + xk62 + ti)
+ << (uint32_t)21U
+ | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> (uint32_t)11U);
+ abcd[1U] = v62;
+ uint32_t a = abcd[0U];
+ uint32_t b = abcd[1U];
+ uint32_t c = abcd[2U];
+ uint32_t d = abcd[3U];
+ abcd[0U] = a + aa;
+ abcd[1U] = b + bb;
+ abcd[2U] = c + cc;
+ abcd[3U] = d + dd;
+}
+
+static void legacy_pad(uint64_t len, uint8_t *dst)
+{
+ uint8_t *dst1 = dst;
+ dst1[0U] = (uint8_t)0x80U;
+ uint8_t *dst2 = dst + (uint32_t)1U;
+ for
+ (uint32_t
+ i = (uint32_t)0U;
+ i
+ < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
+ i++)
+ {
+ dst2[i] = (uint8_t)0U;
+ }
+ uint8_t
+ *dst3 =
+ dst
+ +
+ (uint32_t)1U
+ +
+ ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
+ % (uint32_t)64U;
+ store64_le(dst3, len << (uint32_t)3U);
+}
+
+void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst)
+{
+ KRML_MAYBE_FOR4(i,
+ (uint32_t)0U,
+ (uint32_t)4U,
+ (uint32_t)1U,
+ store32_le(dst + i * (uint32_t)4U, s[i]););
+}
+
+void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+{
+ for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ {
+ uint32_t sz = (uint32_t)64U;
+ uint8_t *block = blocks + sz * i;
+ legacy_update(s, block);
+ }
+}
+
+void
+Hacl_Hash_MD5_legacy_update_last(
+ uint32_t *s,
+ uint64_t prev_len,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ uint32_t blocks_n = input_len / (uint32_t)64U;
+ uint32_t blocks_len = blocks_n * (uint32_t)64U;
+ uint8_t *blocks = input;
+ uint32_t rest_len = input_len - blocks_len;
+ uint8_t *rest = input + blocks_len;
+ Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
+ uint64_t total_input_len = prev_len + (uint64_t)input_len;
+ uint32_t
+ pad_len =
+ (uint32_t)1U
+ +
+ ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
+ % (uint32_t)64U
+ + (uint32_t)8U;
+ uint32_t tmp_len = rest_len + pad_len;
+ uint8_t tmp_twoblocks[128U] = { 0U };
+ uint8_t *tmp = tmp_twoblocks;
+ uint8_t *tmp_rest = tmp;
+ uint8_t *tmp_pad = tmp + rest_len;
+ memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
+ legacy_pad(total_input_len, tmp_pad);
+ Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+}
+
+void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint32_t
+ s[4U] =
+ { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+ uint32_t blocks_n0 = input_len / (uint32_t)64U;
+ uint32_t blocks_n1;
+ if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+ {
+ blocks_n1 = blocks_n0 - (uint32_t)1U;
+ }
+ else
+ {
+ blocks_n1 = blocks_n0;
+ }
+ uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+ uint8_t *blocks0 = input;
+ uint32_t rest_len0 = input_len - blocks_len0;
+ uint8_t *rest0 = input + blocks_len0;
+ uint32_t blocks_n = blocks_n1;
+ uint32_t blocks_len = blocks_len0;
+ uint8_t *blocks = blocks0;
+ uint32_t rest_len = rest_len0;
+ uint8_t *rest = rest0;
+ Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
+ Hacl_Hash_MD5_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+ Hacl_Hash_Core_MD5_legacy_finish(s, dst);
+}
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ Hacl_Hash_Core_MD5_legacy_init(block_state);
+ return p;
+}
+
+void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ Hacl_Hash_Core_MD5_legacy_init(block_state);
+ Hacl_Streaming_MD_state_32
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+/**
+0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+{
+ Hacl_Streaming_MD_state_32 s = *p;
+ uint64_t total_len = s.total_len;
+ if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ {
+ return Hacl_Streaming_Types_MaximumLengthExceeded;
+ }
+ uint32_t sz;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ sz = (uint32_t)64U;
+ }
+ else
+ {
+ sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ if (len <= (uint32_t)64U - sz)
+ {
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf + sz1;
+ memcpy(buf2, data, len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)len;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len2
+ }
+ );
+ }
+ else if (sz == (uint32_t)0U)
+ {
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ }
+ uint32_t ite;
+ if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - data1_len;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + data1_len;
+ Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+ uint8_t *dst = buf;
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)len
+ }
+ );
+ }
+ else
+ {
+ uint32_t diff = (uint32_t)64U - sz;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + diff;
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state10 = s1.block_state;
+ uint8_t *buf0 = s1.buf;
+ uint64_t total_len10 = s1.total_len;
+ uint32_t sz10;
+ if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ {
+ sz10 = (uint32_t)64U;
+ }
+ else
+ {
+ sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf0 + sz10;
+ memcpy(buf2, data1, diff * sizeof (uint8_t));
+ uint64_t total_len2 = total_len10 + (uint64_t)diff;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state10,
+ .buf = buf0,
+ .total_len = total_len2
+ }
+ );
+ Hacl_Streaming_MD_state_32 s10 = *p;
+ uint32_t *block_state1 = s10.block_state;
+ uint8_t *buf = s10.buf;
+ uint64_t total_len1 = s10.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ }
+ uint32_t ite;
+ if
+ (
+ (uint64_t)(len - diff)
+ % (uint64_t)(uint32_t)64U
+ == (uint64_t)0U
+ && (uint64_t)(len - diff) > (uint64_t)0U
+ )
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - diff - data1_len;
+ uint8_t *data11 = data2;
+ uint8_t *data21 = data2 + data1_len;
+ Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+ uint8_t *dst = buf;
+ memcpy(dst, data21, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)(len - diff)
+ }
+ );
+ }
+ return Hacl_Streaming_Types_Success;
+}
+
+void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+{
+ Hacl_Streaming_MD_state_32 scrut = *p;
+ uint32_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)64U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint32_t tmp_block_state[4U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t));
+ uint32_t ite;
+ if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = r % (uint32_t)64U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
+ Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst);
+}
+
+void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ KRML_HOST_FREE(block_state);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(s);
+}
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s0;
+ uint32_t *block_state0 = scrut.block_state;
+ uint8_t *buf0 = scrut.buf;
+ uint64_t total_len0 = scrut.total_len;
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, (uint32_t)4U * sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ return p;
+}
+
+void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ Hacl_Hash_MD5_legacy_hash(input, input_len, dst);
+}
+
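Note: Hacl_Hash_MD5.c above exposes both a one-shot and a streaming interface. The following is a minimal caller sketch for the one-shot entry point; it is hypothetical, not part of the imported diff, and assumes the vendored Hacl_Hash_MD5.h is reachable on the include path.

/* Hypothetical caller, not part of the imported diff; assumes the
 * vendored "Hacl_Hash_MD5.h" is on the include path. */
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_MD5.h"

int main(void)
{
  uint8_t msg[3U] = { (uint8_t)'a', (uint8_t)'b', (uint8_t)'c' };
  uint8_t digest[16U] = { 0U };  /* MD5 emits a 16-byte digest */
  /* One-shot wrapper around the legacy init / update_last / finish path. */
  Hacl_Streaming_MD5_legacy_hash(msg, (uint32_t)3U, digest);
  for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++)
  {
    printf("%02x", digest[i]);
  }
  /* RFC 1321 test vector for "abc": 900150983cd24fb0d6963f7d28e17f72 */
  printf("\n");
  return 0;
}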
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h
new file mode 100644
index 0000000000..13c19fd40f
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h
@@ -0,0 +1,65 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_MD5_H
+#define __Hacl_Hash_MD5_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+
+typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_MD5_state;
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void);
+
+void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s);
+
+/**
+0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+
+void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+
+void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s);
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+
+void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_MD5_H_DEFINED
+#endif
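The header above also declares the streaming variant of the API. A hedged usage sketch follows (hypothetical, not part of the diff; the two-part input split is arbitrary): create a state, feed data through any number of update calls, extract the digest with finish (which hashes a copy of the block state, so the stream stays usable), then free.

/* Hypothetical streaming usage of the declarations above; the
 * two-part input split is for illustration only. */
#include <stdint.h>
#include "Hacl_Hash_MD5.h"

static void md5_of_two_parts(uint8_t *p1, uint32_t len1,
                             uint8_t *p2, uint32_t len2,
                             uint8_t *dst)
{
  Hacl_Streaming_MD_state_32 *st = Hacl_Streaming_MD5_legacy_create_in();
  /* update returns 0 (Hacl_Streaming_Types_Success) unless the total
   * input would exceed 2305843009213693951 bytes, i.e. 2^61 - 1. */
  (void)Hacl_Streaming_MD5_legacy_update(st, p1, len1);
  (void)Hacl_Streaming_MD5_legacy_update(st, p2, len2);
  /* finish works on a temporary copy of the state; st remains valid. */
  Hacl_Streaming_MD5_legacy_finish(st, dst);
  Hacl_Streaming_MD5_legacy_free(st);
}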
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c
new file mode 100644
index 0000000000..5ecb3c0b3a
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c
@@ -0,0 +1,508 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_SHA1.h"
+
+static uint32_t
+_h0[5U] =
+ {
+ (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
+ (uint32_t)0xc3d2e1f0U
+ };
+
+void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
+{
+ KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
+}
+
+static void legacy_update(uint32_t *h, uint8_t *l)
+{
+ uint32_t ha = h[0U];
+ uint32_t hb = h[1U];
+ uint32_t hc = h[2U];
+ uint32_t hd = h[3U];
+ uint32_t he = h[4U];
+ uint32_t _w[80U] = { 0U };
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+ {
+ uint32_t v;
+ if (i < (uint32_t)16U)
+ {
+ uint8_t *b = l + i * (uint32_t)4U;
+ uint32_t u = load32_be(b);
+ v = u;
+ }
+ else
+ {
+ uint32_t wmit3 = _w[i - (uint32_t)3U];
+ uint32_t wmit8 = _w[i - (uint32_t)8U];
+ uint32_t wmit14 = _w[i - (uint32_t)14U];
+ uint32_t wmit16 = _w[i - (uint32_t)16U];
+ v =
+ (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16)))
+ << (uint32_t)1U
+ | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
+ }
+ _w[i] = v;
+ }
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+ {
+ uint32_t _a = h[0U];
+ uint32_t _b = h[1U];
+ uint32_t _c = h[2U];
+ uint32_t _d = h[3U];
+ uint32_t _e = h[4U];
+ uint32_t wmit = _w[i];
+ uint32_t ite0;
+ if (i < (uint32_t)20U)
+ {
+ ite0 = (_b & _c) ^ (~_b & _d);
+ }
+ else if ((uint32_t)39U < i && i < (uint32_t)60U)
+ {
+ ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
+ }
+ else
+ {
+ ite0 = _b ^ (_c ^ _d);
+ }
+ uint32_t ite;
+ if (i < (uint32_t)20U)
+ {
+ ite = (uint32_t)0x5a827999U;
+ }
+ else if (i < (uint32_t)40U)
+ {
+ ite = (uint32_t)0x6ed9eba1U;
+ }
+ else if (i < (uint32_t)60U)
+ {
+ ite = (uint32_t)0x8f1bbcdcU;
+ }
+ else
+ {
+ ite = (uint32_t)0xca62c1d6U;
+ }
+ uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
+ h[0U] = _T;
+ h[1U] = _a;
+ h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
+ h[3U] = _c;
+ h[4U] = _d;
+ }
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+ {
+ _w[i] = (uint32_t)0U;
+ }
+ uint32_t sta = h[0U];
+ uint32_t stb = h[1U];
+ uint32_t stc = h[2U];
+ uint32_t std = h[3U];
+ uint32_t ste = h[4U];
+ h[0U] = sta + ha;
+ h[1U] = stb + hb;
+ h[2U] = stc + hc;
+ h[3U] = std + hd;
+ h[4U] = ste + he;
+}
+
+static void legacy_pad(uint64_t len, uint8_t *dst)
+{
+ uint8_t *dst1 = dst;
+ dst1[0U] = (uint8_t)0x80U;
+ uint8_t *dst2 = dst + (uint32_t)1U;
+ for
+ (uint32_t
+ i = (uint32_t)0U;
+ i
+ < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
+ i++)
+ {
+ dst2[i] = (uint8_t)0U;
+ }
+ uint8_t
+ *dst3 =
+ dst
+ +
+ (uint32_t)1U
+ +
+ ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
+ % (uint32_t)64U;
+ store64_be(dst3, len << (uint32_t)3U);
+}
+
+void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
+{
+ KRML_MAYBE_FOR5(i,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)1U,
+ store32_be(dst + i * (uint32_t)4U, s[i]););
+}
+
+void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+{
+ for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ {
+ uint32_t sz = (uint32_t)64U;
+ uint8_t *block = blocks + sz * i;
+ legacy_update(s, block);
+ }
+}
+
+void
+Hacl_Hash_SHA1_legacy_update_last(
+ uint32_t *s,
+ uint64_t prev_len,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ uint32_t blocks_n = input_len / (uint32_t)64U;
+ uint32_t blocks_len = blocks_n * (uint32_t)64U;
+ uint8_t *blocks = input;
+ uint32_t rest_len = input_len - blocks_len;
+ uint8_t *rest = input + blocks_len;
+ Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
+ uint64_t total_input_len = prev_len + (uint64_t)input_len;
+ uint32_t
+ pad_len =
+ (uint32_t)1U
+ +
+ ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
+ % (uint32_t)64U
+ + (uint32_t)8U;
+ uint32_t tmp_len = rest_len + pad_len;
+ uint8_t tmp_twoblocks[128U] = { 0U };
+ uint8_t *tmp = tmp_twoblocks;
+ uint8_t *tmp_rest = tmp;
+ uint8_t *tmp_pad = tmp + rest_len;
+ memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
+ legacy_pad(total_input_len, tmp_pad);
+ Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+}
+
+void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint32_t
+ s[5U] =
+ {
+ (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
+ (uint32_t)0xc3d2e1f0U
+ };
+ uint32_t blocks_n0 = input_len / (uint32_t)64U;
+ uint32_t blocks_n1;
+ if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+ {
+ blocks_n1 = blocks_n0 - (uint32_t)1U;
+ }
+ else
+ {
+ blocks_n1 = blocks_n0;
+ }
+ uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+ uint8_t *blocks0 = input;
+ uint32_t rest_len0 = input_len - blocks_len0;
+ uint8_t *rest0 = input + blocks_len0;
+ uint32_t blocks_n = blocks_n1;
+ uint32_t blocks_len = blocks_len0;
+ uint8_t *blocks = blocks0;
+ uint32_t rest_len = rest_len0;
+ uint8_t *rest = rest0;
+ Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
+ Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+ Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+}
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ Hacl_Hash_Core_SHA1_legacy_init(block_state);
+ return p;
+}
+
+void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ Hacl_Hash_Core_SHA1_legacy_init(block_state);
+ Hacl_Streaming_MD_state_32
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+/**
+0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+{
+ Hacl_Streaming_MD_state_32 s = *p;
+ uint64_t total_len = s.total_len;
+ if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ {
+ return Hacl_Streaming_Types_MaximumLengthExceeded;
+ }
+ uint32_t sz;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ sz = (uint32_t)64U;
+ }
+ else
+ {
+ sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ if (len <= (uint32_t)64U - sz)
+ {
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf + sz1;
+ memcpy(buf2, data, len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)len;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len2
+ }
+ );
+ }
+ else if (sz == (uint32_t)0U)
+ {
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ }
+ uint32_t ite;
+ if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - data1_len;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + data1_len;
+ Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+ uint8_t *dst = buf;
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)len
+ }
+ );
+ }
+ else
+ {
+ uint32_t diff = (uint32_t)64U - sz;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + diff;
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state10 = s1.block_state;
+ uint8_t *buf0 = s1.buf;
+ uint64_t total_len10 = s1.total_len;
+ uint32_t sz10;
+ if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ {
+ sz10 = (uint32_t)64U;
+ }
+ else
+ {
+ sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf0 + sz10;
+ memcpy(buf2, data1, diff * sizeof (uint8_t));
+ uint64_t total_len2 = total_len10 + (uint64_t)diff;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state10,
+ .buf = buf0,
+ .total_len = total_len2
+ }
+ );
+ Hacl_Streaming_MD_state_32 s10 = *p;
+ uint32_t *block_state1 = s10.block_state;
+ uint8_t *buf = s10.buf;
+ uint64_t total_len1 = s10.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ }
+ uint32_t ite;
+ if
+ (
+ (uint64_t)(len - diff)
+ % (uint64_t)(uint32_t)64U
+ == (uint64_t)0U
+ && (uint64_t)(len - diff) > (uint64_t)0U
+ )
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - diff - data1_len;
+ uint8_t *data11 = data2;
+ uint8_t *data21 = data2 + data1_len;
+ Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+ uint8_t *dst = buf;
+ memcpy(dst, data21, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)(len - diff)
+ }
+ );
+ }
+ return Hacl_Streaming_Types_Success;
+}
+
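However mechanically unrolled, the three branches of `legacy_update` (and of
the near-identical SHA-2 update functions later in this change) preserve one
invariant: the internal buffer holds exactly `total_len % 64` pending bytes,
with a full 64-byte buffer standing in for the boundary case where `total_len`
is a nonzero multiple of 64. An illustrative outline of the control flow, as
C comments:

/* if (len fits in the space left in buf)   copy data into buf and return;
 * else if (buf is empty)                   hash every whole 64-byte block of
 *                                          data, stash the remainder in buf;
 * else                                     fill buf to 64 bytes and hash it,
 *                                          then handle the rest as above. */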
+void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+{
+ Hacl_Streaming_MD_state_32 scrut = *p;
+ uint32_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)64U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint32_t tmp_block_state[5U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
+ uint32_t ite;
+ if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = r % (uint32_t)64U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
+ Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
+}
+
+void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ KRML_HOST_FREE(block_state);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(s);
+}
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s0;
+ uint32_t *block_state0 = scrut.block_state;
+ uint8_t *buf0 = scrut.buf;
+ uint64_t total_len0 = scrut.total_len;
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ return p;
+}
+
+void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ Hacl_Hash_SHA1_legacy_hash(input, input_len, dst);
+}
+
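Taken together, these functions form the usual create/update/finish/free
streaming pattern, and because `finish` works on a copy of the block state,
the state stays usable afterwards. A minimal caller sketch (illustrative
only; it assumes the `Hacl_Hash_SHA1.h` header added below is on the include
path):

#include <string.h>
#include "Hacl_Hash_SHA1.h"

void sha1_demo(void)
{
  uint8_t digest[20U] = { 0U };  /* SHA-1 digests are 20 bytes */
  char msg[] = "abc";
  Hacl_Streaming_MD_state_32 *st = Hacl_Streaming_SHA1_legacy_create_in();
  Hacl_Streaming_Types_error_code
  rc = Hacl_Streaming_SHA1_legacy_update(st, (uint8_t *)msg, (uint32_t)strlen(msg));
  if (rc == Hacl_Streaming_Types_Success)
  {
    Hacl_Streaming_SHA1_legacy_finish(st, digest);  /* st remains valid */
  }
  Hacl_Streaming_SHA1_legacy_free(st);
}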
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h
new file mode 100644
index 0000000000..dc50aa6f6d
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h
@@ -0,0 +1,65 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_SHA1_H
+#define __Hacl_Hash_SHA1_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+
+typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA1_state;
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void);
+
+void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s);
+
+/**
+Returns 0 for success, or 1 if the combined length of all data fed since the
+last init exceeds 2^61-1 bytes.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+
+void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+
+void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s);
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+
+void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_SHA1_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c
new file mode 100644
index 0000000000..08e3f7edbf
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c
@@ -0,0 +1,1345 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_SHA2.h"
+
+
+
+void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash)
+{
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint32_t *os = hash;
+ uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+ os[i] = x;);
+}
+
+static inline void sha256_update(uint8_t *b, uint32_t *hash)
+{
+ uint32_t hash_old[8U] = { 0U };
+ uint32_t ws[16U] = { 0U };
+ memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+ uint8_t *b10 = b;
+ uint32_t u = load32_be(b10);
+ ws[0U] = u;
+ uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+ ws[1U] = u0;
+ uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+ ws[2U] = u1;
+ uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+ ws[3U] = u2;
+ uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+ ws[4U] = u3;
+ uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+ ws[5U] = u4;
+ uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+ ws[6U] = u5;
+ uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+ ws[7U] = u6;
+ uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+ ws[8U] = u7;
+ uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+ ws[9U] = u8;
+ uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+ ws[10U] = u9;
+ uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+ ws[11U] = u10;
+ uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+ ws[12U] = u11;
+ uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+ ws[13U] = u12;
+ uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+ ws[14U] = u13;
+ uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+ ws[15U] = u14;
+ KRML_MAYBE_FOR4(i0,
+ (uint32_t)0U,
+ (uint32_t)4U,
+ (uint32_t)1U,
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+ uint32_t ws_t = ws[i];
+ uint32_t a0 = hash[0U];
+ uint32_t b0 = hash[1U];
+ uint32_t c0 = hash[2U];
+ uint32_t d0 = hash[3U];
+ uint32_t e0 = hash[4U];
+ uint32_t f0 = hash[5U];
+ uint32_t g0 = hash[6U];
+ uint32_t h02 = hash[7U];
+ uint32_t k_e_t = k_t;
+ uint32_t
+ t1 =
+ h02
+ +
+ ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
+ ^
+ ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
+ ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+ + ((e0 & f0) ^ (~e0 & g0))
+ + k_e_t
+ + ws_t;
+ uint32_t
+ t2 =
+ ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
+ ^
+ ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
+ ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+ + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
+ uint32_t a1 = t1 + t2;
+ uint32_t b1 = a0;
+ uint32_t c1 = b0;
+ uint32_t d1 = c0;
+ uint32_t e1 = d0 + t1;
+ uint32_t f1 = e0;
+ uint32_t g1 = f0;
+ uint32_t h12 = g0;
+ hash[0U] = a1;
+ hash[1U] = b1;
+ hash[2U] = c1;
+ hash[3U] = d1;
+ hash[4U] = e1;
+ hash[5U] = f1;
+ hash[6U] = g1;
+ hash[7U] = h12;);
+ if (i0 < (uint32_t)3U)
+ {
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint32_t t16 = ws[i];
+ uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
+ uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
+ uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+ uint32_t
+ s1 =
+ (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
+ ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
+ uint32_t
+ s0 =
+ (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
+ ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+ ws[i] = s1 + t7 + s0 + t16;);
+ });
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint32_t *os = hash;
+ uint32_t x = hash[i] + hash_old[i];
+ os[i] = x;);
+}
+
+void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+{
+ uint32_t blocks = len / (uint32_t)64U;
+ for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+ {
+ uint8_t *b0 = b;
+ uint8_t *mb = b0 + i * (uint32_t)64U;
+ sha256_update(mb, st);
+ }
+}
+
+void
+Hacl_SHA2_Scalar32_sha256_update_last(
+ uint64_t totlen,
+ uint32_t len,
+ uint8_t *b,
+ uint32_t *hash
+)
+{
+ uint32_t blocks;
+ if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+ {
+ blocks = (uint32_t)1U;
+ }
+ else
+ {
+ blocks = (uint32_t)2U;
+ }
+ uint32_t fin = blocks * (uint32_t)64U;
+ uint8_t last[128U] = { 0U };
+ uint8_t totlen_buf[8U] = { 0U };
+ uint64_t total_len_bits = totlen << (uint32_t)3U;
+ store64_be(totlen_buf, total_len_bits);
+ uint8_t *b0 = b;
+ memcpy(last, b0, len * sizeof (uint8_t));
+ last[len] = (uint8_t)0x80U;
+ memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+ uint8_t *last00 = last;
+ uint8_t *last10 = last + (uint32_t)64U;
+ uint8_t *l0 = last00;
+ uint8_t *l1 = last10;
+ uint8_t *lb0 = l0;
+ uint8_t *lb1 = l1;
+ uint8_t *last0 = lb0;
+ uint8_t *last1 = lb1;
+ sha256_update(last0, hash);
+ if (blocks > (uint32_t)1U)
+ {
+ sha256_update(last1, hash);
+ return;
+ }
+}
+
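The one-versus-two block decision above is the usual Merkle-Damgard padding
arithmetic: a 0x80 marker byte plus an 8-byte big-endian bit length must fit
after the residual data. An illustrative restatement of the rule (the SHA-512
variant further down applies the same logic with 128-byte blocks and a
16-byte length field):

static uint32_t sha256_last_blocks(uint32_t len)  /* len <= 64 residual bytes */
{
  return (len + 8U + 1U <= 64U) ? 1U : 2U;  /* e.g. 55 -> 1, 56 -> 2 */
}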
+void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h)
+{
+ uint8_t hbuf[32U] = { 0U };
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ store32_be(hbuf + i * (uint32_t)4U, st[i]););
+ memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+}
+
+void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash)
+{
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint32_t *os = hash;
+ uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+ os[i] = x;);
+}
+
+static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+{
+ Hacl_SHA2_Scalar32_sha256_update_nblocks(len, b, st);
+}
+
+void
+Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
+{
+ Hacl_SHA2_Scalar32_sha256_update_last(totlen, len, b, st);
+}
+
+void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h)
+{
+ uint8_t hbuf[32U] = { 0U };
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ store32_be(hbuf + i * (uint32_t)4U, st[i]););
+ memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+}
+
+void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash)
+{
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint64_t *os = hash;
+ uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+ os[i] = x;);
+}
+
+static inline void sha512_update(uint8_t *b, uint64_t *hash)
+{
+ uint64_t hash_old[8U] = { 0U };
+ uint64_t ws[16U] = { 0U };
+ memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t));
+ uint8_t *b10 = b;
+ uint64_t u = load64_be(b10);
+ ws[0U] = u;
+ uint64_t u0 = load64_be(b10 + (uint32_t)8U);
+ ws[1U] = u0;
+ uint64_t u1 = load64_be(b10 + (uint32_t)16U);
+ ws[2U] = u1;
+ uint64_t u2 = load64_be(b10 + (uint32_t)24U);
+ ws[3U] = u2;
+ uint64_t u3 = load64_be(b10 + (uint32_t)32U);
+ ws[4U] = u3;
+ uint64_t u4 = load64_be(b10 + (uint32_t)40U);
+ ws[5U] = u4;
+ uint64_t u5 = load64_be(b10 + (uint32_t)48U);
+ ws[6U] = u5;
+ uint64_t u6 = load64_be(b10 + (uint32_t)56U);
+ ws[7U] = u6;
+ uint64_t u7 = load64_be(b10 + (uint32_t)64U);
+ ws[8U] = u7;
+ uint64_t u8 = load64_be(b10 + (uint32_t)72U);
+ ws[9U] = u8;
+ uint64_t u9 = load64_be(b10 + (uint32_t)80U);
+ ws[10U] = u9;
+ uint64_t u10 = load64_be(b10 + (uint32_t)88U);
+ ws[11U] = u10;
+ uint64_t u11 = load64_be(b10 + (uint32_t)96U);
+ ws[12U] = u11;
+ uint64_t u12 = load64_be(b10 + (uint32_t)104U);
+ ws[13U] = u12;
+ uint64_t u13 = load64_be(b10 + (uint32_t)112U);
+ ws[14U] = u13;
+ uint64_t u14 = load64_be(b10 + (uint32_t)120U);
+ ws[15U] = u14;
+ KRML_MAYBE_FOR5(i0,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)1U,
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+ uint64_t ws_t = ws[i];
+ uint64_t a0 = hash[0U];
+ uint64_t b0 = hash[1U];
+ uint64_t c0 = hash[2U];
+ uint64_t d0 = hash[3U];
+ uint64_t e0 = hash[4U];
+ uint64_t f0 = hash[5U];
+ uint64_t g0 = hash[6U];
+ uint64_t h02 = hash[7U];
+ uint64_t k_e_t = k_t;
+ uint64_t
+ t1 =
+ h02
+ +
+ ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U)
+ ^
+ ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U)
+ ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U)))
+ + ((e0 & f0) ^ (~e0 & g0))
+ + k_e_t
+ + ws_t;
+ uint64_t
+ t2 =
+ ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U)
+ ^
+ ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U)
+ ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U)))
+ + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
+ uint64_t a1 = t1 + t2;
+ uint64_t b1 = a0;
+ uint64_t c1 = b0;
+ uint64_t d1 = c0;
+ uint64_t e1 = d0 + t1;
+ uint64_t f1 = e0;
+ uint64_t g1 = f0;
+ uint64_t h12 = g0;
+ hash[0U] = a1;
+ hash[1U] = b1;
+ hash[2U] = c1;
+ hash[3U] = d1;
+ hash[4U] = e1;
+ hash[5U] = f1;
+ hash[6U] = g1;
+ hash[7U] = h12;);
+ if (i0 < (uint32_t)4U)
+ {
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint64_t t16 = ws[i];
+ uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
+ uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
+ uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+ uint64_t
+ s1 =
+ (t2 << (uint32_t)45U | t2 >> (uint32_t)19U)
+ ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U);
+ uint64_t
+ s0 =
+ (t15 << (uint32_t)63U | t15 >> (uint32_t)1U)
+ ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U);
+ ws[i] = s1 + t7 + s0 + t16;);
+ });
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint64_t *os = hash;
+ uint64_t x = hash[i] + hash_old[i];
+ os[i] = x;);
+}
+
+void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+{
+ uint32_t blocks = len / (uint32_t)128U;
+ for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+ {
+ uint8_t *b0 = b;
+ uint8_t *mb = b0 + i * (uint32_t)128U;
+ sha512_update(mb, st);
+ }
+}
+
+void
+Hacl_SHA2_Scalar32_sha512_update_last(
+ FStar_UInt128_uint128 totlen,
+ uint32_t len,
+ uint8_t *b,
+ uint64_t *hash
+)
+{
+ uint32_t blocks;
+ if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+ {
+ blocks = (uint32_t)1U;
+ }
+ else
+ {
+ blocks = (uint32_t)2U;
+ }
+ uint32_t fin = blocks * (uint32_t)128U;
+ uint8_t last[256U] = { 0U };
+ uint8_t totlen_buf[16U] = { 0U };
+ FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+ store128_be(totlen_buf, total_len_bits);
+ uint8_t *b0 = b;
+ memcpy(last, b0, len * sizeof (uint8_t));
+ last[len] = (uint8_t)0x80U;
+ memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+ uint8_t *last00 = last;
+ uint8_t *last10 = last + (uint32_t)128U;
+ uint8_t *l0 = last00;
+ uint8_t *l1 = last10;
+ uint8_t *lb0 = l0;
+ uint8_t *lb1 = l1;
+ uint8_t *last0 = lb0;
+ uint8_t *last1 = lb1;
+ sha512_update(last0, hash);
+ if (blocks > (uint32_t)1U)
+ {
+ sha512_update(last1, hash);
+ return;
+ }
+}
+
+void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h)
+{
+ uint8_t hbuf[64U] = { 0U };
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ store64_be(hbuf + i * (uint32_t)8U, st[i]););
+ memcpy(h, hbuf, (uint32_t)64U * sizeof (uint8_t));
+}
+
+void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash)
+{
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint64_t *os = hash;
+ uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+ os[i] = x;);
+}
+
+void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+{
+ Hacl_SHA2_Scalar32_sha512_update_nblocks(len, b, st);
+}
+
+void
+Hacl_SHA2_Scalar32_sha384_update_last(
+ FStar_UInt128_uint128 totlen,
+ uint32_t len,
+ uint8_t *b,
+ uint64_t *st
+)
+{
+ Hacl_SHA2_Scalar32_sha512_update_last(totlen, len, b, st);
+}
+
+void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h)
+{
+ uint8_t hbuf[64U] = { 0U };
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ store64_be(hbuf + i * (uint32_t)8U, st[i]););
+ memcpy(h, hbuf, (uint32_t)48U * sizeof (uint8_t));
+}
+
+/**
+Allocate initial state for the SHA2_256 hash. The state is to be freed by
+calling `free_256`.
+*/
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ Hacl_SHA2_Scalar32_sha256_init(block_state);
+ return p;
+}
+
+/**
+Copies the state passed as argument into a newly allocated state (deep copy).
+The state is to be freed by calling `free_256`. Cloning the state this way is
+useful, for instance, if your control-flow diverges and you need to feed
+more (different) data into the hash in each branch.
+*/
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s0;
+ uint32_t *block_state0 = scrut.block_state;
+ uint8_t *buf0 = scrut.buf;
+ uint64_t total_len0 = scrut.total_len;
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ return p;
+}
+
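The diverging-control-flow use case reads like this in practice; a minimal
sketch, assuming `st` has already absorbed some common prefix:

void fork_demo(Hacl_Streaming_MD_state_32 *st, uint8_t a[32U], uint8_t b[32U])
{
  Hacl_Streaming_MD_state_32 *branch = Hacl_Streaming_SHA2_copy_256(st);
  (void)Hacl_Streaming_SHA2_update_256(st, (uint8_t *)"A", (uint32_t)1U);
  (void)Hacl_Streaming_SHA2_update_256(branch, (uint8_t *)"B", (uint32_t)1U);
  Hacl_Streaming_SHA2_finish_256(st, a);      /* hash(prefix || "A") */
  Hacl_Streaming_SHA2_finish_256(branch, b);  /* hash(prefix || "B") */
  Hacl_Streaming_SHA2_free_256(branch);
}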
+/**
+Reset an existing state to the initial hash state with empty data.
+*/
+void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ Hacl_SHA2_Scalar32_sha256_init(block_state);
+ Hacl_Streaming_MD_state_32
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+static inline Hacl_Streaming_Types_error_code
+update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+{
+ Hacl_Streaming_MD_state_32 s = *p;
+ uint64_t total_len = s.total_len;
+ if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ {
+ return Hacl_Streaming_Types_MaximumLengthExceeded;
+ }
+ uint32_t sz;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ sz = (uint32_t)64U;
+ }
+ else
+ {
+ sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ if (len <= (uint32_t)64U - sz)
+ {
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf + sz1;
+ memcpy(buf2, data, len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)len;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len2
+ }
+ );
+ }
+ else if (sz == (uint32_t)0U)
+ {
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+ }
+ uint32_t ite;
+ if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - data1_len;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + data1_len;
+ Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
+ data1,
+ block_state1);
+ uint8_t *dst = buf;
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)len
+ }
+ );
+ }
+ else
+ {
+ uint32_t diff = (uint32_t)64U - sz;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + diff;
+ Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t *block_state10 = s1.block_state;
+ uint8_t *buf0 = s1.buf;
+ uint64_t total_len10 = s1.total_len;
+ uint32_t sz10;
+ if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ {
+ sz10 = (uint32_t)64U;
+ }
+ else
+ {
+ sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf0 + sz10;
+ memcpy(buf2, data1, diff * sizeof (uint8_t));
+ uint64_t total_len2 = total_len10 + (uint64_t)diff;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state10,
+ .buf = buf0,
+ .total_len = total_len2
+ }
+ );
+ Hacl_Streaming_MD_state_32 s10 = *p;
+ uint32_t *block_state1 = s10.block_state;
+ uint8_t *buf = s10.buf;
+ uint64_t total_len1 = s10.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+ }
+ uint32_t ite;
+ if
+ (
+ (uint64_t)(len - diff)
+ % (uint64_t)(uint32_t)64U
+ == (uint64_t)0U
+ && (uint64_t)(len - diff) > (uint64_t)0U
+ )
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - diff - data1_len;
+ uint8_t *data11 = data2;
+ uint8_t *data21 = data2 + data1_len;
+ Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
+ data11,
+ block_state1);
+ uint8_t *dst = buf;
+ memcpy(dst, data21, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_32){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)(len - diff)
+ }
+ );
+ }
+ return Hacl_Streaming_Types_Success;
+}
+
+/**
+Feed an arbitrary amount of data into the hash. This function returns 0 for
+success, or 1 if the combined length of all of the data passed to `update_256`
+(since the last call to `init_256`) exceeds 2^61-1 bytes.
+
+This function is identical to the update function for SHA2_224.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_256(
+ Hacl_Streaming_MD_state_32 *p,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ return update_224_256(p, input, input_len);
+}
+
+/**
+Write the resulting hash into `dst`, an array of 32 bytes. The state remains
+valid after a call to `finish_256`, meaning the user may feed more data into
+the hash via `update_256`. (The finish_256 function operates on an internal copy of
+the state and therefore does not invalidate the client-held state `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+{
+ Hacl_Streaming_MD_state_32 scrut = *p;
+ uint32_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)64U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint32_t tmp_block_state[8U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+ uint32_t ite;
+ if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = r % (uint32_t)64U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r,
+ r,
+ buf_last,
+ tmp_block_state);
+ Hacl_SHA2_Scalar32_sha256_finish(tmp_block_state, dst);
+}
+
+/**
+Free a state allocated with `create_in_256`.
+
+This function is identical to the free function for SHA2_224.
+*/
+void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ KRML_HOST_FREE(block_state);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(s);
+}
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 32 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint8_t *ib = input;
+ uint8_t *rb = dst;
+ uint32_t st[8U] = { 0U };
+ Hacl_SHA2_Scalar32_sha256_init(st);
+ uint32_t rem = input_len % (uint32_t)64U;
+ uint64_t len_ = (uint64_t)input_len;
+ Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % (uint32_t)64U;
+ uint8_t *b0 = ib;
+ uint8_t *lb = b0 + input_len - rem1;
+ Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st);
+ Hacl_SHA2_Scalar32_sha256_finish(st, rb);
+}
+
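For callers holding the whole input, this one-shot entry point is equivalent
to a full create/update/finish/free sequence. A minimal sketch of the
correspondence:

void sha256_demo(uint8_t *msg, uint32_t msg_len, uint8_t out1[32U], uint8_t out2[32U])
{
  Hacl_Streaming_SHA2_hash_256(msg, msg_len, out1);        /* one-shot */
  Hacl_Streaming_MD_state_32 *st = Hacl_Streaming_SHA2_create_in_256();
  (void)Hacl_Streaming_SHA2_update_256(st, msg, msg_len);  /* streaming */
  Hacl_Streaming_SHA2_finish_256(st, out2);
  Hacl_Streaming_SHA2_free_256(st);
  /* out1 and out2 now hold the same 32-byte digest */
}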
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ Hacl_Streaming_MD_state_32
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_MD_state_32
+ *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
+ p[0U] = s;
+ Hacl_SHA2_Scalar32_sha224_init(block_state);
+ return p;
+}
+
+void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s)
+{
+ Hacl_Streaming_MD_state_32 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ Hacl_SHA2_Scalar32_sha224_init(block_state);
+ Hacl_Streaming_MD_state_32
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_224(
+ Hacl_Streaming_MD_state_32 *p,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ return update_224_256(p, input, input_len);
+}
+
+/**
+Write the resulting hash into `dst`, an array of 28 bytes. The state remains
+valid after a call to `finish_224`, meaning the user may feed more data into
+the hash via `update_224`.
+*/
+void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+{
+ Hacl_Streaming_MD_state_32 scrut = *p;
+ uint32_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)64U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint32_t tmp_block_state[8U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+ uint32_t ite;
+ if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = r % (uint32_t)64U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r,
+ r,
+ buf_last,
+ tmp_block_state);
+ Hacl_SHA2_Scalar32_sha224_finish(tmp_block_state, dst);
+}
+
+void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p)
+{
+ Hacl_Streaming_SHA2_free_256(p);
+}
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 28 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint8_t *ib = input;
+ uint8_t *rb = dst;
+ uint32_t st[8U] = { 0U };
+ Hacl_SHA2_Scalar32_sha224_init(st);
+ uint32_t rem = input_len % (uint32_t)64U;
+ uint64_t len_ = (uint64_t)input_len;
+ sha224_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % (uint32_t)64U;
+ uint8_t *b0 = ib;
+ uint8_t *lb = b0 + input_len - rem1;
+ Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st);
+ Hacl_SHA2_Scalar32_sha224_finish(st, rb);
+}
+
+Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
+ uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+ Hacl_Streaming_MD_state_64
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_MD_state_64
+ *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
+ p[0U] = s;
+ Hacl_SHA2_Scalar32_sha512_init(block_state);
+ return p;
+}
+
+/**
+Copies the state passed as argument into a newly allocated state (deep copy).
+The state is to be freed by calling `free_512`. Cloning the state this way is
+useful, for instance, if your control-flow diverges and you need to feed
+more (different) data into the hash in each branch.
+*/
+Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0)
+{
+ Hacl_Streaming_MD_state_64 scrut = *s0;
+ uint64_t *block_state0 = scrut.block_state;
+ uint8_t *buf0 = scrut.buf;
+ uint64_t total_len0 = scrut.total_len;
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
+ memcpy(buf, buf0, (uint32_t)128U * sizeof (uint8_t));
+ uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+ memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint64_t));
+ Hacl_Streaming_MD_state_64
+ s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+ Hacl_Streaming_MD_state_64
+ *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
+ p[0U] = s;
+ return p;
+}
+
+void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s)
+{
+ Hacl_Streaming_MD_state_64 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint64_t *block_state = scrut.block_state;
+ Hacl_SHA2_Scalar32_sha512_init(block_state);
+ Hacl_Streaming_MD_state_64
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+static inline Hacl_Streaming_Types_error_code
+update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
+{
+ Hacl_Streaming_MD_state_64 s = *p;
+ uint64_t total_len = s.total_len;
+ if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len)
+ {
+ return Hacl_Streaming_Types_MaximumLengthExceeded;
+ }
+ uint32_t sz;
+ if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ sz = (uint32_t)128U;
+ }
+ else
+ {
+ sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+ }
+ if (len <= (uint32_t)128U - sz)
+ {
+ Hacl_Streaming_MD_state_64 s1 = *p;
+ uint64_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)128U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+ }
+ uint8_t *buf2 = buf + sz1;
+ memcpy(buf2, data, len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)len;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_64){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len2
+ }
+ );
+ }
+ else if (sz == (uint32_t)0U)
+ {
+ Hacl_Streaming_MD_state_64 s1 = *p;
+ uint64_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)128U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+ }
+ uint32_t ite;
+ if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ {
+ ite = (uint32_t)128U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+ }
+ uint32_t n_blocks = (len - ite) / (uint32_t)128U;
+ uint32_t data1_len = n_blocks * (uint32_t)128U;
+ uint32_t data2_len = len - data1_len;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + data1_len;
+ Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
+ data1,
+ block_state1);
+ uint8_t *dst = buf;
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_64){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)len
+ }
+ );
+ }
+ else
+ {
+ uint32_t diff = (uint32_t)128U - sz;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + diff;
+ Hacl_Streaming_MD_state_64 s1 = *p;
+ uint64_t *block_state10 = s1.block_state;
+ uint8_t *buf0 = s1.buf;
+ uint64_t total_len10 = s1.total_len;
+ uint32_t sz10;
+ if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ {
+ sz10 = (uint32_t)128U;
+ }
+ else
+ {
+ sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+ }
+ uint8_t *buf2 = buf0 + sz10;
+ memcpy(buf2, data1, diff * sizeof (uint8_t));
+ uint64_t total_len2 = total_len10 + (uint64_t)diff;
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_64){
+ .block_state = block_state10,
+ .buf = buf0,
+ .total_len = total_len2
+ }
+ );
+ Hacl_Streaming_MD_state_64 s10 = *p;
+ uint64_t *block_state1 = s10.block_state;
+ uint8_t *buf = s10.buf;
+ uint64_t total_len1 = s10.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)128U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+ }
+ uint32_t ite;
+ if
+ (
+ (uint64_t)(len - diff)
+ % (uint64_t)(uint32_t)128U
+ == (uint64_t)0U
+ && (uint64_t)(len - diff) > (uint64_t)0U
+ )
+ {
+ ite = (uint32_t)128U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+ }
+ uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
+ uint32_t data1_len = n_blocks * (uint32_t)128U;
+ uint32_t data2_len = len - diff - data1_len;
+ uint8_t *data11 = data2;
+ uint8_t *data21 = data2 + data1_len;
+ Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
+ data11,
+ block_state1);
+ uint8_t *dst = buf;
+ memcpy(dst, data21, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_MD_state_64){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)(len - diff)
+ }
+ );
+ }
+ return Hacl_Streaming_Types_Success;
+}
+
+/**
+Feed an arbitrary amount of data into the hash. This function returns 0 for
+success, or 1 if the combined length of all of the data passed to `update_512`
+(since the last call to `init_512`) exceeds 2^125-1 bytes.
+
+This function is identical to the update function for SHA2_384.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_512(
+ Hacl_Streaming_MD_state_64 *p,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ return update_384_512(p, input, input_len);
+}
+
+/**
+Write the resulting hash into `dst`, an array of 64 bytes. The state remains
+valid after a call to `finish_512`, meaning the user may feed more data into
+the hash via `update_512`. (The finish_512 function operates on an internal copy of
+the state and therefore does not invalidate the client-held state `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+{
+ Hacl_Streaming_MD_state_64 scrut = *p;
+ uint64_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)128U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint64_t tmp_block_state[8U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+ uint32_t ite;
+ if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)128U;
+ }
+ else
+ {
+ ite = r % (uint32_t)128U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+ FStar_UInt128_uint64_to_uint128((uint64_t)r)),
+ r,
+ buf_last,
+ tmp_block_state);
+ Hacl_SHA2_Scalar32_sha512_finish(tmp_block_state, dst);
+}
+
+/**
+Free a state allocated with `create_in_512`.
+
+This function is identical to the free function for SHA2_384.
+*/
+void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s)
+{
+ Hacl_Streaming_MD_state_64 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint64_t *block_state = scrut.block_state;
+ KRML_HOST_FREE(block_state);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(s);
+}
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 64 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint8_t *ib = input;
+ uint8_t *rb = dst;
+ uint64_t st[8U] = { 0U };
+ Hacl_SHA2_Scalar32_sha512_init(st);
+ uint32_t rem = input_len % (uint32_t)128U;
+ FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
+ Hacl_SHA2_Scalar32_sha512_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % (uint32_t)128U;
+ uint8_t *b0 = ib;
+ uint8_t *lb = b0 + input_len - rem1;
+ Hacl_SHA2_Scalar32_sha512_update_last(len_, rem, lb, st);
+ Hacl_SHA2_Scalar32_sha512_finish(st, rb);
+}
+
+Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
+ uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+ Hacl_Streaming_MD_state_64
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_MD_state_64
+ *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
+ p[0U] = s;
+ Hacl_SHA2_Scalar32_sha384_init(block_state);
+ return p;
+}
+
+void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s)
+{
+ Hacl_Streaming_MD_state_64 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint64_t *block_state = scrut.block_state;
+ Hacl_SHA2_Scalar32_sha384_init(block_state);
+ Hacl_Streaming_MD_state_64
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_384(
+ Hacl_Streaming_MD_state_64 *p,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ return update_384_512(p, input, input_len);
+}
+
+/**
+Write the resulting hash into `dst`, an array of 48 bytes. The state remains
+valid after a call to `finish_384`, meaning the user may feed more data into
+the hash via `update_384`.
+*/
+void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+{
+ Hacl_Streaming_MD_state_64 scrut = *p;
+ uint64_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)128U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint64_t tmp_block_state[8U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+ uint32_t ite;
+ if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)128U;
+ }
+ else
+ {
+ ite = r % (uint32_t)128U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+ FStar_UInt128_uint64_to_uint128((uint64_t)r)),
+ r,
+ buf_last,
+ tmp_block_state);
+ Hacl_SHA2_Scalar32_sha384_finish(tmp_block_state, dst);
+}
+
+void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p)
+{
+ Hacl_Streaming_SHA2_free_512(p);
+}
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 48 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint8_t *ib = input;
+ uint8_t *rb = dst;
+ uint64_t st[8U] = { 0U };
+ Hacl_SHA2_Scalar32_sha384_init(st);
+ uint32_t rem = input_len % (uint32_t)128U;
+ FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
+ Hacl_SHA2_Scalar32_sha384_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % (uint32_t)128U;
+ uint8_t *b0 = ib;
+ uint8_t *lb = b0 + input_len - rem1;
+ Hacl_SHA2_Scalar32_sha384_update_last(len_, rem, lb, st);
+ Hacl_SHA2_Scalar32_sha384_finish(st, rb);
+}
+
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h
new file mode 100644
index 0000000000..a0e731094d
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h
@@ -0,0 +1,204 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_SHA2_H
+#define __Hacl_Hash_SHA2_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "python_hacl_namespaces.h"
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+
+
+typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_224;
+
+typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_256;
+
+typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_384;
+
+typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_512;
+
+/**
+Allocate initial state for the SHA2_256 hash. The state is to be freed by
+calling `free_256`.
+*/
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void);
+
+/**
+Copies the state passed as argument into a newly allocated state (deep copy).
+The state is to be freed by calling `free_256`. Cloning the state this way is
+useful, for instance, if your control-flow diverges and you need to feed
+more (different) data into the hash in each branch.
+*/
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0);
+
+/**
+Reset an existing state to the initial hash state with empty data.
+*/
+void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s);
+
+/**
+Feed an arbitrary amount of data into the hash. This function returns 0 for
+success, or 1 if the combined length of all of the data passed to `update_256`
+(since the last call to `init_256`) exceeds 2^61-1 bytes.
+
+This function is identical to the update function for SHA2_224.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_256(
+ Hacl_Streaming_MD_state_32 *p,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+/**
+Write the resulting hash into `dst`, an array of 32 bytes. The state remains
+valid after a call to `finish_256`, meaning the user may feed more data into
+the hash via `update_256`. (The finish_256 function operates on an internal copy of
+the state and therefore does not invalidate the client-held state `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+
+/**
+Free a state allocated with `create_in_256`.
+
+This function is identical to the free function for SHA2_224.
+*/
+void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s);
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 32 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void);
+
+void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s);
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_224(
+ Hacl_Streaming_MD_state_32 *p,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+/**
+Write the resulting hash into `dst`, an array of 28 bytes. The state remains
+valid after a call to `finish_224`, meaning the user may feed more data into
+the hash via `update_224`.
+*/
+void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+
+void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p);
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 28 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void);
+
+/**
+Copies the state passed as argument into a newly allocated state (deep copy).
+The state is to be freed by calling `free_512`. Cloning the state this way is
+useful, for instance, if your control-flow diverges and you need to feed
+more (different) data into the hash in each branch.
+*/
+Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0);
+
+void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s);
+
+/**
+Feed an arbitrary amount of data into the hash. This function returns 0 for
+success, or 1 if the combined length of all of the data passed to `update_512`
+(since the last call to `init_512`) exceeds 2^125-1 bytes.
+
+This function is identical to the update function for SHA2_384.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_512(
+ Hacl_Streaming_MD_state_64 *p,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+/**
+Write the resulting hash into `dst`, an array of 64 bytes. The state remains
+valid after a call to `finish_512`, meaning the user may feed more data into
+the hash via `update_512`. (The finish_512 function operates on an internal copy of
+the state and therefore does not invalidate the client-held state `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+
+/**
+Free a state allocated with `create_in_512`.
+
+This function is identical to the free function for SHA2_384.
+*/
+void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s);
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 64 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void);
+
+void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s);
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_SHA2_update_384(
+ Hacl_Streaming_MD_state_64 *p,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+/**
+Write the resulting hash into `dst`, an array of 48 bytes. The state remains
+valid after a call to `finish_384`, meaning the user may feed more data into
+the hash via `update_384`.
+*/
+void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+
+void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p);
+
+/**
+Hash `input`, of length `input_len`, into `dst`, an array of 48 bytes.
+*/
+void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_SHA2_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c
new file mode 100644
index 0000000000..b3febdfeb2
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c
@@ -0,0 +1,824 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_SHA3.h"
+
+static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
+{
+ switch (a)
+ {
+ case Spec_Hash_Definitions_SHA3_224:
+ {
+ return (uint32_t)144U;
+ }
+ case Spec_Hash_Definitions_SHA3_256:
+ {
+ return (uint32_t)136U;
+ }
+ case Spec_Hash_Definitions_SHA3_384:
+ {
+ return (uint32_t)104U;
+ }
+ case Spec_Hash_Definitions_SHA3_512:
+ {
+ return (uint32_t)72U;
+ }
+ case Spec_Hash_Definitions_Shake128:
+ {
+ return (uint32_t)168U;
+ }
+ case Spec_Hash_Definitions_Shake256:
+ {
+ return (uint32_t)136U;
+ }
+ default:
+ {
+ KRML_HOST_EPRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__);
+ KRML_HOST_EXIT(253U);
+ }
+ }
+}
+
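These rates fall out of the Keccak sponge parameters: the permutation state
is 200 bytes, the capacity is twice the digest length for the SHA3-* variants
(and twice the security level, 32 or 64 bytes, for the SHAKEs), and the rate
is what remains. A one-line restatement:

static uint32_t keccak_rate_bytes(uint32_t capacity_bytes)
{
  return 200U - capacity_bytes;  /* e.g. SHA3-256: 200 - 2*32 = 136 */
}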
+static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
+{
+ switch (a)
+ {
+ case Spec_Hash_Definitions_SHA3_224:
+ {
+ return (uint32_t)28U;
+ }
+ case Spec_Hash_Definitions_SHA3_256:
+ {
+ return (uint32_t)32U;
+ }
+ case Spec_Hash_Definitions_SHA3_384:
+ {
+ return (uint32_t)48U;
+ }
+ case Spec_Hash_Definitions_SHA3_512:
+ {
+ return (uint32_t)64U;
+ }
+ default:
+ {
+ KRML_HOST_EPRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__);
+ KRML_HOST_EXIT(253U);
+ }
+ }
+}
+
+void
+Hacl_Hash_SHA3_update_multi_sha3(
+ Spec_Hash_Definitions_hash_alg a,
+ uint64_t *s,
+ uint8_t *blocks,
+ uint32_t n_blocks
+)
+{
+ for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ {
+ uint8_t *block = blocks + i * block_len(a);
+ Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s);
+ }
+}
+
+void
+Hacl_Hash_SHA3_update_last_sha3(
+ Spec_Hash_Definitions_hash_alg a,
+ uint64_t *s,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ uint8_t suffix;
+ if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
+ {
+ suffix = (uint8_t)0x1fU;
+ }
+ else
+ {
+ suffix = (uint8_t)0x06U;
+ }
+ uint32_t len = block_len(a);
+ if (input_len == len)
+ {
+ Hacl_Impl_SHA3_absorb_inner(len, input, s);
+ uint8_t *uu____0 = input + input_len;
+ uint8_t lastBlock_[200U] = { 0U };
+ uint8_t *lastBlock = lastBlock_;
+ memcpy(lastBlock, uu____0, (uint32_t)0U * sizeof (uint8_t));
+ lastBlock[0U] = suffix;
+ Hacl_Impl_SHA3_loadState(len, lastBlock, s);
+ if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U)
+ {
+ Hacl_Impl_SHA3_state_permute(s);
+ }
+ uint8_t nextBlock_[200U] = { 0U };
+ uint8_t *nextBlock = nextBlock_;
+ nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
+ Hacl_Impl_SHA3_loadState(len, nextBlock, s);
+ Hacl_Impl_SHA3_state_permute(s);
+ return;
+ }
+ uint8_t lastBlock_[200U] = { 0U };
+ uint8_t *lastBlock = lastBlock_;
+ memcpy(lastBlock, input, input_len * sizeof (uint8_t));
+ lastBlock[input_len] = suffix;
+ Hacl_Impl_SHA3_loadState(len, lastBlock, s);
+ if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && input_len == len - (uint32_t)1U)
+ {
+ Hacl_Impl_SHA3_state_permute(s);
+ }
+ uint8_t nextBlock_[200U] = { 0U };
+ uint8_t *nextBlock = nextBlock_;
+ nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
+ Hacl_Impl_SHA3_loadState(len, nextBlock, s);
+ Hacl_Impl_SHA3_state_permute(s);
+}
+
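+/*
+Padding note for update_last_sha3 above (informal): SHA-3 places the
+domain-separation suffix 0x06 (0x1f for the SHAKE variants) immediately after
+the message bytes and XORs 0x80 into the last byte of the rate-sized block
+(the pad10*1 rule). Because Hacl_Impl_SHA3_loadState XORs into the state, and
+for these suffixes (bit 0x80 clear) the conditional permutation between the
+two loads never fires, the suffix block and the 0x80 block compose; when the
+remainder equals block_len - 1 both land in the same byte, giving 0x86 in
+effect. A rough model of the final block, assuming rem < block_len:
+
+    uint8_t pad[200U] = { 0U };
+    pad[rem] = 0x06U;              // suffix right after the data
+    pad[block_len - 1U] ^= 0x80U;  // final padding bit
+*/
+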
+typedef struct hash_buf2_s
+{
+ Hacl_Streaming_Keccak_hash_buf fst;
+ Hacl_Streaming_Keccak_hash_buf snd;
+}
+hash_buf2;
+
+Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s)
+{
+ Hacl_Streaming_Keccak_state scrut = *s;
+ Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
+ return block_state.fst;
+}
+
+Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a)
+{
+ KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
+ uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
+ uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+ Hacl_Streaming_Keccak_hash_buf block_state = { .fst = a, .snd = buf };
+ Hacl_Streaming_Keccak_state
+ s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_Keccak_state
+ *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+ p[0U] = s;
+ uint64_t *s1 = block_state.snd;
+ memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+ return p;
+}
+
+void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s)
+{
+ Hacl_Streaming_Keccak_state scrut = *s;
+ uint8_t *buf = scrut.buf;
+ Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
+ uint64_t *s1 = block_state.snd;
+ KRML_HOST_FREE(s1);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(s);
+}
+
+Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0)
+{
+ Hacl_Streaming_Keccak_state scrut0 = *s0;
+ Hacl_Streaming_Keccak_hash_buf block_state0 = scrut0.block_state;
+ uint8_t *buf0 = scrut0.buf;
+ uint64_t total_len0 = scrut0.total_len;
+ Spec_Hash_Definitions_hash_alg i = block_state0.fst;
+ KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i));
+ uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t));
+ memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t));
+ uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+ Hacl_Streaming_Keccak_hash_buf block_state = { .fst = i, .snd = buf };
+ hash_buf2 scrut = { .fst = block_state0, .snd = block_state };
+ uint64_t *s_dst = scrut.snd.snd;
+ uint64_t *s_src = scrut.fst.snd;
+ memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+ Hacl_Streaming_Keccak_state
+ s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 };
+ Hacl_Streaming_Keccak_state
+ *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+ p[0U] = s;
+ return p;
+}
+
+void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s)
+{
+ Hacl_Streaming_Keccak_state scrut = *s;
+ uint8_t *buf = scrut.buf;
+ Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
+ uint64_t *s1 = block_state.snd;
+ memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+ Hacl_Streaming_Keccak_state
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len)
+{
+ Hacl_Streaming_Keccak_state s = *p;
+ Hacl_Streaming_Keccak_hash_buf block_state = s.block_state;
+ uint64_t total_len = s.total_len;
+ Spec_Hash_Definitions_hash_alg i = block_state.fst;
+ if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len)
+ {
+ return Hacl_Streaming_Types_MaximumLengthExceeded;
+ }
+ uint32_t sz;
+ if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ sz = block_len(i);
+ }
+ else
+ {
+ sz = (uint32_t)(total_len % (uint64_t)block_len(i));
+ }
+ if (len <= block_len(i) - sz)
+ {
+ Hacl_Streaming_Keccak_state s1 = *p;
+ Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = block_len(i);
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
+ }
+ uint8_t *buf2 = buf + sz1;
+ memcpy(buf2, data, len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)len;
+ *p
+ =
+ (
+ (Hacl_Streaming_Keccak_state){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len2
+ }
+ );
+ }
+ else if (sz == (uint32_t)0U)
+ {
+ Hacl_Streaming_Keccak_state s1 = *p;
+ Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = block_len(i);
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
+ uint64_t *s2 = block_state1.snd;
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
+ }
+ uint32_t ite;
+ if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ {
+ ite = block_len(i);
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)len % (uint64_t)block_len(i));
+ }
+ uint32_t n_blocks = (len - ite) / block_len(i);
+ uint32_t data1_len = n_blocks * block_len(i);
+ uint32_t data2_len = len - data1_len;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + data1_len;
+ Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
+ uint64_t *s2 = block_state1.snd;
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
+ uint8_t *dst = buf;
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_Keccak_state){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)len
+ }
+ );
+ }
+ else
+ {
+ uint32_t diff = block_len(i) - sz;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + diff;
+ Hacl_Streaming_Keccak_state s1 = *p;
+ Hacl_Streaming_Keccak_hash_buf block_state10 = s1.block_state;
+ uint8_t *buf0 = s1.buf;
+ uint64_t total_len10 = s1.total_len;
+ uint32_t sz10;
+ if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ {
+ sz10 = block_len(i);
+ }
+ else
+ {
+ sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i));
+ }
+ uint8_t *buf2 = buf0 + sz10;
+ memcpy(buf2, data1, diff * sizeof (uint8_t));
+ uint64_t total_len2 = total_len10 + (uint64_t)diff;
+ *p
+ =
+ (
+ (Hacl_Streaming_Keccak_state){
+ .block_state = block_state10,
+ .buf = buf0,
+ .total_len = total_len2
+ }
+ );
+ Hacl_Streaming_Keccak_state s10 = *p;
+ Hacl_Streaming_Keccak_hash_buf block_state1 = s10.block_state;
+ uint8_t *buf = s10.buf;
+ uint64_t total_len1 = s10.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = block_len(i);
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
+ uint64_t *s2 = block_state1.snd;
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
+ }
+ uint32_t ite;
+ if
+ (
+ (uint64_t)(len - diff)
+ % (uint64_t)block_len(i)
+ == (uint64_t)0U
+ && (uint64_t)(len - diff) > (uint64_t)0U
+ )
+ {
+ ite = block_len(i);
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)block_len(i));
+ }
+ uint32_t n_blocks = (len - diff - ite) / block_len(i);
+ uint32_t data1_len = n_blocks * block_len(i);
+ uint32_t data2_len = len - diff - data1_len;
+ uint8_t *data11 = data2;
+ uint8_t *data21 = data2 + data1_len;
+ Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
+ uint64_t *s2 = block_state1.snd;
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data11, data1_len / block_len(a1));
+ uint8_t *dst = buf;
+ memcpy(dst, data21, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_Keccak_state){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)(len - diff)
+ }
+ );
+ }
+ return Hacl_Streaming_Types_Success;
+}
+
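+/*
+Hacl_Streaming_Keccak_update above follows the streaming-buffer discipline
+used by all HACL* streaming functors: bytes are staged in a block-sized
+buffer, only whole blocks are absorbed, and a full final block is kept staged
+so the finish path always has a last block to pad. A minimal model of the
+control flow (hypothetical helper `absorb_blocks`, overflow check elided):
+
+    static void model_update(uint8_t *buf, uint32_t *fill, uint32_t block,
+                             uint8_t *data, uint32_t len)
+    {
+      if (len <= block - *fill) {           // 1. fits in the staging buffer
+        memcpy(buf + *fill, data, len);
+        *fill += len;
+        return;
+      }
+      if (*fill > 0U) {                     // 2. top up and absorb the buffer
+        uint32_t diff = block - *fill;
+        memcpy(buf + *fill, data, diff);
+        absorb_blocks(buf, 1U);
+        data += diff; len -= diff; *fill = 0U;
+      }
+      // 3. absorb whole blocks directly; stash the tail, kept non-empty
+      //    when len is an exact multiple of block
+      uint32_t tail = (len % block == 0U && len > 0U) ? block : len % block;
+      absorb_blocks(data, (len - tail) / block);
+      memcpy(buf, data + (len - tail), tail);
+      *fill = tail;
+    }
+*/
+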
+static void
+finish_(
+ Spec_Hash_Definitions_hash_alg a,
+ Hacl_Streaming_Keccak_state *p,
+ uint8_t *dst,
+ uint32_t l
+)
+{
+ Hacl_Streaming_Keccak_state scrut0 = *p;
+ Hacl_Streaming_Keccak_hash_buf block_state = scrut0.block_state;
+ uint8_t *buf_ = scrut0.buf;
+ uint64_t total_len = scrut0.total_len;
+ uint32_t r;
+ if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = block_len(a);
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)block_len(a));
+ }
+ uint8_t *buf_1 = buf_;
+ uint64_t buf[25U] = { 0U };
+ Hacl_Streaming_Keccak_hash_buf tmp_block_state = { .fst = a, .snd = buf };
+ hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state };
+ uint64_t *s_dst = scrut.snd.snd;
+ uint64_t *s_src = scrut.fst.snd;
+ memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+ uint32_t ite0;
+ if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite0 = block_len(a);
+ }
+ else
+ {
+ ite0 = r % block_len(a);
+ }
+ uint8_t *buf_last = buf_1 + r - ite0;
+ uint8_t *buf_multi = buf_1;
+ Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst;
+ uint64_t *s0 = tmp_block_state.snd;
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, (uint32_t)0U / block_len(a1));
+ Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst;
+ uint64_t *s1 = tmp_block_state.snd;
+ Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r);
+ Spec_Hash_Definitions_hash_alg a11 = tmp_block_state.fst;
+ uint64_t *s = tmp_block_state.snd;
+ if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256)
+ {
+ uint32_t ite;
+ if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256)
+ {
+ ite = l;
+ }
+ else
+ {
+ ite = hash_len(a11);
+ }
+ Hacl_Impl_SHA3_squeeze(s, block_len(a11), ite, dst);
+ return;
+ }
+ Hacl_Impl_SHA3_squeeze(s, block_len(a11), hash_len(a11), dst);
+}
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst)
+{
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ if (a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256)
+ {
+ return Hacl_Streaming_Types_InvalidAlgorithm;
+ }
+ finish_(a1, s, dst, hash_len(a1));
+ return Hacl_Streaming_Types_Success;
+}
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l)
+{
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ if (!(a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256))
+ {
+ return Hacl_Streaming_Types_InvalidAlgorithm;
+ }
+ if (l == (uint32_t)0U)
+ {
+ return Hacl_Streaming_Types_InvalidLength;
+ }
+ finish_(a1, s, dst, l);
+ return Hacl_Streaming_Types_Success;
+}
+
+uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s)
+{
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ return block_len(a1);
+}
+
+uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s)
+{
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ return hash_len(a1);
+}
+
+bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s)
+{
+ Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Streaming_Keccak_get_alg(s);
+ return uu____0 == Spec_Hash_Definitions_Shake128 || uu____0 == Spec_Hash_Definitions_Shake256;
+}
+
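+/*
+Illustrative sketch of the streaming interface above, producing 64 bytes of
+SHAKE128 output (`data`/`data_len` are caller-provided; return codes elided):
+
+    uint8_t out[64U];
+    Hacl_Streaming_Keccak_state *st =
+      Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_Shake128);
+    Hacl_Streaming_Keccak_update(st, data, data_len);
+    Hacl_Streaming_Keccak_squeeze(st, out, (uint32_t)64U);
+    Hacl_Streaming_Keccak_free(st);
+
+For the fixed-output SHA3 variants, Hacl_Streaming_Keccak_finish is used
+instead; finish rejects SHAKE states with InvalidAlgorithm, just as squeeze
+rejects non-SHAKE states.
+*/
+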
+void
+Hacl_SHA3_shake128_hacl(
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint32_t outputByteLen,
+ uint8_t *output
+)
+{
+ Hacl_Impl_SHA3_keccak((uint32_t)1344U,
+ (uint32_t)256U,
+ inputByteLen,
+ input,
+ (uint8_t)0x1FU,
+ outputByteLen,
+ output);
+}
+
+void
+Hacl_SHA3_shake256_hacl(
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint32_t outputByteLen,
+ uint8_t *output
+)
+{
+ Hacl_Impl_SHA3_keccak((uint32_t)1088U,
+ (uint32_t)512U,
+ inputByteLen,
+ input,
+ (uint8_t)0x1FU,
+ outputByteLen,
+ output);
+}
+
+void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+{
+ Hacl_Impl_SHA3_keccak((uint32_t)1152U,
+ (uint32_t)448U,
+ inputByteLen,
+ input,
+ (uint8_t)0x06U,
+ (uint32_t)28U,
+ output);
+}
+
+void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+{
+ Hacl_Impl_SHA3_keccak((uint32_t)1088U,
+ (uint32_t)512U,
+ inputByteLen,
+ input,
+ (uint8_t)0x06U,
+ (uint32_t)32U,
+ output);
+}
+
+void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+{
+ Hacl_Impl_SHA3_keccak((uint32_t)832U,
+ (uint32_t)768U,
+ inputByteLen,
+ input,
+ (uint8_t)0x06U,
+ (uint32_t)48U,
+ output);
+}
+
+void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+{
+ Hacl_Impl_SHA3_keccak((uint32_t)576U,
+ (uint32_t)1024U,
+ inputByteLen,
+ input,
+ (uint8_t)0x06U,
+ (uint32_t)64U,
+ output);
+}
+
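+/*
+In the one-shot wrappers above, rate and capacity always sum to the Keccak
+width of 1600 bits, with capacity = 2 * digest_bits for the SHA3 variants
+(e.g. SHA3-256: 1600 - 2*256 = a 1088-bit rate = 136 bytes, matching
+block_len). A minimal sketch of hashing the three-byte message "abc":
+
+    uint8_t digest[32U];
+    uint8_t msg[3U] = { 0x61U, 0x62U, 0x63U };
+    Hacl_SHA3_sha3_256((uint32_t)3U, msg, digest);
+*/
+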
+static const
+uint32_t
+keccak_rotc[24U] =
+ {
+ (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U,
+ (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U,
+ (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U,
+ (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U
+ };
+
+static const
+uint32_t
+keccak_piln[24U] =
+ {
+ (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U,
+ (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U,
+ (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U,
+ (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U
+ };
+
+static const
+uint64_t
+keccak_rndc[24U] =
+ {
+ (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU,
+ (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U,
+ (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU,
+ (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU,
+ (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U,
+ (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U,
+ (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U,
+ (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U
+ };
+
+void Hacl_Impl_SHA3_state_permute(uint64_t *s)
+{
+ for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++)
+ {
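+    /* Theta step: compute the five column parities and mix them into every lane. */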
+ uint64_t _C[5U] = { 0U };
+ KRML_MAYBE_FOR5(i,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)1U,
+ _C[i] =
+ s[i
+ + (uint32_t)0U]
+ ^
+ (s[i
+ + (uint32_t)5U]
+ ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U]))););
+ KRML_MAYBE_FOR5(i1,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)1U,
+ uint64_t uu____0 = _C[(i1 + (uint32_t)1U) % (uint32_t)5U];
+ uint64_t
+ _D =
+ _C[(i1 + (uint32_t)4U)
+ % (uint32_t)5U]
+ ^ (uu____0 << (uint32_t)1U | uu____0 >> (uint32_t)63U);
+ KRML_MAYBE_FOR5(i,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)1U,
+ s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;););
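+    /* Rho and pi steps: rotate each lane by its offset while permuting lane positions. */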
+ uint64_t x = s[1U];
+ uint64_t current = x;
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+ {
+ uint32_t _Y = keccak_piln[i];
+ uint32_t r = keccak_rotc[i];
+ uint64_t temp = s[_Y];
+ uint64_t uu____1 = current;
+ s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r);
+ current = temp;
+ }
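+    /* Chi step: non-linear mix of each row of five lanes. */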
+ KRML_MAYBE_FOR5(i,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)1U,
+ uint64_t
+ v0 =
+ s[(uint32_t)0U
+ + (uint32_t)5U * i]
+ ^ (~s[(uint32_t)1U + (uint32_t)5U * i] & s[(uint32_t)2U + (uint32_t)5U * i]);
+ uint64_t
+ v1 =
+ s[(uint32_t)1U
+ + (uint32_t)5U * i]
+ ^ (~s[(uint32_t)2U + (uint32_t)5U * i] & s[(uint32_t)3U + (uint32_t)5U * i]);
+ uint64_t
+ v2 =
+ s[(uint32_t)2U
+ + (uint32_t)5U * i]
+ ^ (~s[(uint32_t)3U + (uint32_t)5U * i] & s[(uint32_t)4U + (uint32_t)5U * i]);
+ uint64_t
+ v3 =
+ s[(uint32_t)3U
+ + (uint32_t)5U * i]
+ ^ (~s[(uint32_t)4U + (uint32_t)5U * i] & s[(uint32_t)0U + (uint32_t)5U * i]);
+ uint64_t
+ v4 =
+ s[(uint32_t)4U
+ + (uint32_t)5U * i]
+ ^ (~s[(uint32_t)0U + (uint32_t)5U * i] & s[(uint32_t)1U + (uint32_t)5U * i]);
+ s[(uint32_t)0U + (uint32_t)5U * i] = v0;
+ s[(uint32_t)1U + (uint32_t)5U * i] = v1;
+ s[(uint32_t)2U + (uint32_t)5U * i] = v2;
+ s[(uint32_t)3U + (uint32_t)5U * i] = v3;
+ s[(uint32_t)4U + (uint32_t)5U * i] = v4;);
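+    /* Iota step: XOR the round constant into lane (0, 0). */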
+ uint64_t c = keccak_rndc[i0];
+ s[0U] = s[0U] ^ c;
+ }
+}
+
+void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
+{
+ uint8_t block[200U] = { 0U };
+ memcpy(block, input, rateInBytes * sizeof (uint8_t));
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+ {
+ uint64_t u = load64_le(block + i * (uint32_t)8U);
+ uint64_t x = u;
+ s[i] = s[i] ^ x;
+ }
+}
+
+static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res)
+{
+ uint8_t block[200U] = { 0U };
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+ {
+ uint64_t sj = s[i];
+ store64_le(block + i * (uint32_t)8U, sj);
+ }
+ memcpy(res, block, rateInBytes * sizeof (uint8_t));
+}
+
+void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
+{
+ Hacl_Impl_SHA3_loadState(rateInBytes, block, s);
+ Hacl_Impl_SHA3_state_permute(s);
+}
+
+static void
+absorb(
+ uint64_t *s,
+ uint32_t rateInBytes,
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint8_t delimitedSuffix
+)
+{
+ uint32_t n_blocks = inputByteLen / rateInBytes;
+ uint32_t rem = inputByteLen % rateInBytes;
+ for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ {
+ uint8_t *block = input + i * rateInBytes;
+ Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s);
+ }
+ uint8_t *last = input + n_blocks * rateInBytes;
+ uint8_t lastBlock_[200U] = { 0U };
+ uint8_t *lastBlock = lastBlock_;
+ memcpy(lastBlock, last, rem * sizeof (uint8_t));
+ lastBlock[rem] = delimitedSuffix;
+ Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s);
+ if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U)
+ {
+ Hacl_Impl_SHA3_state_permute(s);
+ }
+ uint8_t nextBlock_[200U] = { 0U };
+ uint8_t *nextBlock = nextBlock_;
+ nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U;
+ Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s);
+ Hacl_Impl_SHA3_state_permute(s);
+}
+
+void
+Hacl_Impl_SHA3_squeeze(
+ uint64_t *s,
+ uint32_t rateInBytes,
+ uint32_t outputByteLen,
+ uint8_t *output
+)
+{
+ uint32_t outBlocks = outputByteLen / rateInBytes;
+ uint32_t remOut = outputByteLen % rateInBytes;
+ uint8_t *last = output + outputByteLen - remOut;
+ uint8_t *blocks = output;
+ for (uint32_t i = (uint32_t)0U; i < outBlocks; i++)
+ {
+ storeState(rateInBytes, s, blocks + i * rateInBytes);
+ Hacl_Impl_SHA3_state_permute(s);
+ }
+ storeState(remOut, s, last);
+}
+
+void
+Hacl_Impl_SHA3_keccak(
+ uint32_t rate,
+ uint32_t capacity,
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint8_t delimitedSuffix,
+ uint32_t outputByteLen,
+ uint8_t *output
+)
+{
+ uint32_t rateInBytes = rate / (uint32_t)8U;
+ uint64_t s[25U] = { 0U };
+ absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix);
+ Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output);
+}
+
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h
new file mode 100644
index 0000000000..681b6af4a8
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h
@@ -0,0 +1,130 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_SHA3_H
+#define __Hacl_Hash_SHA3_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+
+typedef struct Hacl_Streaming_Keccak_hash_buf_s
+{
+ Spec_Hash_Definitions_hash_alg fst;
+ uint64_t *snd;
+}
+Hacl_Streaming_Keccak_hash_buf;
+
+typedef struct Hacl_Streaming_Keccak_state_s
+{
+ Hacl_Streaming_Keccak_hash_buf block_state;
+ uint8_t *buf;
+ uint64_t total_len;
+}
+Hacl_Streaming_Keccak_state;
+
+Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s);
+
+Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a);
+
+void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s);
+
+Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0);
+
+void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s);
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len);
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst);
+
+Hacl_Streaming_Types_error_code
+Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l);
+
+uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s);
+
+uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s);
+
+bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s);
+
+void
+Hacl_SHA3_shake128_hacl(
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint32_t outputByteLen,
+ uint8_t *output
+);
+
+void
+Hacl_SHA3_shake256_hacl(
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint32_t outputByteLen,
+ uint8_t *output
+);
+
+void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+
+void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+
+void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+
+void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+
+void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
+
+void
+Hacl_Impl_SHA3_squeeze(
+ uint64_t *s,
+ uint32_t rateInBytes,
+ uint32_t outputByteLen,
+ uint8_t *output
+);
+
+void
+Hacl_Impl_SHA3_keccak(
+ uint32_t rate,
+ uint32_t capacity,
+ uint32_t inputByteLen,
+ uint8_t *input,
+ uint8_t delimitedSuffix,
+ uint32_t outputByteLen,
+ uint8_t *output
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_SHA3_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Streaming_Types.h b/contrib/tools/python3/Modules/_hacl/Hacl_Streaming_Types.h
new file mode 100644
index 0000000000..15ef16ba60
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Streaming_Types.h
@@ -0,0 +1,83 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Streaming_Types_H
+#define __Hacl_Streaming_Types_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#define Spec_Hash_Definitions_SHA2_224 0
+#define Spec_Hash_Definitions_SHA2_256 1
+#define Spec_Hash_Definitions_SHA2_384 2
+#define Spec_Hash_Definitions_SHA2_512 3
+#define Spec_Hash_Definitions_SHA1 4
+#define Spec_Hash_Definitions_MD5 5
+#define Spec_Hash_Definitions_Blake2S 6
+#define Spec_Hash_Definitions_Blake2B 7
+#define Spec_Hash_Definitions_SHA3_256 8
+#define Spec_Hash_Definitions_SHA3_224 9
+#define Spec_Hash_Definitions_SHA3_384 10
+#define Spec_Hash_Definitions_SHA3_512 11
+#define Spec_Hash_Definitions_Shake128 12
+#define Spec_Hash_Definitions_Shake256 13
+
+typedef uint8_t Spec_Hash_Definitions_hash_alg;
+
+#define Hacl_Streaming_Types_Success 0
+#define Hacl_Streaming_Types_InvalidAlgorithm 1
+#define Hacl_Streaming_Types_InvalidLength 2
+#define Hacl_Streaming_Types_MaximumLengthExceeded 3
+
+typedef uint8_t Hacl_Streaming_Types_error_code;
+
+typedef struct Hacl_Streaming_MD_state_32_s
+{
+ uint32_t *block_state;
+ uint8_t *buf;
+ uint64_t total_len;
+}
+Hacl_Streaming_MD_state_32;
+
+typedef struct Hacl_Streaming_MD_state_64_s
+{
+ uint64_t *block_state;
+ uint8_t *buf;
+ uint64_t total_len;
+}
+Hacl_Streaming_MD_state_64;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Streaming_Types_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h
new file mode 100644
index 0000000000..3d36d44073
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h
@@ -0,0 +1,346 @@
+/*
+ Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License.
+*/
+
+
+#ifndef __FStar_UInt128_Verified_H
+#define __FStar_UInt128_Verified_H
+
+#include "FStar_UInt_8_16_32_64.h"
+#include <inttypes.h>
+#include <stdbool.h>
+#include "krml/types.h"
+#include "krml/internal/target.h"
+
+static inline uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
+{
+ return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
+}
+
+static inline uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b)
+{
+ return FStar_UInt128_constant_time_carry(a, b);
+}
+
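+/*
+constant_time_carry above is a branchless `a < b` test on uint64_t: it
+evaluates to 1 exactly when a < b, with no secret-dependent branch. The
+adders below use it as FStar_UInt128_carry(a.low + b.low, b.low), because an
+unsigned addition wrapped precisely when the truncated sum is smaller than
+one of the addends. Informal check (illustrative only):
+
+    // FStar_UInt128_carry(5U + 7U, 7U)  -> 0  (12 >= 7: no wrap)
+    // FStar_UInt128_carry(1U, 2U)       -> 1  (e.g. UINT64_MAX + 2 wrapped)
+*/
+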
+static inline FStar_UInt128_uint128
+FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low + b.low;
+ lit.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low + b.low;
+ lit.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low + b.low;
+ lit.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low - b.low;
+ lit.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low - b.low;
+ lit.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low - b.low;
+ lit.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return FStar_UInt128_sub_mod_impl(a, b);
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low & b.low;
+ lit.high = a.high & b.high;
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low ^ b.low;
+ lit.high = a.high ^ b.high;
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low | b.low;
+ lit.high = a.high | b.high;
+ return lit;
+}
+
+static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = ~a.low;
+ lit.high = ~a.high;
+ return lit;
+}
+
+static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
+
+static inline uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
+}
+
+static inline uint64_t
+FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return FStar_UInt128_add_u64_shift_left(hi, lo, s);
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s == (uint32_t)0U)
+ {
+ return a;
+ }
+ else
+ {
+ FStar_UInt128_uint128 lit;
+ lit.low = a.low << s;
+ lit.high = FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s);
+ return lit;
+ }
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = (uint64_t)0U;
+ lit.high = a.low << (s - FStar_UInt128_u32_64);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s < FStar_UInt128_u32_64)
+ {
+ return FStar_UInt128_shift_left_small(a, s);
+ }
+ else
+ {
+ return FStar_UInt128_shift_left_large(a, s);
+ }
+}
+
+static inline uint64_t FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
+}
+
+static inline uint64_t
+FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return FStar_UInt128_add_u64_shift_right(hi, lo, s);
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s == (uint32_t)0U)
+ {
+ return a;
+ }
+ else
+ {
+ FStar_UInt128_uint128 lit;
+ lit.low = FStar_UInt128_add_u64_shift_right_respec(a.high, a.low, s);
+ lit.high = a.high >> s;
+ return lit;
+ }
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a.high >> (s - FStar_UInt128_u32_64);
+ lit.high = (uint64_t)0U;
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s < FStar_UInt128_u32_64)
+ {
+ return FStar_UInt128_shift_right_small(a, s);
+ }
+ else
+ {
+ return FStar_UInt128_shift_right_large(a, s);
+ }
+}
+
+static inline bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return a.low == b.low && a.high == b.high;
+}
+
+static inline bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return a.high > b.high || (a.high == b.high && a.low > b.low);
+}
+
+static inline bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return a.high < b.high || (a.high == b.high && a.low < b.low);
+}
+
+static inline bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return a.high > b.high || (a.high == b.high && a.low >= b.low);
+}
+
+static inline bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return a.high < b.high || (a.high == b.high && a.low <= b.low);
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high);
+ lit.high = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high);
+ return lit;
+}
+
+static inline FStar_UInt128_uint128
+FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low =
+ (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high))
+ | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low));
+ lit.high =
+ (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high))
+ | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low));
+ return lit;
+}
+
+static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low = a;
+ lit.high = (uint64_t)0U;
+ return lit;
+}
+
+static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
+{
+ return a.low;
+}
+
+static inline uint64_t FStar_UInt128_u64_mod_32(uint64_t a)
+{
+ return a & (uint64_t)0xffffffffU;
+}
+
+static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
+
+static inline uint64_t FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
+{
+ return lo + (hi << FStar_UInt128_u32_32);
+}
+
+static inline FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low =
+ FStar_UInt128_u32_combine((x >> FStar_UInt128_u32_32)
+ * (uint64_t)y
+ + (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >> FStar_UInt128_u32_32),
+ FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * (uint64_t)y));
+ lit.high =
+ ((x >> FStar_UInt128_u32_32)
+ * (uint64_t)y
+ + (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >> FStar_UInt128_u32_32))
+ >> FStar_UInt128_u32_32;
+ return lit;
+}
+
+static inline uint64_t FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
+{
+ return lo + (hi << FStar_UInt128_u32_32);
+}
+
+static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
+{
+ FStar_UInt128_uint128 lit;
+ lit.low =
+ FStar_UInt128_u32_combine_(FStar_UInt128_u64_mod_32(x)
+ * (y >> FStar_UInt128_u32_32)
+ +
+ FStar_UInt128_u64_mod_32((x >> FStar_UInt128_u32_32)
+ * FStar_UInt128_u64_mod_32(y)
+ + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32)),
+ FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y)));
+ lit.high =
+ (x >> FStar_UInt128_u32_32)
+ * (y >> FStar_UInt128_u32_32)
+ +
+ (((x >> FStar_UInt128_u32_32)
+ * FStar_UInt128_u64_mod_32(y)
+ + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32))
+ >> FStar_UInt128_u32_32)
+ +
+ ((FStar_UInt128_u64_mod_32(x)
+ * (y >> FStar_UInt128_u32_32)
+ +
+ FStar_UInt128_u64_mod_32((x >> FStar_UInt128_u32_32)
+ * FStar_UInt128_u64_mod_32(y)
+ + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32)))
+ >> FStar_UInt128_u32_32);
+ return lit;
+}
+
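+/*
+mul_wide above is schoolbook multiplication over 32-bit halves: u64_mod_32
+and a 32-bit shift split each operand, the four partial products are 32x32
+multiplies that fit in 64 bits, and u32_combine_ recombines them with carry
+propagation. Where a 128-bit compiler type exists, the result agrees with it;
+a hypothetical spot check (GCC/Clang extension, illustrative only):
+
+    #if defined(__SIZEOF_INT128__)
+    unsigned __int128 ref = (unsigned __int128)x * y;
+    FStar_UInt128_uint128 r = FStar_UInt128_mul_wide(x, y);
+    // r.low == (uint64_t)ref && r.high == (uint64_t)(ref >> 64)
+    #endif
+*/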
+
+#define __FStar_UInt128_Verified_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
new file mode 100644
index 0000000000..a56c7d6134
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
@@ -0,0 +1,107 @@
+/*
+ Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License.
+*/
+
+
+#ifndef __FStar_UInt_8_16_32_64_H
+#define __FStar_UInt_8_16_32_64_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include "krml/lowstar_endianness.h"
+#include "krml/types.h"
+#include "krml/internal/target.h"
+
+static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
+{
+ uint64_t x = a ^ b;
+ uint64_t minus_x = ~x + (uint64_t)1U;
+ uint64_t x_or_minus_x = x | minus_x;
+ uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
+ return xnx - (uint64_t)1U;
+}
+
+static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
+{
+ uint64_t x = a;
+ uint64_t y = b;
+ uint64_t x_xor_y = x ^ y;
+ uint64_t x_sub_y = x - y;
+ uint64_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint64_t q = x_xor_y | x_sub_y_xor_y;
+ uint64_t x_xor_q = x ^ q;
+ uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
+ return x_xor_q_ - (uint64_t)1U;
+}
+
+static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
+{
+ uint32_t x = a ^ b;
+ uint32_t minus_x = ~x + (uint32_t)1U;
+ uint32_t x_or_minus_x = x | minus_x;
+ uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
+ return xnx - (uint32_t)1U;
+}
+
+static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
+{
+ uint32_t x = a;
+ uint32_t y = b;
+ uint32_t x_xor_y = x ^ y;
+ uint32_t x_sub_y = x - y;
+ uint32_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint32_t q = x_xor_y | x_sub_y_xor_y;
+ uint32_t x_xor_q = x ^ q;
+ uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
+ return x_xor_q_ - (uint32_t)1U;
+}
+
+static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
+{
+ uint16_t x = a ^ b;
+ uint16_t minus_x = ~x + (uint16_t)1U;
+ uint16_t x_or_minus_x = x | minus_x;
+ uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
+ return xnx - (uint16_t)1U;
+}
+
+static inline uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
+{
+ uint16_t x = a;
+ uint16_t y = b;
+ uint16_t x_xor_y = x ^ y;
+ uint16_t x_sub_y = x - y;
+ uint16_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint16_t q = x_xor_y | x_sub_y_xor_y;
+ uint16_t x_xor_q = x ^ q;
+ uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
+ return x_xor_q_ - (uint16_t)1U;
+}
+
+static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
+{
+ uint8_t x = a ^ b;
+ uint8_t minus_x = ~x + (uint8_t)1U;
+ uint8_t x_or_minus_x = x | minus_x;
+ uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
+ return xnx - (uint8_t)1U;
+}
+
+static inline uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
+{
+ uint8_t x = a;
+ uint8_t y = b;
+ uint8_t x_xor_y = x ^ y;
+ uint8_t x_sub_y = x - y;
+ uint8_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint8_t q = x_xor_y | x_sub_y_xor_y;
+ uint8_t x_xor_q = x ^ q;
+ uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
+ return x_xor_q_ - (uint8_t)1U;
+}
+
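+/*
+The *_eq_mask / *_gte_mask helpers above return an all-ones or all-zero word
+rather than a boolean, so callers can select between values without branching
+on secrets. A minimal sketch of the idiom (illustrative only):
+
+    static inline uint64_t ct_select_u64(uint64_t mask, uint64_t a, uint64_t b)
+    {
+      return (mask & a) | (~mask & b);  // all-ones mask picks a, zero picks b
+    }
+
+    // e.g. ct_select_u64(FStar_UInt64_eq_mask(x, y), a, b)
+*/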
+
+#define __FStar_UInt_8_16_32_64_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/fstar_uint128_struct_endianness.h b/contrib/tools/python3/Modules/_hacl/include/krml/fstar_uint128_struct_endianness.h
new file mode 100644
index 0000000000..e2b6d62859
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/fstar_uint128_struct_endianness.h
@@ -0,0 +1,68 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef FSTAR_UINT128_STRUCT_ENDIANNESS_H
+#define FSTAR_UINT128_STRUCT_ENDIANNESS_H
+
+/* Hand-written implementation of endianness-related uint128 functions
+ * for the extracted uint128 implementation */
+
+/* Access 64-bit fields within the int128. */
+#define HIGH64_OF(x) ((x)->high)
+#define LOW64_OF(x) ((x)->low)
+
+/* A series of definitions written using pointers. */
+
+inline static void load128_le_(uint8_t *b, uint128_t *r) {
+ LOW64_OF(r) = load64_le(b);
+ HIGH64_OF(r) = load64_le(b + 8);
+}
+
+inline static void store128_le_(uint8_t *b, uint128_t *n) {
+ store64_le(b, LOW64_OF(n));
+ store64_le(b + 8, HIGH64_OF(n));
+}
+
+inline static void load128_be_(uint8_t *b, uint128_t *r) {
+ HIGH64_OF(r) = load64_be(b);
+ LOW64_OF(r) = load64_be(b + 8);
+}
+
+inline static void store128_be_(uint8_t *b, uint128_t *n) {
+ store64_be(b, HIGH64_OF(n));
+ store64_be(b + 8, LOW64_OF(n));
+}
+
+#ifndef KRML_NOSTRUCT_PASSING
+
+inline static uint128_t load128_le(uint8_t *b) {
+ uint128_t r;
+ load128_le_(b, &r);
+ return r;
+}
+
+inline static void store128_le(uint8_t *b, uint128_t n) {
+ store128_le_(b, &n);
+}
+
+inline static uint128_t load128_be(uint8_t *b) {
+ uint128_t r;
+ load128_be_(b, &r);
+ return r;
+}
+
+inline static void store128_be(uint8_t *b, uint128_t n) {
+ store128_be_(b, &n);
+}
+
+#else /* defined(KRML_NOSTRUCT_PASSING) */
+
+# define print128 print128_
+# define load128_le load128_le_
+# define store128_le store128_le_
+# define load128_be load128_be_
+# define store128_be store128_be_
+
+#endif /* KRML_NOSTRUCT_PASSING */
+
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h b/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h
new file mode 100644
index 0000000000..5a2f94eb2e
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h
@@ -0,0 +1,266 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef __KRML_TARGET_H
+#define __KRML_TARGET_H
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <assert.h>
+
+/* Since KaRaMeL emits the inline keyword unconditionally, we follow the
+ * guidelines at https://gcc.gnu.org/onlinedocs/gcc/Inline.html and make this
+ * __inline__ to ensure the code compiles with -std=c90 and earlier. */
+#ifdef __GNUC__
+# define inline __inline__
+#endif
+
+/******************************************************************************/
+/* Macros that KaRaMeL will generate. */
+/******************************************************************************/
+
+/* For "bare" targets that do not have a C stdlib, the user might want to use
+ * [-add-early-include '"mydefinitions.h"'] and override these. */
+#ifndef KRML_HOST_PRINTF
+# define KRML_HOST_PRINTF printf
+#endif
+
+#if ( \
+ (defined __STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
+ (!(defined KRML_HOST_EPRINTF)))
+# define KRML_HOST_EPRINTF(...) fprintf(stderr, __VA_ARGS__)
+#elif !(defined KRML_HOST_EPRINTF) && defined(_MSC_VER)
+# define KRML_HOST_EPRINTF(...) fprintf(stderr, __VA_ARGS__)
+#endif
+
+#ifndef KRML_HOST_EXIT
+# define KRML_HOST_EXIT exit
+#endif
+
+#ifndef KRML_HOST_MALLOC
+# define KRML_HOST_MALLOC malloc
+#endif
+
+#ifndef KRML_HOST_CALLOC
+# define KRML_HOST_CALLOC calloc
+#endif
+
+#ifndef KRML_HOST_FREE
+# define KRML_HOST_FREE free
+#endif
+
+#ifndef KRML_HOST_IGNORE
+# define KRML_HOST_IGNORE(x) (void)(x)
+#endif
+
+/* In FStar.Buffer.fst, the size of arrays is uint32_t, but it's a number of
+ * *elements*. Do an ugly, run-time check (some of which KaRaMeL can eliminate).
+ */
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 4))
+# define _KRML_CHECK_SIZE_PRAGMA \
+ _Pragma("GCC diagnostic ignored \"-Wtype-limits\"")
+#else
+# define _KRML_CHECK_SIZE_PRAGMA
+#endif
+
+#define KRML_CHECK_SIZE(size_elt, sz) \
+ do { \
+ _KRML_CHECK_SIZE_PRAGMA \
+ if (((size_t)(sz)) > ((size_t)(SIZE_MAX / (size_elt)))) { \
+ KRML_HOST_PRINTF( \
+ "Maximum allocatable size exceeded, aborting before overflow at " \
+ "%s:%d\n", \
+ __FILE__, __LINE__); \
+ KRML_HOST_EXIT(253); \
+ } \
+ } while (0)
+
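+/* A typical generated call site guards an allocation of `n` elements before
+ * the multiplication can overflow:
+ *
+ *   KRML_CHECK_SIZE(sizeof (uint64_t), n);
+ *   uint64_t *p = (uint64_t *)KRML_HOST_CALLOC(n, sizeof (uint64_t));
+ */
+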
+/* Macros for prettier unrolling of loops */
+#define KRML_LOOP1(i, n, x) { \
+ x \
+ i += n; \
+}
+
+#define KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP3(i, n, x) \
+ KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP5(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP6(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP7(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP3(i, n, x)
+
+#define KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP4(i, n, x)
+
+#define KRML_LOOP9(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP10(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP11(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP3(i, n, x)
+
+#define KRML_LOOP12(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x)
+
+#define KRML_LOOP13(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP5(i, n, x)
+
+#define KRML_LOOP14(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP6(i, n, x)
+
+#define KRML_LOOP15(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP7(i, n, x)
+
+#define KRML_LOOP16(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP8(i, n, x)
+
+#define KRML_UNROLL_FOR(i, z, n, k, x) do { \
+ uint32_t i = z; \
+ KRML_LOOP##n(i, k, x) \
+} while (0)
+
+#define KRML_ACTUAL_FOR(i, z, n, k, x) \
+ do { \
+ for (uint32_t i = z; i < n; i += k) { \
+ x \
+ } \
+ } while (0)
+
+#ifndef KRML_UNROLL_MAX
+#define KRML_UNROLL_MAX 16
+#endif
+
+/* In KRML_MAYBE_FOR0..KRML_MAYBE_FOR16 below, the numeral suffix is the
+ * number of loop iterations, i.e. (n - z)/k as evaluated by krml */
+#if 0 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR0(i, z, n, k, x)
+#else
+#define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 1 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x)
+#else
+#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 2 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x)
+#else
+#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 3 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x)
+#else
+#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 4 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x)
+#else
+#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 5 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x)
+#else
+#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 6 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x)
+#else
+#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 7 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x)
+#else
+#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 8 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x)
+#else
+#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 9 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x)
+#else
+#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 10 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x)
+#else
+#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 11 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x)
+#else
+#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 12 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x)
+#else
+#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 13 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x)
+#else
+#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 14 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x)
+#else
+#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 15 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x)
+#else
+#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 16 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x)
+#else
+#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
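+
+/* Example (illustrative): KRML_MAYBE_FOR5(i, 0, 5, 1, acc += a[i];) runs the
+ * body five times with i = 0..4; when KRML_UNROLL_MAX >= 5 the loop is fully
+ * unrolled, otherwise it falls back to a plain for-loop via KRML_ACTUAL_FOR. */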
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/lowstar_endianness.h b/contrib/tools/python3/Modules/_hacl/include/krml/lowstar_endianness.h
new file mode 100644
index 0000000000..b6c648602b
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/lowstar_endianness.h
@@ -0,0 +1,231 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef __LOWSTAR_ENDIANNESS_H
+#define __LOWSTAR_ENDIANNESS_H
+
+#include <string.h>
+#include <inttypes.h>
+
+/******************************************************************************/
+/* Implementing C.fst (part 2: endian-ness macros) */
+/******************************************************************************/
+
+/* ... for Linux */
+#if defined(__linux__) || defined(__CYGWIN__) || defined (__USE_SYSTEM_ENDIAN_H__) || defined(__GLIBC__)
+# include <endian.h>
+
+/* ... for OSX */
+#elif defined(__APPLE__)
+# include <libkern/OSByteOrder.h>
+# define htole64(x) OSSwapHostToLittleInt64(x)
+# define le64toh(x) OSSwapLittleToHostInt64(x)
+# define htobe64(x) OSSwapHostToBigInt64(x)
+# define be64toh(x) OSSwapBigToHostInt64(x)
+
+# define htole16(x) OSSwapHostToLittleInt16(x)
+# define le16toh(x) OSSwapLittleToHostInt16(x)
+# define htobe16(x) OSSwapHostToBigInt16(x)
+# define be16toh(x) OSSwapBigToHostInt16(x)
+
+# define htole32(x) OSSwapHostToLittleInt32(x)
+# define le32toh(x) OSSwapLittleToHostInt32(x)
+# define htobe32(x) OSSwapHostToBigInt32(x)
+# define be32toh(x) OSSwapBigToHostInt32(x)
+
+/* ... for Solaris */
+#elif defined(__sun__)
+# error #include <sys/byteorder.h>
+# define htole64(x) LE_64(x)
+# define le64toh(x) LE_64(x)
+# define htobe64(x) BE_64(x)
+# define be64toh(x) BE_64(x)
+
+# define htole16(x) LE_16(x)
+# define le16toh(x) LE_16(x)
+# define htobe16(x) BE_16(x)
+# define be16toh(x) BE_16(x)
+
+# define htole32(x) LE_32(x)
+# define le32toh(x) LE_32(x)
+# define htobe32(x) BE_32(x)
+# define be32toh(x) BE_32(x)
+
+/* ... for the BSDs */
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+# include <sys/endian.h>
+#elif defined(__OpenBSD__)
+# include <endian.h>
+
+/* ... for Windows (MSVC)... not targeting XBOX 360! */
+#elif defined(_MSC_VER)
+
+# include <stdlib.h>
+# define htobe16(x) _byteswap_ushort(x)
+# define htole16(x) (x)
+# define be16toh(x) _byteswap_ushort(x)
+# define le16toh(x) (x)
+
+# define htobe32(x) _byteswap_ulong(x)
+# define htole32(x) (x)
+# define be32toh(x) _byteswap_ulong(x)
+# define le32toh(x) (x)
+
+# define htobe64(x) _byteswap_uint64(x)
+# define htole64(x) (x)
+# define be64toh(x) _byteswap_uint64(x)
+# define le64toh(x) (x)
+
+/* ... for Windows (GCC-like, e.g. mingw or clang) */
+#elif (defined(_WIN32) || defined(_WIN64) || defined(__EMSCRIPTEN__)) && \
+ (defined(__GNUC__) || defined(__clang__))
+
+# define htobe16(x) __builtin_bswap16(x)
+# define htole16(x) (x)
+# define be16toh(x) __builtin_bswap16(x)
+# define le16toh(x) (x)
+
+# define htobe32(x) __builtin_bswap32(x)
+# define htole32(x) (x)
+# define be32toh(x) __builtin_bswap32(x)
+# define le32toh(x) (x)
+
+# define htobe64(x) __builtin_bswap64(x)
+# define htole64(x) (x)
+# define be64toh(x) __builtin_bswap64(x)
+# define le64toh(x) (x)
+
+/* ... generic big-endian fallback code */
+/* ... AIX doesn't have __BYTE_ORDER__ (with XLC compiler) & is always big-endian */
+#elif (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || defined(_AIX)
+
+/* byte swapping code inspired by:
+ * https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h
+ * */
+
+# define htobe32(x) (x)
+# define be32toh(x) (x)
+# define htole32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+# define le32toh(x) (htole32((x)))
+
+# define htobe64(x) (x)
+# define be64toh(x) (x)
+# define htole64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+# define le64toh(x) (htole64((x)))
+
+/* ... generic little-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+# define htole32(x) (x)
+# define le32toh(x) (x)
+# define htobe32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+# define be32toh(x) (htobe32((x)))
+
+# define htole64(x) (x)
+# define le64toh(x) (x)
+# define htobe64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+# define be64toh(x) (htobe64((x)))
+
+/* ... couldn't determine endian-ness of the target platform */
+#else
+# error "Please define __BYTE_ORDER__!"
+
+#endif /* defined(__linux__) || ... */
+
+/* Loads and stores. These avoid undefined behavior due to unaligned memory
+ * accesses, via memcpy. */
+
+inline static uint16_t load16(uint8_t *b) {
+ uint16_t x;
+ memcpy(&x, b, 2);
+ return x;
+}
+
+inline static uint32_t load32(uint8_t *b) {
+ uint32_t x;
+ memcpy(&x, b, 4);
+ return x;
+}
+
+inline static uint64_t load64(uint8_t *b) {
+ uint64_t x;
+ memcpy(&x, b, 8);
+ return x;
+}
+
+inline static void store16(uint8_t *b, uint16_t i) {
+ memcpy(b, &i, 2);
+}
+
+inline static void store32(uint8_t *b, uint32_t i) {
+ memcpy(b, &i, 4);
+}
+
+inline static void store64(uint8_t *b, uint64_t i) {
+ memcpy(b, &i, 8);
+}
+
+/* Legacy accessors so that this header can serve as an implementation of
+ * C.Endianness */
+#define load16_le(b) (le16toh(load16(b)))
+#define store16_le(b, i) (store16(b, htole16(i)))
+#define load16_be(b) (be16toh(load16(b)))
+#define store16_be(b, i) (store16(b, htobe16(i)))
+
+#define load32_le(b) (le32toh(load32(b)))
+#define store32_le(b, i) (store32(b, htole32(i)))
+#define load32_be(b) (be32toh(load32(b)))
+#define store32_be(b, i) (store32(b, htobe32(i)))
+
+#define load64_le(b) (le64toh(load64(b)))
+#define store64_le(b, i) (store64(b, htole64(i)))
+#define load64_be(b) (be64toh(load64(b)))
+#define store64_be(b, i) (store64(b, htobe64(i)))
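+
+/* Usage sketch (illustrative only, not part of this header): because the raw
+ * loads and stores go through memcpy, the composed accessors are safe even at
+ * misaligned addresses, and the hto* / *toh macros fix the byte order:
+ *
+ *   uint8_t wire[8] = {0};
+ *   store32_be(wire + 1, 0xDEADBEEFu);            // big-endian, odd address
+ *   assert(wire[1] == 0xDE && wire[2] == 0xAD &&
+ *          wire[3] == 0xBE && wire[4] == 0xEF);   // network byte order
+ *   assert(load32_be(wire + 1) == 0xDEADBEEFu);   // round-trips
+ */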
+
+/* Co-existence of LowStar.Endianness and FStar.Endianness generates name
+ * conflicts, because of course both insist on having no prefixes. Until a
+ * prefix is added, or until we truly retire FStar.Endianness, work around
+ * the issue with the aliases below. */
+#define load16_le0 load16_le
+#define store16_le0 store16_le
+#define load16_be0 load16_be
+#define store16_be0 store16_be
+
+#define load32_le0 load32_le
+#define store32_le0 store32_le
+#define load32_be0 load32_be
+#define store32_be0 store32_be
+
+#define load64_le0 load64_le
+#define store64_le0 store64_le
+#define load64_be0 load64_be
+#define store64_be0 store64_be
+
+#define load128_le0 load128_le
+#define store128_le0 store128_le
+#define load128_be0 load128_be
+#define store128_be0 store128_be
+
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/types.h b/contrib/tools/python3/Modules/_hacl/include/krml/types.h
new file mode 100644
index 0000000000..509f555536
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/types.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <inttypes.h>
+
+typedef struct FStar_UInt128_uint128_s {
+ uint64_t low;
+ uint64_t high;
+} FStar_UInt128_uint128, uint128_t;
+
+#define KRML_VERIFIED_UINT128
+
+#include "krml/lowstar_endianness.h"
+#include "krml/fstar_uint128_struct_endianness.h"
+#include "krml/FStar_UInt128_Verified.h"
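For orientation: the verified 128-bit arithmetic pulled in by the last include operates on this two-limb struct. A sketch, assuming the helpers FStar_UInt128_uint64_to_uint128 and FStar_UInt128_add exported by FStar_UInt128_Verified.h, of how a caller builds the 128-bit total-length counter taken by the SHA-512 functions later in this change:

    #include "krml/types.h"

    /* Sketch: total length = bytes already hashed + bytes in the final
     * partial block, carried as a verified 128-bit integer (assumed API). */
    static FStar_UInt128_uint128
    total_len(uint64_t prev_bytes, uint32_t last_len)
    {
      return FStar_UInt128_add(
          FStar_UInt128_uint64_to_uint128(prev_bytes),
          FStar_UInt128_uint64_to_uint128((uint64_t)last_len));
    }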
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h
new file mode 100644
index 0000000000..87ad4cf228
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h
@@ -0,0 +1,61 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_MD5_H
+#define __internal_Hacl_Hash_MD5_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "../Hacl_Hash_MD5.h"
+
+void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s);
+
+void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst);
+
+void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+
+void
+Hacl_Hash_MD5_legacy_update_last(
+ uint32_t *s,
+ uint64_t prev_len,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_MD5_H_DEFINED
+#endif
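A usage sketch for this internal block-oriented API; the block contract assumed here (four-word state, 64-byte blocks, prev_len counted in bytes already processed) is the usual HACL* convention and should be checked against Hacl_Hash_MD5.c:

    #include <stdint.h>
    #include "internal/Hacl_Hash_MD5.h"

    /* Sketch: one-shot MD5 via init / update_multi / update_last / finish.
     * Hacl_Hash_MD5_legacy_hash, declared above, wraps the same sequence. */
    static void md5_oneshot(uint8_t *input, uint32_t len, uint8_t dst[16])
    {
      uint32_t s[4];
      uint32_t n_blocks = len / 64U;
      uint32_t rest = len % 64U;
      Hacl_Hash_Core_MD5_legacy_init(s);
      Hacl_Hash_MD5_legacy_update_multi(s, input, n_blocks);
      Hacl_Hash_MD5_legacy_update_last(s, (uint64_t)n_blocks * 64U,
                                       input + n_blocks * 64U, rest);
      Hacl_Hash_Core_MD5_legacy_finish(s, dst);
    }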
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h
new file mode 100644
index 0000000000..d2d9df44c6
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h
@@ -0,0 +1,61 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_SHA1_H
+#define __internal_Hacl_Hash_SHA1_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "../Hacl_Hash_SHA1.h"
+
+void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s);
+
+void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst);
+
+void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+
+void
+Hacl_Hash_SHA1_legacy_update_last(
+ uint32_t *s,
+ uint64_t prev_len,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_SHA1_H_DEFINED
+#endif
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h
new file mode 100644
index 0000000000..851f7dc60c
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h
@@ -0,0 +1,184 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_SHA2_H
+#define __internal_Hacl_Hash_SHA2_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+
+#include "../Hacl_Hash_SHA2.h"
+
+static const
+uint32_t
+Hacl_Impl_SHA2_Generic_h224[8U] =
+ {
+ (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
+ (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+ };
+
+static const
+uint32_t
+Hacl_Impl_SHA2_Generic_h256[8U] =
+ {
+ (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
+ (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+ };
+
+static const
+uint64_t
+Hacl_Impl_SHA2_Generic_h384[8U] =
+ {
+ (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
+ (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
+ (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+ };
+
+static const
+uint64_t
+Hacl_Impl_SHA2_Generic_h512[8U] =
+ {
+ (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
+ (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
+ (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+ };
+
+static const
+uint32_t
+Hacl_Impl_SHA2_Generic_k224_256[64U] =
+ {
+ (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
+ (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
+ (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
+ (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
+ (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
+ (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
+ (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
+ (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
+ (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
+ (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
+ (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
+ (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
+ (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
+ (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
+ (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
+ (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+ };
+
+static const
+uint64_t
+Hacl_Impl_SHA2_Generic_k384_512[80U] =
+ {
+ (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
+ (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
+ (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
+ (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
+ (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
+ (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
+ (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
+ (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
+ (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
+ (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
+ (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
+ (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
+ (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
+ (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
+ (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
+ (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
+ (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
+ (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
+ (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
+ (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
+ (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
+ (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
+ (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
+ (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
+ (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
+ (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
+ (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+ };
+
+void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash);
+
+void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
+
+void
+Hacl_SHA2_Scalar32_sha256_update_last(
+ uint64_t totlen,
+ uint32_t len,
+ uint8_t *b,
+ uint32_t *hash
+);
+
+void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h);
+
+void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash);
+
+void
+Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
+
+void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h);
+
+void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash);
+
+void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+
+void
+Hacl_SHA2_Scalar32_sha512_update_last(
+ FStar_UInt128_uint128 totlen,
+ uint32_t len,
+ uint8_t *b,
+ uint64_t *hash
+);
+
+void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h);
+
+void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash);
+
+void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+
+void
+Hacl_SHA2_Scalar32_sha384_update_last(
+ FStar_UInt128_uint128 totlen,
+ uint32_t len,
+ uint8_t *b,
+ uint64_t *st
+);
+
+void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_SHA2_H_DEFINED
+#endif
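The SHA-256 entry points follow the same block-oriented pattern; a sketch under the assumed contract that st is the eight-word chaining state, update_nblocks takes a byte count covering whole 64-byte blocks only, and update_last receives the grand total in bytes (confirm against Hacl_Hash_SHA2.c):

    #include <stdint.h>
    #include "internal/Hacl_Hash_SHA2.h"

    static void sha256_oneshot(uint8_t *input, uint32_t len, uint8_t dst[32])
    {
      uint32_t st[8];
      uint32_t blocks_len = len - len % 64U;  /* whole blocks only */
      Hacl_SHA2_Scalar32_sha256_init(st);
      Hacl_SHA2_Scalar32_sha256_update_nblocks(blocks_len, input, st);
      Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)len, len % 64U,
                                            input + blocks_len, st);
      Hacl_SHA2_Scalar32_sha256_finish(st, dst);
    }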
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h
new file mode 100644
index 0000000000..1c9808b8dd
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h
@@ -0,0 +1,65 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_SHA3_H
+#define __internal_Hacl_Hash_SHA3_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "../Hacl_Hash_SHA3.h"
+
+void
+Hacl_Hash_SHA3_update_multi_sha3(
+ Spec_Hash_Definitions_hash_alg a,
+ uint64_t *s,
+ uint8_t *blocks,
+ uint32_t n_blocks
+);
+
+void
+Hacl_Hash_SHA3_update_last_sha3(
+ Spec_Hash_Definitions_hash_alg a,
+ uint64_t *s,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+void Hacl_Impl_SHA3_state_permute(uint64_t *s);
+
+void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_SHA3_H_DEFINED
+#endif
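A sketch of the absorb step these helpers implement, mirroring Hacl_Impl_SHA3_absorb_inner (referenced by python_hacl_namespaces.h later in this change): loadState folds a rate-sized block into the 25-lane Keccak state and state_permute applies Keccak-f[1600]. The 136-byte rate is SHA3-256's; other variants use different rates:

    #include <stdint.h>
    #include "internal/Hacl_Hash_SHA3.h"

    /* Sketch: absorb one full 136-byte block for SHA3-256. */
    static void absorb_block_sha3_256(uint64_t s[25], uint8_t block[136])
    {
      Hacl_Impl_SHA3_loadState(136U, block, s);  /* fold block into state */
      Hacl_Impl_SHA3_state_permute(s);           /* Keccak-f[1600] */
    }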
diff --git a/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h b/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h
new file mode 100644
index 0000000000..0df236282a
--- /dev/null
+++ b/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h
@@ -0,0 +1,86 @@
+#ifndef _PYTHON_HACL_NAMESPACES_H
+#define _PYTHON_HACL_NAMESPACES_H
+
+/*
+ * C's excuse for namespaces: use globally unique names to avoid linkage
+ * conflicts with builds that link, or dynamically load, other code which may
+ * itself use HACL* libraries (see the sketch after this list).
+ */
+
+#define Hacl_Streaming_SHA2_state_sha2_224_s python_hashlib_Hacl_Streaming_SHA2_state_sha2_224_s
+#define Hacl_Streaming_SHA2_state_sha2_224 python_hashlib_Hacl_Streaming_SHA2_state_sha2_224
+#define Hacl_Streaming_SHA2_state_sha2_256 python_hashlib_Hacl_Streaming_SHA2_state_sha2_256
+#define Hacl_Streaming_SHA2_state_sha2_384_s python_hashlib_Hacl_Streaming_SHA2_state_sha2_384_s
+#define Hacl_Streaming_SHA2_state_sha2_384 python_hashlib_Hacl_Streaming_SHA2_state_sha2_384
+#define Hacl_Streaming_SHA2_state_sha2_512 python_hashlib_Hacl_Streaming_SHA2_state_sha2_512
+#define Hacl_Streaming_SHA2_create_in_256 python_hashlib_Hacl_Streaming_SHA2_create_in_256
+#define Hacl_Streaming_SHA2_create_in_224 python_hashlib_Hacl_Streaming_SHA2_create_in_224
+#define Hacl_Streaming_SHA2_create_in_512 python_hashlib_Hacl_Streaming_SHA2_create_in_512
+#define Hacl_Streaming_SHA2_create_in_384 python_hashlib_Hacl_Streaming_SHA2_create_in_384
+#define Hacl_Streaming_SHA2_copy_256 python_hashlib_Hacl_Streaming_SHA2_copy_256
+#define Hacl_Streaming_SHA2_copy_224 python_hashlib_Hacl_Streaming_SHA2_copy_224
+#define Hacl_Streaming_SHA2_copy_512 python_hashlib_Hacl_Streaming_SHA2_copy_512
+#define Hacl_Streaming_SHA2_copy_384 python_hashlib_Hacl_Streaming_SHA2_copy_384
+#define Hacl_Streaming_SHA2_init_256 python_hashlib_Hacl_Streaming_SHA2_init_256
+#define Hacl_Streaming_SHA2_init_224 python_hashlib_Hacl_Streaming_SHA2_init_224
+#define Hacl_Streaming_SHA2_init_512 python_hashlib_Hacl_Streaming_SHA2_init_512
+#define Hacl_Streaming_SHA2_init_384 python_hashlib_Hacl_Streaming_SHA2_init_384
+#define Hacl_SHA2_Scalar32_sha512_init python_hashlib_Hacl_SHA2_Scalar32_sha512_init
+#define Hacl_Streaming_SHA2_update_256 python_hashlib_Hacl_Streaming_SHA2_update_256
+#define Hacl_Streaming_SHA2_update_224 python_hashlib_Hacl_Streaming_SHA2_update_224
+#define Hacl_Streaming_SHA2_update_512 python_hashlib_Hacl_Streaming_SHA2_update_512
+#define Hacl_Streaming_SHA2_update_384 python_hashlib_Hacl_Streaming_SHA2_update_384
+#define Hacl_Streaming_SHA2_finish_256 python_hashlib_Hacl_Streaming_SHA2_finish_256
+#define Hacl_Streaming_SHA2_finish_224 python_hashlib_Hacl_Streaming_SHA2_finish_224
+#define Hacl_Streaming_SHA2_finish_512 python_hashlib_Hacl_Streaming_SHA2_finish_512
+#define Hacl_Streaming_SHA2_finish_384 python_hashlib_Hacl_Streaming_SHA2_finish_384
+#define Hacl_Streaming_SHA2_free_256 python_hashlib_Hacl_Streaming_SHA2_free_256
+#define Hacl_Streaming_SHA2_free_224 python_hashlib_Hacl_Streaming_SHA2_free_224
+#define Hacl_Streaming_SHA2_free_512 python_hashlib_Hacl_Streaming_SHA2_free_512
+#define Hacl_Streaming_SHA2_free_384 python_hashlib_Hacl_Streaming_SHA2_free_384
+#define Hacl_Streaming_SHA2_sha256 python_hashlib_Hacl_Streaming_SHA2_sha256
+#define Hacl_Streaming_SHA2_sha224 python_hashlib_Hacl_Streaming_SHA2_sha224
+#define Hacl_Streaming_SHA2_sha512 python_hashlib_Hacl_Streaming_SHA2_sha512
+#define Hacl_Streaming_SHA2_sha384 python_hashlib_Hacl_Streaming_SHA2_sha384
+
+#define Hacl_Streaming_MD5_legacy_create_in python_hashlib_Hacl_Streaming_MD5_legacy_create_in
+#define Hacl_Streaming_MD5_legacy_init python_hashlib_Hacl_Streaming_MD5_legacy_init
+#define Hacl_Streaming_MD5_legacy_update python_hashlib_Hacl_Streaming_MD5_legacy_update
+#define Hacl_Streaming_MD5_legacy_finish python_hashlib_Hacl_Streaming_MD5_legacy_finish
+#define Hacl_Streaming_MD5_legacy_free python_hashlib_Hacl_Streaming_MD5_legacy_free
+#define Hacl_Streaming_MD5_legacy_copy python_hashlib_Hacl_Streaming_MD5_legacy_copy
+#define Hacl_Streaming_MD5_legacy_hash python_hashlib_Hacl_Streaming_MD5_legacy_hash
+
+#define Hacl_Streaming_SHA1_legacy_create_in python_hashlib_Hacl_Streaming_SHA1_legacy_create_in
+#define Hacl_Streaming_SHA1_legacy_init python_hashlib_Hacl_Streaming_SHA1_legacy_init
+#define Hacl_Streaming_SHA1_legacy_update python_hashlib_Hacl_Streaming_SHA1_legacy_update
+#define Hacl_Streaming_SHA1_legacy_finish python_hashlib_Hacl_Streaming_SHA1_legacy_finish
+#define Hacl_Streaming_SHA1_legacy_free python_hashlib_Hacl_Streaming_SHA1_legacy_free
+#define Hacl_Streaming_SHA1_legacy_copy python_hashlib_Hacl_Streaming_SHA1_legacy_copy
+#define Hacl_Streaming_SHA1_legacy_hash python_hashlib_Hacl_Streaming_SHA1_legacy_hash
+
+#define Hacl_Hash_SHA3_update_last_sha3 python_hashlib_Hacl_Hash_SHA3_update_last_sha3
+#define Hacl_Hash_SHA3_update_multi_sha3 python_hashlib_Hacl_Hash_SHA3_update_multi_sha3
+#define Hacl_Impl_SHA3_absorb_inner python_hashlib_Hacl_Impl_SHA3_absorb_inner
+#define Hacl_Impl_SHA3_keccak python_hashlib_Hacl_Impl_SHA3_keccak
+#define Hacl_Impl_SHA3_loadState python_hashlib_Hacl_Impl_SHA3_loadState
+#define Hacl_Impl_SHA3_squeeze python_hashlib_Hacl_Impl_SHA3_squeeze
+#define Hacl_Impl_SHA3_state_permute python_hashlib_Hacl_Impl_SHA3_state_permute
+#define Hacl_SHA3_sha3_224 python_hashlib_Hacl_SHA3_sha3_224
+#define Hacl_SHA3_sha3_256 python_hashlib_Hacl_SHA3_sha3_256
+#define Hacl_SHA3_sha3_384 python_hashlib_Hacl_SHA3_sha3_384
+#define Hacl_SHA3_sha3_512 python_hashlib_Hacl_SHA3_sha3_512
+#define Hacl_SHA3_shake128_hacl python_hashlib_Hacl_SHA3_shake128_hacl
+#define Hacl_SHA3_shake256_hacl python_hashlib_Hacl_SHA3_shake256_hacl
+#define Hacl_Streaming_Keccak_block_len python_hashlib_Hacl_Streaming_Keccak_block_len
+#define Hacl_Streaming_Keccak_copy python_hashlib_Hacl_Streaming_Keccak_copy
+#define Hacl_Streaming_Keccak_finish python_hashlib_Hacl_Streaming_Keccak_finish
+#define Hacl_Streaming_Keccak_free python_hashlib_Hacl_Streaming_Keccak_free
+#define Hacl_Streaming_Keccak_get_alg python_hashlib_Hacl_Streaming_Keccak_get_alg
+#define Hacl_Streaming_Keccak_hash_len python_hashlib_Hacl_Streaming_Keccak_hash_len
+#define Hacl_Streaming_Keccak_is_shake python_hashlib_Hacl_Streaming_Keccak_is_shake
+#define Hacl_Streaming_Keccak_malloc python_hashlib_Hacl_Streaming_Keccak_malloc
+#define Hacl_Streaming_Keccak_reset python_hashlib_Hacl_Streaming_Keccak_reset
+#define Hacl_Streaming_Keccak_update python_hashlib_Hacl_Streaming_Keccak_update
+
+#endif // _PYTHON_HACL_NAMESPACES_H
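A minimal illustration of the renaming (hypothetical translation unit; the one-shot signature is assumed to follow the (input, input_len, dst) convention of the legacy_hash declarations earlier in this change):

    #include <stdint.h>
    #include "python_hacl_namespaces.h"  /* must precede the HACL* headers */
    #include "Hacl_Hash_SHA2.h"

    /* The preprocessor rewrites the call below, so the object file references
     * python_hashlib_Hacl_Streaming_SHA2_sha256 and cannot collide with
     * another copy of HACL* linked into the same process. */
    void digest256(uint8_t *input, uint32_t input_len, uint8_t *dst)
    {
      Hacl_Streaming_SHA2_sha256(input, input_len, dst);
    }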