path: root/contrib/tools/python3/Modules
author    shadchin <[email protected]>    2024-04-28 21:17:44 +0300
committer shadchin <[email protected]>    2024-04-28 21:25:54 +0300
commit    a55d99a3eb72f90355bc146baeda18aa7eb97352 (patch)
tree      b17cfed786effe8b81bba022239d6729f716fbeb /contrib/tools/python3/Modules
parent    67bf49d08acf1277eff4c336021ac22d964bb4c4 (diff)
Update Python 3 to 3.12.3
7d09de7d8b99ea2be554ef0fc61276942ca9c2e1
Diffstat (limited to 'contrib/tools/python3/Modules')
-rw-r--r--  contrib/tools/python3/Modules/_collectionsmodule.c  3
-rw-r--r--  contrib/tools/python3/Modules/_csv.c  26
-rw-r--r--  contrib/tools/python3/Modules/_decimal/_decimal.c  183
-rw-r--r--  contrib/tools/python3/Modules/_elementtree.c  35
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c  688
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h  17
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c  339
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h  17
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c  932
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h  104
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c  526
-rw-r--r--  contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h  51
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h  18
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h  84
-rw-r--r--  contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h  185
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h  17
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h  17
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h  138
-rw-r--r--  contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h  4
-rw-r--r--  contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h  119
-rw-r--r--  contrib/tools/python3/Modules/_io/bufferedio.c  21
-rw-r--r--  contrib/tools/python3/Modules/_multiprocessing/posixshmem.c  14
-rw-r--r--  contrib/tools/python3/Modules/_posixsubprocess.c  4
-rw-r--r--  contrib/tools/python3/Modules/_sqlite/ya.make  4
-rw-r--r--  contrib/tools/python3/Modules/_ssl.c  66
-rw-r--r--  contrib/tools/python3/Modules/_threadmodule.c  2
-rw-r--r--  contrib/tools/python3/Modules/arraymodule.c  2
-rw-r--r--  contrib/tools/python3/Modules/clinic/_elementtree.c.h  19
-rw-r--r--  contrib/tools/python3/Modules/clinic/pyexpat.c.h  49
-rw-r--r--  contrib/tools/python3/Modules/gcmodule.c  3
-rw-r--r--  contrib/tools/python3/Modules/getpath.c  4
-rw-r--r--  contrib/tools/python3/Modules/itertoolsmodule.c  14
-rw-r--r--  contrib/tools/python3/Modules/md5module.c  20
-rw-r--r--  contrib/tools/python3/Modules/overlapped.c  19
-rw-r--r--  contrib/tools/python3/Modules/posixmodule.c  184
-rw-r--r--  contrib/tools/python3/Modules/pwdmodule.c  61
-rw-r--r--  contrib/tools/python3/Modules/pyexpat.c  53
-rw-r--r--  contrib/tools/python3/Modules/selectmodule.c  4
-rw-r--r--  contrib/tools/python3/Modules/sha1module.c  18
-rw-r--r--  contrib/tools/python3/Modules/sha2module.c  40
-rw-r--r--  contrib/tools/python3/Modules/sha3module.c  44
41 files changed, 2078 insertions, 2070 deletions
diff --git a/contrib/tools/python3/Modules/_collectionsmodule.c b/contrib/tools/python3/Modules/_collectionsmodule.c
index 9a81531bdff..4e195f0d5f5 100644
--- a/contrib/tools/python3/Modules/_collectionsmodule.c
+++ b/contrib/tools/python3/Modules/_collectionsmodule.c
@@ -1116,8 +1116,9 @@ deque_index(dequeobject *deque, PyObject *const *args, Py_ssize_t nargs)
n = stop - i;
while (--n >= 0) {
CHECK_NOT_END(b);
- item = b->data[index];
+ item = Py_NewRef(b->data[index]);
cmp = PyObject_RichCompareBool(item, v, Py_EQ);
+ Py_DECREF(item);
if (cmp > 0)
return PyLong_FromSsize_t(stop - n - 1);
if (cmp < 0)
diff --git a/contrib/tools/python3/Modules/_csv.c b/contrib/tools/python3/Modules/_csv.c
index 91cb63628a1..d63eac1bf7a 100644
--- a/contrib/tools/python3/Modules/_csv.c
+++ b/contrib/tools/python3/Modules/_csv.c
@@ -1109,6 +1109,8 @@ join_append_data(WriterObj *self, int field_kind, const void *field_data,
if (c == dialect->delimiter ||
c == dialect->escapechar ||
c == dialect->quotechar ||
+ c == '\n' ||
+ c == '\r' ||
PyUnicode_FindChar(
dialect->lineterminator, c, 0,
PyUnicode_GET_LENGTH(dialect->lineterminator), 1) >= 0) {
@@ -1180,6 +1182,7 @@ join_check_rec_size(WriterObj *self, Py_ssize_t rec_len)
static int
join_append(WriterObj *self, PyObject *field, int quoted)
{
+ DialectObj *dialect = self->dialect;
int field_kind = -1;
const void *field_data = NULL;
Py_ssize_t field_len = 0;
@@ -1192,6 +1195,19 @@ join_append(WriterObj *self, PyObject *field, int quoted)
field_data = PyUnicode_DATA(field);
field_len = PyUnicode_GET_LENGTH(field);
}
+ if (!field_len && dialect->delimiter == ' ' && dialect->skipinitialspace) {
+ if (dialect->quoting == QUOTE_NONE ||
+ (field == NULL &&
+ (dialect->quoting == QUOTE_STRINGS ||
+ dialect->quoting == QUOTE_NOTNULL)))
+ {
+ PyErr_Format(self->error_obj,
+ "empty field must be quoted if delimiter is a space "
+ "and skipinitialspace is true");
+ return 0;
+ }
+ quoted = 1;
+ }
rec_len = join_append_data(self, field_kind, field_data, field_len,
&quoted, 0);
if (rec_len < 0)
@@ -1243,6 +1259,7 @@ csv_writerow(WriterObj *self, PyObject *seq)
{
DialectObj *dialect = self->dialect;
PyObject *iter, *field, *line, *result;
+ bool null_field = false;
iter = PyObject_GetIter(seq);
if (iter == NULL) {
@@ -1279,11 +1296,12 @@ csv_writerow(WriterObj *self, PyObject *seq)
break;
}
+ null_field = (field == Py_None);
if (PyUnicode_Check(field)) {
append_ok = join_append(self, field, quoted);
Py_DECREF(field);
}
- else if (field == Py_None) {
+ else if (null_field) {
append_ok = join_append(self, NULL, quoted);
Py_DECREF(field);
}
@@ -1309,7 +1327,11 @@ csv_writerow(WriterObj *self, PyObject *seq)
return NULL;
if (self->num_fields > 0 && self->rec_len == 0) {
- if (dialect->quoting == QUOTE_NONE) {
+ if (dialect->quoting == QUOTE_NONE ||
+ (null_field &&
+ (dialect->quoting == QUOTE_STRINGS ||
+ dialect->quoting == QUOTE_NOTNULL)))
+ {
PyErr_Format(self->error_obj,
"single empty field record must be quoted");
return NULL;
diff --git a/contrib/tools/python3/Modules/_decimal/_decimal.c b/contrib/tools/python3/Modules/_decimal/_decimal.c
index 70b13982bb0..1a195816fe5 100644
--- a/contrib/tools/python3/Modules/_decimal/_decimal.c
+++ b/contrib/tools/python3/Modules/_decimal/_decimal.c
@@ -143,6 +143,8 @@ static PyObject *default_context_template = NULL;
static PyObject *basic_context_template = NULL;
static PyObject *extended_context_template = NULL;
+/* Invariant: NULL or pointer to _pydecimal.Decimal */
+static PyObject *PyDecimal = NULL;
/* Error codes for functions that return signals or conditions */
#define DEC_INVALID_SIGNALS (MPD_Max_status+1U)
@@ -3219,56 +3221,6 @@ dotsep_as_utf8(const char *s)
return utf8;
}
-/* copy of libmpdec _mpd_round() */
-static void
-_mpd_round(mpd_t *result, const mpd_t *a, mpd_ssize_t prec,
- const mpd_context_t *ctx, uint32_t *status)
-{
- mpd_ssize_t exp = a->exp + a->digits - prec;
-
- if (prec <= 0) {
- mpd_seterror(result, MPD_Invalid_operation, status);
- return;
- }
- if (mpd_isspecial(a) || mpd_iszero(a)) {
- mpd_qcopy(result, a, status);
- return;
- }
-
- mpd_qrescale_fmt(result, a, exp, ctx, status);
- if (result->digits > prec) {
- mpd_qrescale_fmt(result, result, exp+1, ctx, status);
- }
-}
-
-/* Locate negative zero "z" option within a UTF-8 format spec string.
- * Returns pointer to "z", else NULL.
- * The portion of the spec we're working with is [[fill]align][sign][z] */
-static const char *
-format_spec_z_search(char const *fmt, Py_ssize_t size) {
- char const *pos = fmt;
- char const *fmt_end = fmt + size;
- /* skip over [[fill]align] (fill may be multi-byte character) */
- pos += 1;
- while (pos < fmt_end && *pos & 0x80) {
- pos += 1;
- }
- if (pos < fmt_end && strchr("<>=^", *pos) != NULL) {
- pos += 1;
- } else {
- /* fill not present-- skip over [align] */
- pos = fmt;
- if (pos < fmt_end && strchr("<>=^", *pos) != NULL) {
- pos += 1;
- }
- }
- /* skip over [sign] */
- if (pos < fmt_end && strchr("+- ", *pos) != NULL) {
- pos += 1;
- }
- return pos < fmt_end && *pos == 'z' ? pos : NULL;
-}
-
static int
dict_get_item_string(PyObject *dict, const char *key, PyObject **valueobj, const char **valuestr)
{
@@ -3294,6 +3246,48 @@ dict_get_item_string(PyObject *dict, const char *key, PyObject **valueobj, const
return 0;
}
+/*
+ * Fallback _pydecimal formatting for new format specifiers that mpdecimal does
+ * not yet support. As documented, libmpdec follows the PEP-3101 format language:
+ * https://www.bytereef.org/mpdecimal/doc/libmpdec/assign-convert.html#to-string
+ */
+static PyObject *
+pydec_format(PyObject *dec, PyObject *context, PyObject *fmt)
+{
+ PyObject *result;
+ PyObject *pydec;
+ PyObject *u;
+
+ if (PyDecimal == NULL) {
+ PyDecimal = _PyImport_GetModuleAttrString("_pydecimal", "Decimal");
+ if (PyDecimal == NULL) {
+ return NULL;
+ }
+ }
+
+ u = dec_str(dec);
+ if (u == NULL) {
+ return NULL;
+ }
+
+ pydec = PyObject_CallOneArg(PyDecimal, u);
+ Py_DECREF(u);
+ if (pydec == NULL) {
+ return NULL;
+ }
+
+ result = PyObject_CallMethod(pydec, "__format__", "(OO)", fmt, context);
+ Py_DECREF(pydec);
+
+ if (result == NULL && PyErr_ExceptionMatches(PyExc_ValueError)) {
+ /* Do not confuse users with the _pydecimal exception */
+ PyErr_Clear();
+ PyErr_SetString(PyExc_ValueError, "invalid format string");
+ }
+
+ return result;
+}
+
/* Formatted representation of a PyDecObject. */
static PyObject *
dec_format(PyObject *dec, PyObject *args)
@@ -3306,16 +3300,11 @@ dec_format(PyObject *dec, PyObject *args)
PyObject *fmtarg;
PyObject *context;
mpd_spec_t spec;
- char const *fmt;
- char *fmt_copy = NULL;
+ char *fmt;
char *decstring = NULL;
uint32_t status = 0;
int replace_fillchar = 0;
- int no_neg_0 = 0;
Py_ssize_t size;
- mpd_t *mpd = MPD(dec);
- mpd_uint_t dt[MPD_MINALLOC_MAX];
- mpd_t tmp = {MPD_STATIC|MPD_STATIC_DATA,0,0,0,MPD_MINALLOC_MAX,dt};
CURRENT_CONTEXT(context);
@@ -3324,39 +3313,20 @@ dec_format(PyObject *dec, PyObject *args)
}
if (PyUnicode_Check(fmtarg)) {
- fmt = PyUnicode_AsUTF8AndSize(fmtarg, &size);
+ fmt = (char *)PyUnicode_AsUTF8AndSize(fmtarg, &size);
if (fmt == NULL) {
return NULL;
}
- /* NOTE: If https://github.com/python/cpython/pull/29438 lands, the
- * format string manipulation below can be eliminated by enhancing
- * the forked mpd_parse_fmt_str(). */
+
if (size > 0 && fmt[0] == '\0') {
/* NUL fill character: must be replaced with a valid UTF-8 char
before calling mpd_parse_fmt_str(). */
replace_fillchar = 1;
- fmt = fmt_copy = dec_strdup(fmt, size);
- if (fmt_copy == NULL) {
+ fmt = dec_strdup(fmt, size);
+ if (fmt == NULL) {
return NULL;
}
- fmt_copy[0] = '_';
- }
- /* Strip 'z' option, which isn't understood by mpd_parse_fmt_str().
- * NOTE: fmt is always null terminated by PyUnicode_AsUTF8AndSize() */
- char const *z_position = format_spec_z_search(fmt, size);
- if (z_position != NULL) {
- no_neg_0 = 1;
- size_t z_index = z_position - fmt;
- if (fmt_copy == NULL) {
- fmt = fmt_copy = dec_strdup(fmt, size);
- if (fmt_copy == NULL) {
- return NULL;
- }
- }
- /* Shift characters (including null terminator) left,
- overwriting the 'z' option. */
- memmove(fmt_copy + z_index, fmt_copy + z_index + 1, size - z_index);
- size -= 1;
+ fmt[0] = '_';
}
}
else {
@@ -3366,10 +3336,13 @@ dec_format(PyObject *dec, PyObject *args)
}
if (!mpd_parse_fmt_str(&spec, fmt, CtxCaps(context))) {
- PyErr_SetString(PyExc_ValueError,
- "invalid format string");
- goto finish;
+ if (replace_fillchar) {
+ PyMem_Free(fmt);
+ }
+
+ return pydec_format(dec, context, fmtarg);
}
+
if (replace_fillchar) {
/* In order to avoid clobbering parts of UTF-8 thousands separators or
decimal points when the substitution is reversed later, the actual
@@ -3422,45 +3395,8 @@ dec_format(PyObject *dec, PyObject *args)
}
}
- if (no_neg_0 && mpd_isnegative(mpd) && !mpd_isspecial(mpd)) {
- /* Round into a temporary (carefully mirroring the rounding
- of mpd_qformat_spec()), and check if the result is negative zero.
- If so, clear the sign and format the resulting positive zero. */
- mpd_ssize_t prec;
- mpd_qcopy(&tmp, mpd, &status);
- if (spec.prec >= 0) {
- switch (spec.type) {
- case 'f':
- mpd_qrescale(&tmp, &tmp, -spec.prec, CTX(context), &status);
- break;
- case '%':
- tmp.exp += 2;
- mpd_qrescale(&tmp, &tmp, -spec.prec, CTX(context), &status);
- break;
- case 'g':
- prec = (spec.prec == 0) ? 1 : spec.prec;
- if (tmp.digits > prec) {
- _mpd_round(&tmp, &tmp, prec, CTX(context), &status);
- }
- break;
- case 'e':
- if (!mpd_iszero(&tmp)) {
- _mpd_round(&tmp, &tmp, spec.prec+1, CTX(context), &status);
- }
- break;
- }
- }
- if (status & MPD_Errors) {
- PyErr_SetString(PyExc_ValueError, "unexpected error when rounding");
- goto finish;
- }
- if (mpd_iszero(&tmp)) {
- mpd_set_positive(&tmp);
- mpd = &tmp;
- }
- }
- decstring = mpd_qformat_spec(mpd, &spec, CTX(context), &status);
+ decstring = mpd_qformat_spec(MPD(dec), &spec, CTX(context), &status);
if (decstring == NULL) {
if (status & MPD_Malloc_error) {
PyErr_NoMemory();
@@ -3483,7 +3419,7 @@ finish:
Py_XDECREF(grouping);
Py_XDECREF(sep);
Py_XDECREF(dot);
- if (fmt_copy) PyMem_Free(fmt_copy);
+ if (replace_fillchar) PyMem_Free(fmt);
if (decstring) mpd_free(decstring);
return result;
}
@@ -5893,6 +5829,9 @@ PyInit__decimal(void)
/* Create the module */
ASSIGN_PTR(m, PyModule_Create(&_decimal_module));
+ /* For format specifiers not yet supported by libmpdec */
+ PyDecimal = NULL;
+
/* Add types to the module */
CHECK_INT(PyModule_AddObjectRef(m, "Decimal", (PyObject *)&PyDec_Type));
CHECK_INT(PyModule_AddObjectRef(m, "Context", (PyObject *)&PyDecContext_Type));
diff --git a/contrib/tools/python3/Modules/_elementtree.c b/contrib/tools/python3/Modules/_elementtree.c
index 620de8bb4c6..fcd4be9338f 100644
--- a/contrib/tools/python3/Modules/_elementtree.c
+++ b/contrib/tools/python3/Modules/_elementtree.c
@@ -3896,6 +3896,40 @@ _elementtree_XMLParser_close_impl(XMLParserObject *self)
}
/*[clinic input]
+_elementtree.XMLParser.flush
+
+[clinic start generated code]*/
+
+static PyObject *
+_elementtree_XMLParser_flush_impl(XMLParserObject *self)
+/*[clinic end generated code: output=42fdb8795ca24509 input=effbecdb28715949]*/
+{
+ if (!_check_xmlparser(self)) {
+ return NULL;
+ }
+
+ elementtreestate *st = self->state;
+
+ if (EXPAT(st, SetReparseDeferralEnabled) == NULL) {
+ Py_RETURN_NONE;
+ }
+
+ // NOTE: The Expat parser in the C implementation of ElementTree is not
+ // exposed to the outside; as a result we known that reparse deferral
+ // is currently enabled, or we would not even have access to function
+ // XML_SetReparseDeferralEnabled in the first place (which we checked
+ // for, a few lines up).
+
+ EXPAT(st, SetReparseDeferralEnabled)(self->parser, XML_FALSE);
+
+ PyObject *res = expat_parse(st, self, "", 0, XML_FALSE);
+
+ EXPAT(st, SetReparseDeferralEnabled)(self->parser, XML_TRUE);
+
+ return res;
+}
+
+/*[clinic input]
_elementtree.XMLParser.feed
data: object
@@ -4289,6 +4323,7 @@ static PyType_Spec treebuilder_spec = {
static PyMethodDef xmlparser_methods[] = {
_ELEMENTTREE_XMLPARSER_FEED_METHODDEF
_ELEMENTTREE_XMLPARSER_CLOSE_METHODDEF
+ _ELEMENTTREE_XMLPARSER_FLUSH_METHODDEF
_ELEMENTTREE_XMLPARSER__PARSE_WHOLE_METHODDEF
_ELEMENTTREE_XMLPARSER__SETEVENTS_METHODDEF
{NULL, NULL}
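[Editorial note: the new XMLParser.flush() above temporarily disables Expat's reparse deferral so that input already fed to the parser is processed immediately. A minimal Python-level sketch using the pull-parser wrapper; the tag names are illustrative, and if the linked Expat lacks reparse deferral, flush() is simply a no-op with the same final result.]

    >>> from xml.etree.ElementTree import XMLPullParser
    >>> parser = XMLPullParser(events=('start',))
    >>> parser.feed('<root><child')          # incomplete input may be deferred by Expat
    >>> parser.feed(' attr="1">')
    >>> parser.flush()                       # force deferred input to be parsed now
    >>> [(event, elem.tag) for event, elem in parser.read_events()]
    [('start', 'root'), ('start', 'child')]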
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c
index 222ac824f01..ed294839ed8 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.c
@@ -25,37 +25,29 @@
#include "internal/Hacl_Hash_MD5.h"
-static uint32_t
-_h0[4U] =
- { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+static uint32_t _h0[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
static uint32_t
_t[64U] =
{
- (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU,
- (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U,
- (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU,
- (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U,
- (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU,
- (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U,
- (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU,
- (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU,
- (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU,
- (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U,
- (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U,
- (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U,
- (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U,
- (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U,
- (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U,
- (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U
+ 0xd76aa478U, 0xe8c7b756U, 0x242070dbU, 0xc1bdceeeU, 0xf57c0fafU, 0x4787c62aU, 0xa8304613U,
+ 0xfd469501U, 0x698098d8U, 0x8b44f7afU, 0xffff5bb1U, 0x895cd7beU, 0x6b901122U, 0xfd987193U,
+ 0xa679438eU, 0x49b40821U, 0xf61e2562U, 0xc040b340U, 0x265e5a51U, 0xe9b6c7aaU, 0xd62f105dU,
+ 0x02441453U, 0xd8a1e681U, 0xe7d3fbc8U, 0x21e1cde6U, 0xc33707d6U, 0xf4d50d87U, 0x455a14edU,
+ 0xa9e3e905U, 0xfcefa3f8U, 0x676f02d9U, 0x8d2a4c8aU, 0xfffa3942U, 0x8771f681U, 0x6d9d6122U,
+ 0xfde5380cU, 0xa4beea44U, 0x4bdecfa9U, 0xf6bb4b60U, 0xbebfbc70U, 0x289b7ec6U, 0xeaa127faU,
+ 0xd4ef3085U, 0x4881d05U, 0xd9d4d039U, 0xe6db99e5U, 0x1fa27cf8U, 0xc4ac5665U, 0xf4292244U,
+ 0x432aff97U, 0xab9423a7U, 0xfc93a039U, 0x655b59c3U, 0x8f0ccc92U, 0xffeff47dU, 0x85845dd1U,
+ 0x6fa87e4fU, 0xfe2ce6e0U, 0xa3014314U, 0x4e0811a1U, 0xf7537e82U, 0xbd3af235U, 0x2ad7d2bbU,
+ 0xeb86d391U
};
-void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s)
+void Hacl_Hash_MD5_init(uint32_t *s)
{
- KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, s[i] = _h0[i];);
+ KRML_MAYBE_FOR4(i, 0U, 4U, 1U, s[i] = _h0[i];);
}
-static void legacy_update(uint32_t *abcd, uint8_t *x)
+static void update(uint32_t *abcd, uint8_t *x)
{
uint32_t aa = abcd[0U];
uint32_t bb = abcd[1U];
@@ -74,14 +66,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb0
+
((va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0)
- << (uint32_t)7U
- | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> (uint32_t)25U);
+ << 7U
+ | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> 25U);
abcd[0U] = v;
uint32_t va0 = abcd[3U];
uint32_t vb1 = abcd[0U];
uint32_t vc1 = abcd[1U];
uint32_t vd1 = abcd[2U];
- uint8_t *b1 = x + (uint32_t)4U;
+ uint8_t *b1 = x + 4U;
uint32_t u0 = load32_le(b1);
uint32_t xk0 = u0;
uint32_t ti1 = _t[1U];
@@ -90,14 +82,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb1
+
((va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1)
- << (uint32_t)12U
- | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> (uint32_t)20U);
+ << 12U
+ | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> 20U);
abcd[3U] = v0;
uint32_t va1 = abcd[2U];
uint32_t vb2 = abcd[3U];
uint32_t vc2 = abcd[0U];
uint32_t vd2 = abcd[1U];
- uint8_t *b2 = x + (uint32_t)8U;
+ uint8_t *b2 = x + 8U;
uint32_t u1 = load32_le(b2);
uint32_t xk1 = u1;
uint32_t ti2 = _t[2U];
@@ -106,14 +98,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb2
+
((va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2)
- << (uint32_t)17U
- | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> (uint32_t)15U);
+ << 17U
+ | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> 15U);
abcd[2U] = v1;
uint32_t va2 = abcd[1U];
uint32_t vb3 = abcd[2U];
uint32_t vc3 = abcd[3U];
uint32_t vd3 = abcd[0U];
- uint8_t *b3 = x + (uint32_t)12U;
+ uint8_t *b3 = x + 12U;
uint32_t u2 = load32_le(b3);
uint32_t xk2 = u2;
uint32_t ti3 = _t[3U];
@@ -122,14 +114,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb3
+
((va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3)
- << (uint32_t)22U
- | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> (uint32_t)10U);
+ << 22U
+ | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> 10U);
abcd[1U] = v2;
uint32_t va3 = abcd[0U];
uint32_t vb4 = abcd[1U];
uint32_t vc4 = abcd[2U];
uint32_t vd4 = abcd[3U];
- uint8_t *b4 = x + (uint32_t)16U;
+ uint8_t *b4 = x + 16U;
uint32_t u3 = load32_le(b4);
uint32_t xk3 = u3;
uint32_t ti4 = _t[4U];
@@ -138,14 +130,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb4
+
((va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4)
- << (uint32_t)7U
- | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> (uint32_t)25U);
+ << 7U
+ | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> 25U);
abcd[0U] = v3;
uint32_t va4 = abcd[3U];
uint32_t vb5 = abcd[0U];
uint32_t vc5 = abcd[1U];
uint32_t vd5 = abcd[2U];
- uint8_t *b5 = x + (uint32_t)20U;
+ uint8_t *b5 = x + 20U;
uint32_t u4 = load32_le(b5);
uint32_t xk4 = u4;
uint32_t ti5 = _t[5U];
@@ -154,14 +146,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb5
+
((va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5)
- << (uint32_t)12U
- | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> (uint32_t)20U);
+ << 12U
+ | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> 20U);
abcd[3U] = v4;
uint32_t va5 = abcd[2U];
uint32_t vb6 = abcd[3U];
uint32_t vc6 = abcd[0U];
uint32_t vd6 = abcd[1U];
- uint8_t *b6 = x + (uint32_t)24U;
+ uint8_t *b6 = x + 24U;
uint32_t u5 = load32_le(b6);
uint32_t xk5 = u5;
uint32_t ti6 = _t[6U];
@@ -170,14 +162,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb6
+
((va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6)
- << (uint32_t)17U
- | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> (uint32_t)15U);
+ << 17U
+ | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> 15U);
abcd[2U] = v5;
uint32_t va6 = abcd[1U];
uint32_t vb7 = abcd[2U];
uint32_t vc7 = abcd[3U];
uint32_t vd7 = abcd[0U];
- uint8_t *b7 = x + (uint32_t)28U;
+ uint8_t *b7 = x + 28U;
uint32_t u6 = load32_le(b7);
uint32_t xk6 = u6;
uint32_t ti7 = _t[7U];
@@ -186,14 +178,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb7
+
((va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7)
- << (uint32_t)22U
- | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> (uint32_t)10U);
+ << 22U
+ | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> 10U);
abcd[1U] = v6;
uint32_t va7 = abcd[0U];
uint32_t vb8 = abcd[1U];
uint32_t vc8 = abcd[2U];
uint32_t vd8 = abcd[3U];
- uint8_t *b8 = x + (uint32_t)32U;
+ uint8_t *b8 = x + 32U;
uint32_t u7 = load32_le(b8);
uint32_t xk7 = u7;
uint32_t ti8 = _t[8U];
@@ -202,14 +194,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb8
+
((va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8)
- << (uint32_t)7U
- | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> (uint32_t)25U);
+ << 7U
+ | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> 25U);
abcd[0U] = v7;
uint32_t va8 = abcd[3U];
uint32_t vb9 = abcd[0U];
uint32_t vc9 = abcd[1U];
uint32_t vd9 = abcd[2U];
- uint8_t *b9 = x + (uint32_t)36U;
+ uint8_t *b9 = x + 36U;
uint32_t u8 = load32_le(b9);
uint32_t xk8 = u8;
uint32_t ti9 = _t[9U];
@@ -218,14 +210,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb9
+
((va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9)
- << (uint32_t)12U
- | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> (uint32_t)20U);
+ << 12U
+ | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> 20U);
abcd[3U] = v8;
uint32_t va9 = abcd[2U];
uint32_t vb10 = abcd[3U];
uint32_t vc10 = abcd[0U];
uint32_t vd10 = abcd[1U];
- uint8_t *b10 = x + (uint32_t)40U;
+ uint8_t *b10 = x + 40U;
uint32_t u9 = load32_le(b10);
uint32_t xk9 = u9;
uint32_t ti10 = _t[10U];
@@ -234,14 +226,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb10
+
((va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10)
- << (uint32_t)17U
- | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> (uint32_t)15U);
+ << 17U
+ | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> 15U);
abcd[2U] = v9;
uint32_t va10 = abcd[1U];
uint32_t vb11 = abcd[2U];
uint32_t vc11 = abcd[3U];
uint32_t vd11 = abcd[0U];
- uint8_t *b11 = x + (uint32_t)44U;
+ uint8_t *b11 = x + 44U;
uint32_t u10 = load32_le(b11);
uint32_t xk10 = u10;
uint32_t ti11 = _t[11U];
@@ -250,14 +242,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb11
+
((va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11)
- << (uint32_t)22U
- | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> (uint32_t)10U);
+ << 22U
+ | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> 10U);
abcd[1U] = v10;
uint32_t va11 = abcd[0U];
uint32_t vb12 = abcd[1U];
uint32_t vc12 = abcd[2U];
uint32_t vd12 = abcd[3U];
- uint8_t *b12 = x + (uint32_t)48U;
+ uint8_t *b12 = x + 48U;
uint32_t u11 = load32_le(b12);
uint32_t xk11 = u11;
uint32_t ti12 = _t[12U];
@@ -266,14 +258,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb12
+
((va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12)
- << (uint32_t)7U
- | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> (uint32_t)25U);
+ << 7U
+ | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> 25U);
abcd[0U] = v11;
uint32_t va12 = abcd[3U];
uint32_t vb13 = abcd[0U];
uint32_t vc13 = abcd[1U];
uint32_t vd13 = abcd[2U];
- uint8_t *b13 = x + (uint32_t)52U;
+ uint8_t *b13 = x + 52U;
uint32_t u12 = load32_le(b13);
uint32_t xk12 = u12;
uint32_t ti13 = _t[13U];
@@ -282,14 +274,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb13
+
((va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13)
- << (uint32_t)12U
- | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> (uint32_t)20U);
+ << 12U
+ | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> 20U);
abcd[3U] = v12;
uint32_t va13 = abcd[2U];
uint32_t vb14 = abcd[3U];
uint32_t vc14 = abcd[0U];
uint32_t vd14 = abcd[1U];
- uint8_t *b14 = x + (uint32_t)56U;
+ uint8_t *b14 = x + 56U;
uint32_t u13 = load32_le(b14);
uint32_t xk13 = u13;
uint32_t ti14 = _t[14U];
@@ -298,14 +290,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb14
+
((va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14)
- << (uint32_t)17U
- | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> (uint32_t)15U);
+ << 17U
+ | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> 15U);
abcd[2U] = v13;
uint32_t va14 = abcd[1U];
uint32_t vb15 = abcd[2U];
uint32_t vc15 = abcd[3U];
uint32_t vd15 = abcd[0U];
- uint8_t *b15 = x + (uint32_t)60U;
+ uint8_t *b15 = x + 60U;
uint32_t u14 = load32_le(b15);
uint32_t xk14 = u14;
uint32_t ti15 = _t[15U];
@@ -314,14 +306,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb15
+
((va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15)
- << (uint32_t)22U
- | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> (uint32_t)10U);
+ << 22U
+ | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> 10U);
abcd[1U] = v14;
uint32_t va15 = abcd[0U];
uint32_t vb16 = abcd[1U];
uint32_t vc16 = abcd[2U];
uint32_t vd16 = abcd[3U];
- uint8_t *b16 = x + (uint32_t)4U;
+ uint8_t *b16 = x + 4U;
uint32_t u15 = load32_le(b16);
uint32_t xk15 = u15;
uint32_t ti16 = _t[16U];
@@ -330,14 +322,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb16
+
((va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16)
- << (uint32_t)5U
- | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> (uint32_t)27U);
+ << 5U
+ | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> 27U);
abcd[0U] = v15;
uint32_t va16 = abcd[3U];
uint32_t vb17 = abcd[0U];
uint32_t vc17 = abcd[1U];
uint32_t vd17 = abcd[2U];
- uint8_t *b17 = x + (uint32_t)24U;
+ uint8_t *b17 = x + 24U;
uint32_t u16 = load32_le(b17);
uint32_t xk16 = u16;
uint32_t ti17 = _t[17U];
@@ -346,14 +338,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb17
+
((va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17)
- << (uint32_t)9U
- | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> (uint32_t)23U);
+ << 9U
+ | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> 23U);
abcd[3U] = v16;
uint32_t va17 = abcd[2U];
uint32_t vb18 = abcd[3U];
uint32_t vc18 = abcd[0U];
uint32_t vd18 = abcd[1U];
- uint8_t *b18 = x + (uint32_t)44U;
+ uint8_t *b18 = x + 44U;
uint32_t u17 = load32_le(b18);
uint32_t xk17 = u17;
uint32_t ti18 = _t[18U];
@@ -362,8 +354,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb18
+
((va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18)
- << (uint32_t)14U
- | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> (uint32_t)18U);
+ << 14U
+ | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> 18U);
abcd[2U] = v17;
uint32_t va18 = abcd[1U];
uint32_t vb19 = abcd[2U];
@@ -378,14 +370,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb19
+
((va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19)
- << (uint32_t)20U
- | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> (uint32_t)12U);
+ << 20U
+ | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> 12U);
abcd[1U] = v18;
uint32_t va19 = abcd[0U];
uint32_t vb20 = abcd[1U];
uint32_t vc20 = abcd[2U];
uint32_t vd20 = abcd[3U];
- uint8_t *b20 = x + (uint32_t)20U;
+ uint8_t *b20 = x + 20U;
uint32_t u19 = load32_le(b20);
uint32_t xk19 = u19;
uint32_t ti20 = _t[20U];
@@ -394,14 +386,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb20
+
((va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20)
- << (uint32_t)5U
- | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> (uint32_t)27U);
+ << 5U
+ | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> 27U);
abcd[0U] = v19;
uint32_t va20 = abcd[3U];
uint32_t vb21 = abcd[0U];
uint32_t vc21 = abcd[1U];
uint32_t vd21 = abcd[2U];
- uint8_t *b21 = x + (uint32_t)40U;
+ uint8_t *b21 = x + 40U;
uint32_t u20 = load32_le(b21);
uint32_t xk20 = u20;
uint32_t ti21 = _t[21U];
@@ -410,14 +402,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb21
+
((va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21)
- << (uint32_t)9U
- | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> (uint32_t)23U);
+ << 9U
+ | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> 23U);
abcd[3U] = v20;
uint32_t va21 = abcd[2U];
uint32_t vb22 = abcd[3U];
uint32_t vc22 = abcd[0U];
uint32_t vd22 = abcd[1U];
- uint8_t *b22 = x + (uint32_t)60U;
+ uint8_t *b22 = x + 60U;
uint32_t u21 = load32_le(b22);
uint32_t xk21 = u21;
uint32_t ti22 = _t[22U];
@@ -426,14 +418,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb22
+
((va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22)
- << (uint32_t)14U
- | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> (uint32_t)18U);
+ << 14U
+ | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> 18U);
abcd[2U] = v21;
uint32_t va22 = abcd[1U];
uint32_t vb23 = abcd[2U];
uint32_t vc23 = abcd[3U];
uint32_t vd23 = abcd[0U];
- uint8_t *b23 = x + (uint32_t)16U;
+ uint8_t *b23 = x + 16U;
uint32_t u22 = load32_le(b23);
uint32_t xk22 = u22;
uint32_t ti23 = _t[23U];
@@ -442,14 +434,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb23
+
((va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23)
- << (uint32_t)20U
- | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> (uint32_t)12U);
+ << 20U
+ | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> 12U);
abcd[1U] = v22;
uint32_t va23 = abcd[0U];
uint32_t vb24 = abcd[1U];
uint32_t vc24 = abcd[2U];
uint32_t vd24 = abcd[3U];
- uint8_t *b24 = x + (uint32_t)36U;
+ uint8_t *b24 = x + 36U;
uint32_t u23 = load32_le(b24);
uint32_t xk23 = u23;
uint32_t ti24 = _t[24U];
@@ -458,14 +450,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb24
+
((va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24)
- << (uint32_t)5U
- | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> (uint32_t)27U);
+ << 5U
+ | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> 27U);
abcd[0U] = v23;
uint32_t va24 = abcd[3U];
uint32_t vb25 = abcd[0U];
uint32_t vc25 = abcd[1U];
uint32_t vd25 = abcd[2U];
- uint8_t *b25 = x + (uint32_t)56U;
+ uint8_t *b25 = x + 56U;
uint32_t u24 = load32_le(b25);
uint32_t xk24 = u24;
uint32_t ti25 = _t[25U];
@@ -474,14 +466,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb25
+
((va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25)
- << (uint32_t)9U
- | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> (uint32_t)23U);
+ << 9U
+ | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> 23U);
abcd[3U] = v24;
uint32_t va25 = abcd[2U];
uint32_t vb26 = abcd[3U];
uint32_t vc26 = abcd[0U];
uint32_t vd26 = abcd[1U];
- uint8_t *b26 = x + (uint32_t)12U;
+ uint8_t *b26 = x + 12U;
uint32_t u25 = load32_le(b26);
uint32_t xk25 = u25;
uint32_t ti26 = _t[26U];
@@ -490,14 +482,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb26
+
((va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26)
- << (uint32_t)14U
- | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> (uint32_t)18U);
+ << 14U
+ | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> 18U);
abcd[2U] = v25;
uint32_t va26 = abcd[1U];
uint32_t vb27 = abcd[2U];
uint32_t vc27 = abcd[3U];
uint32_t vd27 = abcd[0U];
- uint8_t *b27 = x + (uint32_t)32U;
+ uint8_t *b27 = x + 32U;
uint32_t u26 = load32_le(b27);
uint32_t xk26 = u26;
uint32_t ti27 = _t[27U];
@@ -506,14 +498,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb27
+
((va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27)
- << (uint32_t)20U
- | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> (uint32_t)12U);
+ << 20U
+ | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> 12U);
abcd[1U] = v26;
uint32_t va27 = abcd[0U];
uint32_t vb28 = abcd[1U];
uint32_t vc28 = abcd[2U];
uint32_t vd28 = abcd[3U];
- uint8_t *b28 = x + (uint32_t)52U;
+ uint8_t *b28 = x + 52U;
uint32_t u27 = load32_le(b28);
uint32_t xk27 = u27;
uint32_t ti28 = _t[28U];
@@ -522,14 +514,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb28
+
((va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28)
- << (uint32_t)5U
- | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> (uint32_t)27U);
+ << 5U
+ | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> 27U);
abcd[0U] = v27;
uint32_t va28 = abcd[3U];
uint32_t vb29 = abcd[0U];
uint32_t vc29 = abcd[1U];
uint32_t vd29 = abcd[2U];
- uint8_t *b29 = x + (uint32_t)8U;
+ uint8_t *b29 = x + 8U;
uint32_t u28 = load32_le(b29);
uint32_t xk28 = u28;
uint32_t ti29 = _t[29U];
@@ -538,14 +530,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb29
+
((va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29)
- << (uint32_t)9U
- | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> (uint32_t)23U);
+ << 9U
+ | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> 23U);
abcd[3U] = v28;
uint32_t va29 = abcd[2U];
uint32_t vb30 = abcd[3U];
uint32_t vc30 = abcd[0U];
uint32_t vd30 = abcd[1U];
- uint8_t *b30 = x + (uint32_t)28U;
+ uint8_t *b30 = x + 28U;
uint32_t u29 = load32_le(b30);
uint32_t xk29 = u29;
uint32_t ti30 = _t[30U];
@@ -554,14 +546,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb30
+
((va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30)
- << (uint32_t)14U
- | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> (uint32_t)18U);
+ << 14U
+ | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> 18U);
abcd[2U] = v29;
uint32_t va30 = abcd[1U];
uint32_t vb31 = abcd[2U];
uint32_t vc31 = abcd[3U];
uint32_t vd31 = abcd[0U];
- uint8_t *b31 = x + (uint32_t)48U;
+ uint8_t *b31 = x + 48U;
uint32_t u30 = load32_le(b31);
uint32_t xk30 = u30;
uint32_t ti31 = _t[31U];
@@ -570,14 +562,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb31
+
((va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31)
- << (uint32_t)20U
- | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> (uint32_t)12U);
+ << 20U
+ | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> 12U);
abcd[1U] = v30;
uint32_t va31 = abcd[0U];
uint32_t vb32 = abcd[1U];
uint32_t vc32 = abcd[2U];
uint32_t vd32 = abcd[3U];
- uint8_t *b32 = x + (uint32_t)20U;
+ uint8_t *b32 = x + 20U;
uint32_t u31 = load32_le(b32);
uint32_t xk31 = u31;
uint32_t ti32 = _t[32U];
@@ -586,14 +578,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb32
+
((va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32)
- << (uint32_t)4U
- | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> (uint32_t)28U);
+ << 4U
+ | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> 28U);
abcd[0U] = v31;
uint32_t va32 = abcd[3U];
uint32_t vb33 = abcd[0U];
uint32_t vc33 = abcd[1U];
uint32_t vd33 = abcd[2U];
- uint8_t *b33 = x + (uint32_t)32U;
+ uint8_t *b33 = x + 32U;
uint32_t u32 = load32_le(b33);
uint32_t xk32 = u32;
uint32_t ti33 = _t[33U];
@@ -602,14 +594,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb33
+
((va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33)
- << (uint32_t)11U
- | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> (uint32_t)21U);
+ << 11U
+ | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> 21U);
abcd[3U] = v32;
uint32_t va33 = abcd[2U];
uint32_t vb34 = abcd[3U];
uint32_t vc34 = abcd[0U];
uint32_t vd34 = abcd[1U];
- uint8_t *b34 = x + (uint32_t)44U;
+ uint8_t *b34 = x + 44U;
uint32_t u33 = load32_le(b34);
uint32_t xk33 = u33;
uint32_t ti34 = _t[34U];
@@ -618,14 +610,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb34
+
((va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34)
- << (uint32_t)16U
- | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> (uint32_t)16U);
+ << 16U
+ | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> 16U);
abcd[2U] = v33;
uint32_t va34 = abcd[1U];
uint32_t vb35 = abcd[2U];
uint32_t vc35 = abcd[3U];
uint32_t vd35 = abcd[0U];
- uint8_t *b35 = x + (uint32_t)56U;
+ uint8_t *b35 = x + 56U;
uint32_t u34 = load32_le(b35);
uint32_t xk34 = u34;
uint32_t ti35 = _t[35U];
@@ -634,14 +626,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb35
+
((va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35)
- << (uint32_t)23U
- | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> (uint32_t)9U);
+ << 23U
+ | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> 9U);
abcd[1U] = v34;
uint32_t va35 = abcd[0U];
uint32_t vb36 = abcd[1U];
uint32_t vc36 = abcd[2U];
uint32_t vd36 = abcd[3U];
- uint8_t *b36 = x + (uint32_t)4U;
+ uint8_t *b36 = x + 4U;
uint32_t u35 = load32_le(b36);
uint32_t xk35 = u35;
uint32_t ti36 = _t[36U];
@@ -650,14 +642,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb36
+
((va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36)
- << (uint32_t)4U
- | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> (uint32_t)28U);
+ << 4U
+ | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> 28U);
abcd[0U] = v35;
uint32_t va36 = abcd[3U];
uint32_t vb37 = abcd[0U];
uint32_t vc37 = abcd[1U];
uint32_t vd37 = abcd[2U];
- uint8_t *b37 = x + (uint32_t)16U;
+ uint8_t *b37 = x + 16U;
uint32_t u36 = load32_le(b37);
uint32_t xk36 = u36;
uint32_t ti37 = _t[37U];
@@ -666,14 +658,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb37
+
((va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37)
- << (uint32_t)11U
- | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> (uint32_t)21U);
+ << 11U
+ | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> 21U);
abcd[3U] = v36;
uint32_t va37 = abcd[2U];
uint32_t vb38 = abcd[3U];
uint32_t vc38 = abcd[0U];
uint32_t vd38 = abcd[1U];
- uint8_t *b38 = x + (uint32_t)28U;
+ uint8_t *b38 = x + 28U;
uint32_t u37 = load32_le(b38);
uint32_t xk37 = u37;
uint32_t ti38 = _t[38U];
@@ -682,14 +674,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb38
+
((va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38)
- << (uint32_t)16U
- | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> (uint32_t)16U);
+ << 16U
+ | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> 16U);
abcd[2U] = v37;
uint32_t va38 = abcd[1U];
uint32_t vb39 = abcd[2U];
uint32_t vc39 = abcd[3U];
uint32_t vd39 = abcd[0U];
- uint8_t *b39 = x + (uint32_t)40U;
+ uint8_t *b39 = x + 40U;
uint32_t u38 = load32_le(b39);
uint32_t xk38 = u38;
uint32_t ti39 = _t[39U];
@@ -698,14 +690,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb39
+
((va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39)
- << (uint32_t)23U
- | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> (uint32_t)9U);
+ << 23U
+ | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> 9U);
abcd[1U] = v38;
uint32_t va39 = abcd[0U];
uint32_t vb40 = abcd[1U];
uint32_t vc40 = abcd[2U];
uint32_t vd40 = abcd[3U];
- uint8_t *b40 = x + (uint32_t)52U;
+ uint8_t *b40 = x + 52U;
uint32_t u39 = load32_le(b40);
uint32_t xk39 = u39;
uint32_t ti40 = _t[40U];
@@ -714,8 +706,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb40
+
((va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40)
- << (uint32_t)4U
- | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> (uint32_t)28U);
+ << 4U
+ | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> 28U);
abcd[0U] = v39;
uint32_t va40 = abcd[3U];
uint32_t vb41 = abcd[0U];
@@ -730,14 +722,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb41
+
((va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41)
- << (uint32_t)11U
- | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> (uint32_t)21U);
+ << 11U
+ | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> 21U);
abcd[3U] = v40;
uint32_t va41 = abcd[2U];
uint32_t vb42 = abcd[3U];
uint32_t vc42 = abcd[0U];
uint32_t vd42 = abcd[1U];
- uint8_t *b42 = x + (uint32_t)12U;
+ uint8_t *b42 = x + 12U;
uint32_t u41 = load32_le(b42);
uint32_t xk41 = u41;
uint32_t ti42 = _t[42U];
@@ -746,14 +738,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb42
+
((va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42)
- << (uint32_t)16U
- | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> (uint32_t)16U);
+ << 16U
+ | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> 16U);
abcd[2U] = v41;
uint32_t va42 = abcd[1U];
uint32_t vb43 = abcd[2U];
uint32_t vc43 = abcd[3U];
uint32_t vd43 = abcd[0U];
- uint8_t *b43 = x + (uint32_t)24U;
+ uint8_t *b43 = x + 24U;
uint32_t u42 = load32_le(b43);
uint32_t xk42 = u42;
uint32_t ti43 = _t[43U];
@@ -762,14 +754,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb43
+
((va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43)
- << (uint32_t)23U
- | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> (uint32_t)9U);
+ << 23U
+ | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> 9U);
abcd[1U] = v42;
uint32_t va43 = abcd[0U];
uint32_t vb44 = abcd[1U];
uint32_t vc44 = abcd[2U];
uint32_t vd44 = abcd[3U];
- uint8_t *b44 = x + (uint32_t)36U;
+ uint8_t *b44 = x + 36U;
uint32_t u43 = load32_le(b44);
uint32_t xk43 = u43;
uint32_t ti44 = _t[44U];
@@ -778,14 +770,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb44
+
((va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44)
- << (uint32_t)4U
- | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> (uint32_t)28U);
+ << 4U
+ | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> 28U);
abcd[0U] = v43;
uint32_t va44 = abcd[3U];
uint32_t vb45 = abcd[0U];
uint32_t vc45 = abcd[1U];
uint32_t vd45 = abcd[2U];
- uint8_t *b45 = x + (uint32_t)48U;
+ uint8_t *b45 = x + 48U;
uint32_t u44 = load32_le(b45);
uint32_t xk44 = u44;
uint32_t ti45 = _t[45U];
@@ -794,14 +786,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb45
+
((va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45)
- << (uint32_t)11U
- | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> (uint32_t)21U);
+ << 11U
+ | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> 21U);
abcd[3U] = v44;
uint32_t va45 = abcd[2U];
uint32_t vb46 = abcd[3U];
uint32_t vc46 = abcd[0U];
uint32_t vd46 = abcd[1U];
- uint8_t *b46 = x + (uint32_t)60U;
+ uint8_t *b46 = x + 60U;
uint32_t u45 = load32_le(b46);
uint32_t xk45 = u45;
uint32_t ti46 = _t[46U];
@@ -810,14 +802,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb46
+
((va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46)
- << (uint32_t)16U
- | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> (uint32_t)16U);
+ << 16U
+ | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> 16U);
abcd[2U] = v45;
uint32_t va46 = abcd[1U];
uint32_t vb47 = abcd[2U];
uint32_t vc47 = abcd[3U];
uint32_t vd47 = abcd[0U];
- uint8_t *b47 = x + (uint32_t)8U;
+ uint8_t *b47 = x + 8U;
uint32_t u46 = load32_le(b47);
uint32_t xk46 = u46;
uint32_t ti47 = _t[47U];
@@ -826,8 +818,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb47
+
((va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47)
- << (uint32_t)23U
- | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> (uint32_t)9U);
+ << 23U
+ | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> 9U);
abcd[1U] = v46;
uint32_t va47 = abcd[0U];
uint32_t vb48 = abcd[1U];
@@ -842,14 +834,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb48
+
((va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48)
- << (uint32_t)6U
- | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> (uint32_t)26U);
+ << 6U
+ | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> 26U);
abcd[0U] = v47;
uint32_t va48 = abcd[3U];
uint32_t vb49 = abcd[0U];
uint32_t vc49 = abcd[1U];
uint32_t vd49 = abcd[2U];
- uint8_t *b49 = x + (uint32_t)28U;
+ uint8_t *b49 = x + 28U;
uint32_t u48 = load32_le(b49);
uint32_t xk48 = u48;
uint32_t ti49 = _t[49U];
@@ -858,14 +850,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb49
+
((va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49)
- << (uint32_t)10U
- | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> (uint32_t)22U);
+ << 10U
+ | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> 22U);
abcd[3U] = v48;
uint32_t va49 = abcd[2U];
uint32_t vb50 = abcd[3U];
uint32_t vc50 = abcd[0U];
uint32_t vd50 = abcd[1U];
- uint8_t *b50 = x + (uint32_t)56U;
+ uint8_t *b50 = x + 56U;
uint32_t u49 = load32_le(b50);
uint32_t xk49 = u49;
uint32_t ti50 = _t[50U];
@@ -874,14 +866,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb50
+
((va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50)
- << (uint32_t)15U
- | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> (uint32_t)17U);
+ << 15U
+ | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> 17U);
abcd[2U] = v49;
uint32_t va50 = abcd[1U];
uint32_t vb51 = abcd[2U];
uint32_t vc51 = abcd[3U];
uint32_t vd51 = abcd[0U];
- uint8_t *b51 = x + (uint32_t)20U;
+ uint8_t *b51 = x + 20U;
uint32_t u50 = load32_le(b51);
uint32_t xk50 = u50;
uint32_t ti51 = _t[51U];
@@ -890,14 +882,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb51
+
((va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51)
- << (uint32_t)21U
- | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> (uint32_t)11U);
+ << 21U
+ | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> 11U);
abcd[1U] = v50;
uint32_t va51 = abcd[0U];
uint32_t vb52 = abcd[1U];
uint32_t vc52 = abcd[2U];
uint32_t vd52 = abcd[3U];
- uint8_t *b52 = x + (uint32_t)48U;
+ uint8_t *b52 = x + 48U;
uint32_t u51 = load32_le(b52);
uint32_t xk51 = u51;
uint32_t ti52 = _t[52U];
@@ -906,14 +898,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb52
+
((va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52)
- << (uint32_t)6U
- | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> (uint32_t)26U);
+ << 6U
+ | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> 26U);
abcd[0U] = v51;
uint32_t va52 = abcd[3U];
uint32_t vb53 = abcd[0U];
uint32_t vc53 = abcd[1U];
uint32_t vd53 = abcd[2U];
- uint8_t *b53 = x + (uint32_t)12U;
+ uint8_t *b53 = x + 12U;
uint32_t u52 = load32_le(b53);
uint32_t xk52 = u52;
uint32_t ti53 = _t[53U];
@@ -922,14 +914,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb53
+
((va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53)
- << (uint32_t)10U
- | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> (uint32_t)22U);
+ << 10U
+ | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> 22U);
abcd[3U] = v52;
uint32_t va53 = abcd[2U];
uint32_t vb54 = abcd[3U];
uint32_t vc54 = abcd[0U];
uint32_t vd54 = abcd[1U];
- uint8_t *b54 = x + (uint32_t)40U;
+ uint8_t *b54 = x + 40U;
uint32_t u53 = load32_le(b54);
uint32_t xk53 = u53;
uint32_t ti54 = _t[54U];
@@ -938,14 +930,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb54
+
((va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54)
- << (uint32_t)15U
- | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> (uint32_t)17U);
+ << 15U
+ | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> 17U);
abcd[2U] = v53;
uint32_t va54 = abcd[1U];
uint32_t vb55 = abcd[2U];
uint32_t vc55 = abcd[3U];
uint32_t vd55 = abcd[0U];
- uint8_t *b55 = x + (uint32_t)4U;
+ uint8_t *b55 = x + 4U;
uint32_t u54 = load32_le(b55);
uint32_t xk54 = u54;
uint32_t ti55 = _t[55U];
@@ -954,14 +946,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb55
+
((va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55)
- << (uint32_t)21U
- | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> (uint32_t)11U);
+ << 21U
+ | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> 11U);
abcd[1U] = v54;
uint32_t va55 = abcd[0U];
uint32_t vb56 = abcd[1U];
uint32_t vc56 = abcd[2U];
uint32_t vd56 = abcd[3U];
- uint8_t *b56 = x + (uint32_t)32U;
+ uint8_t *b56 = x + 32U;
uint32_t u55 = load32_le(b56);
uint32_t xk55 = u55;
uint32_t ti56 = _t[56U];
@@ -970,14 +962,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb56
+
((va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56)
- << (uint32_t)6U
- | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> (uint32_t)26U);
+ << 6U
+ | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> 26U);
abcd[0U] = v55;
uint32_t va56 = abcd[3U];
uint32_t vb57 = abcd[0U];
uint32_t vc57 = abcd[1U];
uint32_t vd57 = abcd[2U];
- uint8_t *b57 = x + (uint32_t)60U;
+ uint8_t *b57 = x + 60U;
uint32_t u56 = load32_le(b57);
uint32_t xk56 = u56;
uint32_t ti57 = _t[57U];
@@ -986,14 +978,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb57
+
((va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57)
- << (uint32_t)10U
- | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> (uint32_t)22U);
+ << 10U
+ | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> 22U);
abcd[3U] = v56;
uint32_t va57 = abcd[2U];
uint32_t vb58 = abcd[3U];
uint32_t vc58 = abcd[0U];
uint32_t vd58 = abcd[1U];
- uint8_t *b58 = x + (uint32_t)24U;
+ uint8_t *b58 = x + 24U;
uint32_t u57 = load32_le(b58);
uint32_t xk57 = u57;
uint32_t ti58 = _t[58U];
@@ -1002,14 +994,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb58
+
((va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58)
- << (uint32_t)15U
- | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> (uint32_t)17U);
+ << 15U
+ | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> 17U);
abcd[2U] = v57;
uint32_t va58 = abcd[1U];
uint32_t vb59 = abcd[2U];
uint32_t vc59 = abcd[3U];
uint32_t vd59 = abcd[0U];
- uint8_t *b59 = x + (uint32_t)52U;
+ uint8_t *b59 = x + 52U;
uint32_t u58 = load32_le(b59);
uint32_t xk58 = u58;
uint32_t ti59 = _t[59U];
@@ -1018,14 +1010,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb59
+
((va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59)
- << (uint32_t)21U
- | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> (uint32_t)11U);
+ << 21U
+ | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> 11U);
abcd[1U] = v58;
uint32_t va59 = abcd[0U];
uint32_t vb60 = abcd[1U];
uint32_t vc60 = abcd[2U];
uint32_t vd60 = abcd[3U];
- uint8_t *b60 = x + (uint32_t)16U;
+ uint8_t *b60 = x + 16U;
uint32_t u59 = load32_le(b60);
uint32_t xk59 = u59;
uint32_t ti60 = _t[60U];
@@ -1034,14 +1026,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb60
+
((va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60)
- << (uint32_t)6U
- | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> (uint32_t)26U);
+ << 6U
+ | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> 26U);
abcd[0U] = v59;
uint32_t va60 = abcd[3U];
uint32_t vb61 = abcd[0U];
uint32_t vc61 = abcd[1U];
uint32_t vd61 = abcd[2U];
- uint8_t *b61 = x + (uint32_t)44U;
+ uint8_t *b61 = x + 44U;
uint32_t u60 = load32_le(b61);
uint32_t xk60 = u60;
uint32_t ti61 = _t[61U];
@@ -1050,14 +1042,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb61
+
((va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61)
- << (uint32_t)10U
- | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> (uint32_t)22U);
+ << 10U
+ | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> 22U);
abcd[3U] = v60;
uint32_t va61 = abcd[2U];
uint32_t vb62 = abcd[3U];
uint32_t vc62 = abcd[0U];
uint32_t vd62 = abcd[1U];
- uint8_t *b62 = x + (uint32_t)8U;
+ uint8_t *b62 = x + 8U;
uint32_t u61 = load32_le(b62);
uint32_t xk61 = u61;
uint32_t ti62 = _t[62U];
@@ -1066,14 +1058,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb62
+
((va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62)
- << (uint32_t)15U
- | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> (uint32_t)17U);
+ << 15U
+ | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> 17U);
abcd[2U] = v61;
uint32_t va62 = abcd[1U];
uint32_t vb = abcd[2U];
uint32_t vc = abcd[3U];
uint32_t vd = abcd[0U];
- uint8_t *b63 = x + (uint32_t)36U;
+ uint8_t *b63 = x + 36U;
uint32_t u62 = load32_le(b63);
uint32_t xk62 = u62;
uint32_t ti = _t[63U];
@@ -1082,8 +1074,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
vb
+
((va62 + (vc ^ (vb | ~vd)) + xk62 + ti)
- << (uint32_t)21U
- | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> (uint32_t)11U);
+ << 21U
+ | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> 11U);
abcd[1U] = v62;
uint32_t a = abcd[0U];
uint32_t b = abcd[1U];
@@ -1095,98 +1087,69 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
abcd[3U] = d + dd;
}
-static void legacy_pad(uint64_t len, uint8_t *dst)
+static void pad(uint64_t len, uint8_t *dst)
{
uint8_t *dst1 = dst;
- dst1[0U] = (uint8_t)0x80U;
- uint8_t *dst2 = dst + (uint32_t)1U;
- for
- (uint32_t
- i = (uint32_t)0U;
- i
- < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
- i++)
+ dst1[0U] = 0x80U;
+ uint8_t *dst2 = dst + 1U;
+ for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
{
- dst2[i] = (uint8_t)0U;
+ dst2[i] = 0U;
}
- uint8_t
- *dst3 =
- dst
- +
- (uint32_t)1U
- +
- ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
- % (uint32_t)64U;
- store64_le(dst3, len << (uint32_t)3U);
+ uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+ store64_le(dst3, len << 3U);
}
-void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst)
+void Hacl_Hash_MD5_finish(uint32_t *s, uint8_t *dst)
{
- KRML_MAYBE_FOR4(i,
- (uint32_t)0U,
- (uint32_t)4U,
- (uint32_t)1U,
- store32_le(dst + i * (uint32_t)4U, s[i]););
+ KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(dst + i * 4U, s[i]););
}
-void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+void Hacl_Hash_MD5_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
{
- for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ for (uint32_t i = 0U; i < n_blocks; i++)
{
- uint32_t sz = (uint32_t)64U;
+ uint32_t sz = 64U;
uint8_t *block = blocks + sz * i;
- legacy_update(s, block);
+ update(s, block);
}
}
void
-Hacl_Hash_MD5_legacy_update_last(
- uint32_t *s,
- uint64_t prev_len,
- uint8_t *input,
- uint32_t input_len
-)
+Hacl_Hash_MD5_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len)
{
- uint32_t blocks_n = input_len / (uint32_t)64U;
- uint32_t blocks_len = blocks_n * (uint32_t)64U;
+ uint32_t blocks_n = input_len / 64U;
+ uint32_t blocks_len = blocks_n * 64U;
uint8_t *blocks = input;
uint32_t rest_len = input_len - blocks_len;
uint8_t *rest = input + blocks_len;
- Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
+ Hacl_Hash_MD5_update_multi(s, blocks, blocks_n);
uint64_t total_input_len = prev_len + (uint64_t)input_len;
- uint32_t
- pad_len =
- (uint32_t)1U
- +
- ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
- % (uint32_t)64U
- + (uint32_t)8U;
+ uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
uint32_t tmp_len = rest_len + pad_len;
uint8_t tmp_twoblocks[128U] = { 0U };
uint8_t *tmp = tmp_twoblocks;
uint8_t *tmp_rest = tmp;
uint8_t *tmp_pad = tmp + rest_len;
memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
- legacy_pad(total_input_len, tmp_pad);
- Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+ pad(total_input_len, tmp_pad);
+ Hacl_Hash_MD5_update_multi(s, tmp, tmp_len / 64U);
}
-void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_MD5_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- uint32_t
- s[4U] =
- { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
- uint32_t blocks_n0 = input_len / (uint32_t)64U;
+ uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
+ uint32_t blocks_n0 = input_len / 64U;
uint32_t blocks_n1;
- if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+ if (input_len % 64U == 0U && blocks_n0 > 0U)
{
- blocks_n1 = blocks_n0 - (uint32_t)1U;
+ blocks_n1 = blocks_n0 - 1U;
}
else
{
blocks_n1 = blocks_n0;
}
- uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+ uint32_t blocks_len0 = blocks_n1 * 64U;
uint8_t *blocks0 = input;
uint32_t rest_len0 = input_len - blocks_len0;
uint8_t *rest0 = input + blocks_len0;
@@ -1195,75 +1158,75 @@ void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
uint8_t *blocks = blocks0;
uint32_t rest_len = rest_len0;
uint8_t *rest = rest0;
- Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
- Hacl_Hash_MD5_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
- Hacl_Hash_Core_MD5_legacy_finish(s, dst);
+ Hacl_Hash_MD5_update_multi(s, blocks, blocks_n);
+ Hacl_Hash_MD5_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+ Hacl_Hash_MD5_finish(s, output);
}
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void)
{
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
Hacl_Streaming_MD_state_32
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
Hacl_Streaming_MD_state_32
*p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
p[0U] = s;
- Hacl_Hash_Core_MD5_legacy_init(block_state);
+ Hacl_Hash_MD5_init(block_state);
return p;
}
-void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
- Hacl_Hash_Core_MD5_legacy_init(block_state);
+ Hacl_Hash_MD5_init(block_state);
Hacl_Streaming_MD_state_32
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
/**
0 = success, 1 = max length exceeded
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
{
- Hacl_Streaming_MD_state_32 s = *p;
+ Hacl_Streaming_MD_state_32 s = *state;
uint64_t total_len = s.total_len;
- if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
{
return Hacl_Streaming_Types_MaximumLengthExceeded;
}
uint32_t sz;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- sz = (uint32_t)64U;
+ sz = 64U;
}
else
{
- sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ sz = (uint32_t)(total_len % (uint64_t)64U);
}
- if (len <= (uint32_t)64U - sz)
+ if (chunk_len <= 64U - sz)
{
- Hacl_Streaming_MD_state_32 s1 = *p;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
uint8_t *buf2 = buf + sz1;
- memcpy(buf2, data, len * sizeof (uint8_t));
- uint64_t total_len2 = total_len1 + (uint64_t)len;
- *p
+ memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+ *state
=
(
(Hacl_Streaming_MD_state_32){
@@ -1273,74 +1236,74 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
}
);
}
- else if (sz == (uint32_t)0U)
+ else if (sz == 0U)
{
- Hacl_Streaming_MD_state_32 s1 = *p;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ Hacl_Hash_MD5_update_multi(block_state1, buf, 1U);
}
uint32_t ite;
- if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
}
- uint32_t n_blocks = (len - ite) / (uint32_t)64U;
- uint32_t data1_len = n_blocks * (uint32_t)64U;
- uint32_t data2_len = len - data1_len;
- uint8_t *data1 = data;
- uint8_t *data2 = data + data1_len;
- Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+ uint32_t n_blocks = (chunk_len - ite) / 64U;
+ uint32_t data1_len = n_blocks * 64U;
+ uint32_t data2_len = chunk_len - data1_len;
+ uint8_t *data1 = chunk;
+ uint8_t *data2 = chunk + data1_len;
+ Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U);
uint8_t *dst = buf;
memcpy(dst, data2, data2_len * sizeof (uint8_t));
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_32){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)len
+ .total_len = total_len1 + (uint64_t)chunk_len
}
);
}
else
{
- uint32_t diff = (uint32_t)64U - sz;
- uint8_t *data1 = data;
- uint8_t *data2 = data + diff;
- Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t diff = 64U - sz;
+ uint8_t *chunk1 = chunk;
+ uint8_t *chunk2 = chunk + diff;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state10 = s1.block_state;
uint8_t *buf0 = s1.buf;
uint64_t total_len10 = s1.total_len;
uint32_t sz10;
- if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
{
- sz10 = (uint32_t)64U;
+ sz10 = 64U;
}
else
{
- sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
}
uint8_t *buf2 = buf0 + sz10;
- memcpy(buf2, data1, diff * sizeof (uint8_t));
+ memcpy(buf2, chunk1, diff * sizeof (uint8_t));
uint64_t total_len2 = total_len10 + (uint64_t)diff;
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_32){
@@ -1349,114 +1312,109 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
.total_len = total_len2
}
);
- Hacl_Streaming_MD_state_32 s10 = *p;
+ Hacl_Streaming_MD_state_32 s10 = *state;
uint32_t *block_state1 = s10.block_state;
uint8_t *buf = s10.buf;
uint64_t total_len1 = s10.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ Hacl_Hash_MD5_update_multi(block_state1, buf, 1U);
}
uint32_t ite;
if
- (
- (uint64_t)(len - diff)
- % (uint64_t)(uint32_t)64U
- == (uint64_t)0U
- && (uint64_t)(len - diff) > (uint64_t)0U
- )
+ ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
}
- uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
- uint32_t data1_len = n_blocks * (uint32_t)64U;
- uint32_t data2_len = len - diff - data1_len;
- uint8_t *data11 = data2;
- uint8_t *data21 = data2 + data1_len;
- Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+ uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+ uint32_t data1_len = n_blocks * 64U;
+ uint32_t data2_len = chunk_len - diff - data1_len;
+ uint8_t *data1 = chunk2;
+ uint8_t *data2 = chunk2 + data1_len;
+ Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U);
uint8_t *dst = buf;
- memcpy(dst, data21, data2_len * sizeof (uint8_t));
- *p
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *state
=
(
(Hacl_Streaming_MD_state_32){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)(len - diff)
+ .total_len = total_len1 + (uint64_t)(chunk_len - diff)
}
);
}
return Hacl_Streaming_Types_Success;
}
-void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
{
- Hacl_Streaming_MD_state_32 scrut = *p;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state = scrut.block_state;
uint8_t *buf_ = scrut.buf;
uint64_t total_len = scrut.total_len;
uint32_t r;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- r = (uint32_t)64U;
+ r = 64U;
}
else
{
- r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ r = (uint32_t)(total_len % (uint64_t)64U);
}
uint8_t *buf_1 = buf_;
uint32_t tmp_block_state[4U] = { 0U };
- memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t));
+ memcpy(tmp_block_state, block_state, 4U * sizeof (uint32_t));
uint32_t ite;
- if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ if (r % 64U == 0U && r > 0U)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = r % (uint32_t)64U;
+ ite = r % 64U;
}
uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
- Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+ Hacl_Hash_MD5_update_multi(tmp_block_state, buf_multi, 0U);
uint64_t prev_len_last = total_len - (uint64_t)r;
- Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
- Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst);
+ Hacl_Hash_MD5_update_last(tmp_block_state, prev_len_last, buf_last, r);
+ Hacl_Hash_MD5_finish(tmp_block_state, output);
}
-void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
KRML_HOST_FREE(block_state);
KRML_HOST_FREE(buf);
- KRML_HOST_FREE(s);
+ KRML_HOST_FREE(state);
}
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s0;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state0 = scrut.block_state;
uint8_t *buf0 = scrut.buf;
uint64_t total_len0 = scrut.total_len;
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
- memcpy(block_state, block_state0, (uint32_t)4U * sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ memcpy(buf, buf0, 64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, 4U * sizeof (uint32_t));
Hacl_Streaming_MD_state_32
s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
Hacl_Streaming_MD_state_32
@@ -1465,8 +1423,8 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_sta
return p;
}
-void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_MD5_hash(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- Hacl_Hash_MD5_legacy_hash(input, input_len, dst);
+ Hacl_Hash_MD5_hash_oneshot(output, input, input_len);
}
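
The hunks above rename the MD5 streaming entry points (Hacl_Streaming_MD5_legacy_* becomes Hacl_Hash_MD5_*) and rename the parameters p/data/len to state/chunk/chunk_len, while keeping the malloc/update/digest/free flow. A minimal caller sketch against the renamed API follows; only the prototypes and the Hacl_Streaming_Types_Success return code come from this diff, while the demo function, the message and the 16-byte digest buffer are illustrative assumptions.

#include <stdint.h>
#include "Hacl_Hash_MD5.h"

static void md5_streaming_demo(void)
{
  uint8_t msg[] = "abc";                       /* hypothetical input */
  uint8_t digest[16U];                         /* MD5 digests are 16 bytes */
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_MD5_malloc();
  /* update returns 0 = success, 1 = max length exceeded */
  if (Hacl_Hash_MD5_update(st, msg, (uint32_t)(sizeof msg - 1U)) == Hacl_Streaming_Types_Success)
  {
    Hacl_Hash_MD5_digest(st, digest);          /* works on a copy; st stays usable */
  }
  Hacl_Hash_MD5_free(st);
}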
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h
index 13c19fd40f4..f69d6e5a81d 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_MD5.h
@@ -31,31 +31,32 @@ extern "C" {
#endif
#include <string.h>
+#include "python_hacl_namespaces.h"
#include "krml/types.h"
#include "krml/lowstar_endianness.h"
#include "krml/internal/target.h"
#include "Hacl_Streaming_Types.h"
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_MD5_state;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_MD5_state_t;
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void);
-void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state);
/**
0 = success, 1 = max length exceeded
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len);
-void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
-void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state);
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state);
-void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_MD5_hash(uint8_t *output, uint8_t *input, uint32_t input_len);
#if defined(__cplusplus)
}
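
One caller-visible detail in this header: the one-shot helper changes both its name and its argument order, from Hacl_Streaming_MD5_legacy_hash(input, input_len, dst) to Hacl_Hash_MD5_hash(output, input, input_len), so the digest pointer now comes first. A hedged fragment with placeholder buffers:

/* Illustrative fragment: "data" is a placeholder 3-byte message. */
uint8_t data[3U] = { 0x61U, 0x62U, 0x63U };
uint8_t digest[16U];
/* old entry point: Hacl_Streaming_MD5_legacy_hash(data, 3U, digest);  input first */
/* new entry point:                                                                */
Hacl_Hash_MD5_hash(digest, data, 3U);          /* output now comes first */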
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c
index 5ecb3c0b3a5..1a8b09b1711 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.c
@@ -25,19 +25,14 @@
#include "internal/Hacl_Hash_SHA1.h"
-static uint32_t
-_h0[5U] =
- {
- (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
- (uint32_t)0xc3d2e1f0U
- };
+static uint32_t _h0[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
-void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
+void Hacl_Hash_SHA1_init(uint32_t *s)
{
- KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
+ KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i] = _h0[i];);
}
-static void legacy_update(uint32_t *h, uint8_t *l)
+static void update(uint32_t *h, uint8_t *l)
{
uint32_t ha = h[0U];
uint32_t hb = h[1U];
@@ -45,29 +40,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
uint32_t hd = h[3U];
uint32_t he = h[4U];
uint32_t _w[80U] = { 0U };
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+ for (uint32_t i = 0U; i < 80U; i++)
{
uint32_t v;
- if (i < (uint32_t)16U)
+ if (i < 16U)
{
- uint8_t *b = l + i * (uint32_t)4U;
+ uint8_t *b = l + i * 4U;
uint32_t u = load32_be(b);
v = u;
}
else
{
- uint32_t wmit3 = _w[i - (uint32_t)3U];
- uint32_t wmit8 = _w[i - (uint32_t)8U];
- uint32_t wmit14 = _w[i - (uint32_t)14U];
- uint32_t wmit16 = _w[i - (uint32_t)16U];
- v =
- (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16)))
- << (uint32_t)1U
- | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
+ uint32_t wmit3 = _w[i - 3U];
+ uint32_t wmit8 = _w[i - 8U];
+ uint32_t wmit14 = _w[i - 14U];
+ uint32_t wmit16 = _w[i - 16U];
+ v = (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) << 1U | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> 31U;
}
_w[i] = v;
}
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+ for (uint32_t i = 0U; i < 80U; i++)
{
uint32_t _a = h[0U];
uint32_t _b = h[1U];
@@ -76,11 +68,11 @@ static void legacy_update(uint32_t *h, uint8_t *l)
uint32_t _e = h[4U];
uint32_t wmit = _w[i];
uint32_t ite0;
- if (i < (uint32_t)20U)
+ if (i < 20U)
{
ite0 = (_b & _c) ^ (~_b & _d);
}
- else if ((uint32_t)39U < i && i < (uint32_t)60U)
+ else if (39U < i && i < 60U)
{
ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
}
@@ -89,32 +81,32 @@ static void legacy_update(uint32_t *h, uint8_t *l)
ite0 = _b ^ (_c ^ _d);
}
uint32_t ite;
- if (i < (uint32_t)20U)
+ if (i < 20U)
{
- ite = (uint32_t)0x5a827999U;
+ ite = 0x5a827999U;
}
- else if (i < (uint32_t)40U)
+ else if (i < 40U)
{
- ite = (uint32_t)0x6ed9eba1U;
+ ite = 0x6ed9eba1U;
}
- else if (i < (uint32_t)60U)
+ else if (i < 60U)
{
- ite = (uint32_t)0x8f1bbcdcU;
+ ite = 0x8f1bbcdcU;
}
else
{
- ite = (uint32_t)0xca62c1d6U;
+ ite = 0xca62c1d6U;
}
- uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
+ uint32_t _T = (_a << 5U | _a >> 27U) + ite0 + _e + ite + wmit;
h[0U] = _T;
h[1U] = _a;
- h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
+ h[2U] = _b << 30U | _b >> 2U;
h[3U] = _c;
h[4U] = _d;
}
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+ for (uint32_t i = 0U; i < 80U; i++)
{
- _w[i] = (uint32_t)0U;
+ _w[i] = 0U;
}
uint32_t sta = h[0U];
uint32_t stb = h[1U];
@@ -128,101 +120,69 @@ static void legacy_update(uint32_t *h, uint8_t *l)
h[4U] = ste + he;
}
-static void legacy_pad(uint64_t len, uint8_t *dst)
+static void pad(uint64_t len, uint8_t *dst)
{
uint8_t *dst1 = dst;
- dst1[0U] = (uint8_t)0x80U;
- uint8_t *dst2 = dst + (uint32_t)1U;
- for
- (uint32_t
- i = (uint32_t)0U;
- i
- < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
- i++)
+ dst1[0U] = 0x80U;
+ uint8_t *dst2 = dst + 1U;
+ for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
{
- dst2[i] = (uint8_t)0U;
+ dst2[i] = 0U;
}
- uint8_t
- *dst3 =
- dst
- +
- (uint32_t)1U
- +
- ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
- % (uint32_t)64U;
- store64_be(dst3, len << (uint32_t)3U);
+ uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+ store64_be(dst3, len << 3U);
}
-void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
+void Hacl_Hash_SHA1_finish(uint32_t *s, uint8_t *dst)
{
- KRML_MAYBE_FOR5(i,
- (uint32_t)0U,
- (uint32_t)5U,
- (uint32_t)1U,
- store32_be(dst + i * (uint32_t)4U, s[i]););
+ KRML_MAYBE_FOR5(i, 0U, 5U, 1U, store32_be(dst + i * 4U, s[i]););
}
-void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+void Hacl_Hash_SHA1_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
{
- for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ for (uint32_t i = 0U; i < n_blocks; i++)
{
- uint32_t sz = (uint32_t)64U;
+ uint32_t sz = 64U;
uint8_t *block = blocks + sz * i;
- legacy_update(s, block);
+ update(s, block);
}
}
void
-Hacl_Hash_SHA1_legacy_update_last(
- uint32_t *s,
- uint64_t prev_len,
- uint8_t *input,
- uint32_t input_len
-)
+Hacl_Hash_SHA1_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len)
{
- uint32_t blocks_n = input_len / (uint32_t)64U;
- uint32_t blocks_len = blocks_n * (uint32_t)64U;
+ uint32_t blocks_n = input_len / 64U;
+ uint32_t blocks_len = blocks_n * 64U;
uint8_t *blocks = input;
uint32_t rest_len = input_len - blocks_len;
uint8_t *rest = input + blocks_len;
- Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
+ Hacl_Hash_SHA1_update_multi(s, blocks, blocks_n);
uint64_t total_input_len = prev_len + (uint64_t)input_len;
- uint32_t
- pad_len =
- (uint32_t)1U
- +
- ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
- % (uint32_t)64U
- + (uint32_t)8U;
+ uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
uint32_t tmp_len = rest_len + pad_len;
uint8_t tmp_twoblocks[128U] = { 0U };
uint8_t *tmp = tmp_twoblocks;
uint8_t *tmp_rest = tmp;
uint8_t *tmp_pad = tmp + rest_len;
memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
- legacy_pad(total_input_len, tmp_pad);
- Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+ pad(total_input_len, tmp_pad);
+ Hacl_Hash_SHA1_update_multi(s, tmp, tmp_len / 64U);
}
-void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA1_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- uint32_t
- s[5U] =
- {
- (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
- (uint32_t)0xc3d2e1f0U
- };
- uint32_t blocks_n0 = input_len / (uint32_t)64U;
+ uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
+ uint32_t blocks_n0 = input_len / 64U;
uint32_t blocks_n1;
- if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+ if (input_len % 64U == 0U && blocks_n0 > 0U)
{
- blocks_n1 = blocks_n0 - (uint32_t)1U;
+ blocks_n1 = blocks_n0 - 1U;
}
else
{
blocks_n1 = blocks_n0;
}
- uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+ uint32_t blocks_len0 = blocks_n1 * 64U;
uint8_t *blocks0 = input;
uint32_t rest_len0 = input_len - blocks_len0;
uint8_t *rest0 = input + blocks_len0;
@@ -231,75 +191,75 @@ void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst
uint8_t *blocks = blocks0;
uint32_t rest_len = rest_len0;
uint8_t *rest = rest0;
- Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
- Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
- Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+ Hacl_Hash_SHA1_update_multi(s, blocks, blocks_n);
+ Hacl_Hash_SHA1_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+ Hacl_Hash_SHA1_finish(s, output);
}
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void)
{
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
Hacl_Streaming_MD_state_32
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
Hacl_Streaming_MD_state_32
*p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
p[0U] = s;
- Hacl_Hash_Core_SHA1_legacy_init(block_state);
+ Hacl_Hash_SHA1_init(block_state);
return p;
}
-void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
- Hacl_Hash_Core_SHA1_legacy_init(block_state);
+ Hacl_Hash_SHA1_init(block_state);
Hacl_Streaming_MD_state_32
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
/**
0 = success, 1 = max length exceeded
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
{
- Hacl_Streaming_MD_state_32 s = *p;
+ Hacl_Streaming_MD_state_32 s = *state;
uint64_t total_len = s.total_len;
- if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
{
return Hacl_Streaming_Types_MaximumLengthExceeded;
}
uint32_t sz;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- sz = (uint32_t)64U;
+ sz = 64U;
}
else
{
- sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ sz = (uint32_t)(total_len % (uint64_t)64U);
}
- if (len <= (uint32_t)64U - sz)
+ if (chunk_len <= 64U - sz)
{
- Hacl_Streaming_MD_state_32 s1 = *p;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
uint8_t *buf2 = buf + sz1;
- memcpy(buf2, data, len * sizeof (uint8_t));
- uint64_t total_len2 = total_len1 + (uint64_t)len;
- *p
+ memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+ *state
=
(
(Hacl_Streaming_MD_state_32){
@@ -309,74 +269,74 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
}
);
}
- else if (sz == (uint32_t)0U)
+ else if (sz == 0U)
{
- Hacl_Streaming_MD_state_32 s1 = *p;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U);
}
uint32_t ite;
- if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
}
- uint32_t n_blocks = (len - ite) / (uint32_t)64U;
- uint32_t data1_len = n_blocks * (uint32_t)64U;
- uint32_t data2_len = len - data1_len;
- uint8_t *data1 = data;
- uint8_t *data2 = data + data1_len;
- Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+ uint32_t n_blocks = (chunk_len - ite) / 64U;
+ uint32_t data1_len = n_blocks * 64U;
+ uint32_t data2_len = chunk_len - data1_len;
+ uint8_t *data1 = chunk;
+ uint8_t *data2 = chunk + data1_len;
+ Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U);
uint8_t *dst = buf;
memcpy(dst, data2, data2_len * sizeof (uint8_t));
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_32){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)len
+ .total_len = total_len1 + (uint64_t)chunk_len
}
);
}
else
{
- uint32_t diff = (uint32_t)64U - sz;
- uint8_t *data1 = data;
- uint8_t *data2 = data + diff;
- Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t diff = 64U - sz;
+ uint8_t *chunk1 = chunk;
+ uint8_t *chunk2 = chunk + diff;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state10 = s1.block_state;
uint8_t *buf0 = s1.buf;
uint64_t total_len10 = s1.total_len;
uint32_t sz10;
- if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
{
- sz10 = (uint32_t)64U;
+ sz10 = 64U;
}
else
{
- sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
}
uint8_t *buf2 = buf0 + sz10;
- memcpy(buf2, data1, diff * sizeof (uint8_t));
+ memcpy(buf2, chunk1, diff * sizeof (uint8_t));
uint64_t total_len2 = total_len10 + (uint64_t)diff;
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_32){
@@ -385,114 +345,109 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
.total_len = total_len2
}
);
- Hacl_Streaming_MD_state_32 s10 = *p;
+ Hacl_Streaming_MD_state_32 s10 = *state;
uint32_t *block_state1 = s10.block_state;
uint8_t *buf = s10.buf;
uint64_t total_len1 = s10.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+ Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U);
}
uint32_t ite;
if
- (
- (uint64_t)(len - diff)
- % (uint64_t)(uint32_t)64U
- == (uint64_t)0U
- && (uint64_t)(len - diff) > (uint64_t)0U
- )
+ ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
}
- uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
- uint32_t data1_len = n_blocks * (uint32_t)64U;
- uint32_t data2_len = len - diff - data1_len;
- uint8_t *data11 = data2;
- uint8_t *data21 = data2 + data1_len;
- Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+ uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+ uint32_t data1_len = n_blocks * 64U;
+ uint32_t data2_len = chunk_len - diff - data1_len;
+ uint8_t *data1 = chunk2;
+ uint8_t *data2 = chunk2 + data1_len;
+ Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U);
uint8_t *dst = buf;
- memcpy(dst, data21, data2_len * sizeof (uint8_t));
- *p
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *state
=
(
(Hacl_Streaming_MD_state_32){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)(len - diff)
+ .total_len = total_len1 + (uint64_t)(chunk_len - diff)
}
);
}
return Hacl_Streaming_Types_Success;
}
-void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
{
- Hacl_Streaming_MD_state_32 scrut = *p;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state = scrut.block_state;
uint8_t *buf_ = scrut.buf;
uint64_t total_len = scrut.total_len;
uint32_t r;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- r = (uint32_t)64U;
+ r = 64U;
}
else
{
- r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ r = (uint32_t)(total_len % (uint64_t)64U);
}
uint8_t *buf_1 = buf_;
uint32_t tmp_block_state[5U] = { 0U };
- memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
+ memcpy(tmp_block_state, block_state, 5U * sizeof (uint32_t));
uint32_t ite;
- if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ if (r % 64U == 0U && r > 0U)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = r % (uint32_t)64U;
+ ite = r % 64U;
}
uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
- Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+ Hacl_Hash_SHA1_update_multi(tmp_block_state, buf_multi, 0U);
uint64_t prev_len_last = total_len - (uint64_t)r;
- Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
- Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
+ Hacl_Hash_SHA1_update_last(tmp_block_state, prev_len_last, buf_last, r);
+ Hacl_Hash_SHA1_finish(tmp_block_state, output);
}
-void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
KRML_HOST_FREE(block_state);
KRML_HOST_FREE(buf);
- KRML_HOST_FREE(s);
+ KRML_HOST_FREE(state);
}
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s0;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state0 = scrut.block_state;
uint8_t *buf0 = scrut.buf;
uint64_t total_len0 = scrut.total_len;
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
- memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ memcpy(buf, buf0, 64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, 5U * sizeof (uint32_t));
Hacl_Streaming_MD_state_32
s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
Hacl_Streaming_MD_state_32
@@ -501,8 +456,8 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_st
return p;
}
-void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA1_hash(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- Hacl_Hash_SHA1_legacy_hash(input, input_len, dst);
+ Hacl_Hash_SHA1_hash_oneshot(output, input, input_len);
}
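
The SHA-1 streaming functions are renamed along the same lines (Hacl_Streaming_SHA1_legacy_* becomes Hacl_Hash_SHA1_*), and Hacl_Hash_SHA1_copy still deep-copies both the buffer and the block state, so a partially fed state can be forked. A sketch of that pattern; the prefix/suffix buffers and the demo function are assumptions, the prototypes are taken from the Hacl_Hash_SHA1.h diff below.

#include <stdint.h>
#include "Hacl_Hash_SHA1.h"

static void sha1_fork_demo(void)
{
  /* Hash a shared prefix once, then fork the state for two different suffixes. */
  uint8_t prefix[4U] = { 1U, 2U, 3U, 4U };
  uint8_t suffix_a[1U] = { 5U };
  uint8_t suffix_b[1U] = { 6U };
  uint8_t digest_a[20U];                       /* SHA-1 digests are 20 bytes */
  uint8_t digest_b[20U];
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA1_malloc();
  (void)Hacl_Hash_SHA1_update(st, prefix, 4U);
  Hacl_Streaming_MD_state_32 *st2 = Hacl_Hash_SHA1_copy(st);  /* deep copy of buf + block_state */
  (void)Hacl_Hash_SHA1_update(st, suffix_a, 1U);
  (void)Hacl_Hash_SHA1_update(st2, suffix_b, 1U);
  Hacl_Hash_SHA1_digest(st, digest_a);
  Hacl_Hash_SHA1_digest(st2, digest_b);
  Hacl_Hash_SHA1_free(st2);
  Hacl_Hash_SHA1_free(st);
}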
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h
index dc50aa6f6d3..ad1e8e72a73 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA1.h
@@ -31,31 +31,32 @@ extern "C" {
#endif
#include <string.h>
+#include "python_hacl_namespaces.h"
#include "krml/types.h"
#include "krml/lowstar_endianness.h"
#include "krml/internal/target.h"
#include "Hacl_Streaming_Types.h"
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA1_state;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA1_state_t;
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void);
-void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state);
/**
0 = success, 1 = max length exceeded
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len);
-void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
-void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state);
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state);
-void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA1_hash(uint8_t *output, uint8_t *input, uint32_t input_len);
#if defined(__cplusplus)
}
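
The header also keeps a reset entry point (Hacl_Hash_SHA1_reset, formerly Hacl_Streaming_SHA1_legacy_init) for reusing an allocated state, and update reports Hacl_Streaming_Types_MaximumLengthExceeded once the running total would exceed 2305843009213693951 (2^61 - 1) bytes, as the .c file above checks. A reuse sketch with placeholder one-byte messages:

#include <stdint.h>
#include "Hacl_Hash_SHA1.h"

static void sha1_reuse_demo(void)
{
  /* Reuse one heap-allocated state for several independent digests. */
  uint8_t digest[20U];
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA1_malloc();
  for (uint32_t i = 0U; i < 3U; i++)
  {
    uint8_t msg[1U] = { (uint8_t)i };
    Hacl_Hash_SHA1_reset(st);                  /* back to the empty-input state */
    if (Hacl_Hash_SHA1_update(st, msg, 1U) != Hacl_Streaming_Types_Success)
    {
      break;                                   /* running total passed 2^61 - 1 bytes */
    }
    Hacl_Hash_SHA1_digest(st, digest);
  }
  Hacl_Hash_SHA1_free(st);
}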
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c
index 08e3f7edbf4..4b6af5fc78c 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.c
@@ -27,14 +27,14 @@
-void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash)
+void Hacl_Hash_SHA2_sha256_init(uint32_t *hash)
{
KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
+ 0U,
+ 8U,
+ 1U,
uint32_t *os = hash;
- uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+ uint32_t x = Hacl_Hash_SHA2_h256[i];
os[i] = x;);
}
@@ -42,49 +42,49 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
{
uint32_t hash_old[8U] = { 0U };
uint32_t ws[16U] = { 0U };
- memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+ memcpy(hash_old, hash, 8U * sizeof (uint32_t));
uint8_t *b10 = b;
uint32_t u = load32_be(b10);
ws[0U] = u;
- uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+ uint32_t u0 = load32_be(b10 + 4U);
ws[1U] = u0;
- uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+ uint32_t u1 = load32_be(b10 + 8U);
ws[2U] = u1;
- uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+ uint32_t u2 = load32_be(b10 + 12U);
ws[3U] = u2;
- uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+ uint32_t u3 = load32_be(b10 + 16U);
ws[4U] = u3;
- uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+ uint32_t u4 = load32_be(b10 + 20U);
ws[5U] = u4;
- uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+ uint32_t u5 = load32_be(b10 + 24U);
ws[6U] = u5;
- uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+ uint32_t u6 = load32_be(b10 + 28U);
ws[7U] = u6;
- uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+ uint32_t u7 = load32_be(b10 + 32U);
ws[8U] = u7;
- uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+ uint32_t u8 = load32_be(b10 + 36U);
ws[9U] = u8;
- uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+ uint32_t u9 = load32_be(b10 + 40U);
ws[10U] = u9;
- uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+ uint32_t u10 = load32_be(b10 + 44U);
ws[11U] = u10;
- uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+ uint32_t u11 = load32_be(b10 + 48U);
ws[12U] = u11;
- uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+ uint32_t u12 = load32_be(b10 + 52U);
ws[13U] = u12;
- uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+ uint32_t u13 = load32_be(b10 + 56U);
ws[14U] = u13;
- uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+ uint32_t u14 = load32_be(b10 + 60U);
ws[15U] = u14;
KRML_MAYBE_FOR4(i0,
- (uint32_t)0U,
- (uint32_t)4U,
- (uint32_t)1U,
+ 0U,
+ 4U,
+ 1U,
KRML_MAYBE_FOR16(i,
- (uint32_t)0U,
- (uint32_t)16U,
- (uint32_t)1U,
- uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+ 0U,
+ 16U,
+ 1U,
+ uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
uint32_t ws_t = ws[i];
uint32_t a0 = hash[0U];
uint32_t b0 = hash[1U];
@@ -98,20 +98,13 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
uint32_t
t1 =
h02
- +
- ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
- ^
- ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
- ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+ + ((e0 << 26U | e0 >> 6U) ^ ((e0 << 21U | e0 >> 11U) ^ (e0 << 7U | e0 >> 25U)))
+ ((e0 & f0) ^ (~e0 & g0))
+ k_e_t
+ ws_t;
uint32_t
t2 =
- ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
- ^
- ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
- ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+ ((a0 << 30U | a0 >> 2U) ^ ((a0 << 19U | a0 >> 13U) ^ (a0 << 10U | a0 >> 22U)))
+ ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
uint32_t a1 = t1 + t2;
uint32_t b1 = a0;
@@ -129,74 +122,63 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
hash[5U] = f1;
hash[6U] = g1;
hash[7U] = h12;);
- if (i0 < (uint32_t)3U)
+ if (i0 < 3U)
{
KRML_MAYBE_FOR16(i,
- (uint32_t)0U,
- (uint32_t)16U,
- (uint32_t)1U,
+ 0U,
+ 16U,
+ 1U,
uint32_t t16 = ws[i];
- uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
- uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
- uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
- uint32_t
- s1 =
- (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
- ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
- uint32_t
- s0 =
- (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
- ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+ uint32_t t15 = ws[(i + 1U) % 16U];
+ uint32_t t7 = ws[(i + 9U) % 16U];
+ uint32_t t2 = ws[(i + 14U) % 16U];
+ uint32_t s1 = (t2 << 15U | t2 >> 17U) ^ ((t2 << 13U | t2 >> 19U) ^ t2 >> 10U);
+ uint32_t s0 = (t15 << 25U | t15 >> 7U) ^ ((t15 << 14U | t15 >> 18U) ^ t15 >> 3U);
ws[i] = s1 + t7 + s0 + t16;);
});
KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
+ 0U,
+ 8U,
+ 1U,
uint32_t *os = hash;
uint32_t x = hash[i] + hash_old[i];
os[i] = x;);
}
-void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+void Hacl_Hash_SHA2_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
{
- uint32_t blocks = len / (uint32_t)64U;
- for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+ uint32_t blocks = len / 64U;
+ for (uint32_t i = 0U; i < blocks; i++)
{
uint8_t *b0 = b;
- uint8_t *mb = b0 + i * (uint32_t)64U;
+ uint8_t *mb = b0 + i * 64U;
sha256_update(mb, st);
}
}
void
-Hacl_SHA2_Scalar32_sha256_update_last(
- uint64_t totlen,
- uint32_t len,
- uint8_t *b,
- uint32_t *hash
-)
+Hacl_Hash_SHA2_sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash)
{
uint32_t blocks;
- if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+ if (len + 8U + 1U <= 64U)
{
- blocks = (uint32_t)1U;
+ blocks = 1U;
}
else
{
- blocks = (uint32_t)2U;
+ blocks = 2U;
}
- uint32_t fin = blocks * (uint32_t)64U;
+ uint32_t fin = blocks * 64U;
uint8_t last[128U] = { 0U };
uint8_t totlen_buf[8U] = { 0U };
- uint64_t total_len_bits = totlen << (uint32_t)3U;
+ uint64_t total_len_bits = totlen << 3U;
store64_be(totlen_buf, total_len_bits);
uint8_t *b0 = b;
memcpy(last, b0, len * sizeof (uint8_t));
- last[len] = (uint8_t)0x80U;
- memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+ last[len] = 0x80U;
+ memcpy(last + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
uint8_t *last00 = last;
- uint8_t *last10 = last + (uint32_t)64U;
+ uint8_t *last10 = last + 64U;
uint8_t *l0 = last00;
uint8_t *l1 = last10;
uint8_t *lb0 = l0;
@@ -204,65 +186,56 @@ Hacl_SHA2_Scalar32_sha256_update_last(
uint8_t *last0 = lb0;
uint8_t *last1 = lb1;
sha256_update(last0, hash);
- if (blocks > (uint32_t)1U)
+ if (blocks > 1U)
{
sha256_update(last1, hash);
return;
}
}
-void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h)
{
uint8_t hbuf[32U] = { 0U };
- KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
- store32_be(hbuf + i * (uint32_t)4U, st[i]););
- memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+ KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+ memcpy(h, hbuf, 32U * sizeof (uint8_t));
}
-void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash)
+void Hacl_Hash_SHA2_sha224_init(uint32_t *hash)
{
KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
+ 0U,
+ 8U,
+ 1U,
uint32_t *os = hash;
- uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+ uint32_t x = Hacl_Hash_SHA2_h224[i];
os[i] = x;);
}
static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
{
- Hacl_SHA2_Scalar32_sha256_update_nblocks(len, b, st);
+ Hacl_Hash_SHA2_sha256_update_nblocks(len, b, st);
}
-void
-Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
+void Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
{
- Hacl_SHA2_Scalar32_sha256_update_last(totlen, len, b, st);
+ Hacl_Hash_SHA2_sha256_update_last(totlen, len, b, st);
}
-void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha224_finish(uint32_t *st, uint8_t *h)
{
uint8_t hbuf[32U] = { 0U };
- KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
- store32_be(hbuf + i * (uint32_t)4U, st[i]););
- memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+ KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+ memcpy(h, hbuf, 28U * sizeof (uint8_t));
}
-void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash)
+void Hacl_Hash_SHA2_sha512_init(uint64_t *hash)
{
KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
+ 0U,
+ 8U,
+ 1U,
uint64_t *os = hash;
- uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+ uint64_t x = Hacl_Hash_SHA2_h512[i];
os[i] = x;);
}
@@ -270,49 +243,49 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
{
uint64_t hash_old[8U] = { 0U };
uint64_t ws[16U] = { 0U };
- memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t));
+ memcpy(hash_old, hash, 8U * sizeof (uint64_t));
uint8_t *b10 = b;
uint64_t u = load64_be(b10);
ws[0U] = u;
- uint64_t u0 = load64_be(b10 + (uint32_t)8U);
+ uint64_t u0 = load64_be(b10 + 8U);
ws[1U] = u0;
- uint64_t u1 = load64_be(b10 + (uint32_t)16U);
+ uint64_t u1 = load64_be(b10 + 16U);
ws[2U] = u1;
- uint64_t u2 = load64_be(b10 + (uint32_t)24U);
+ uint64_t u2 = load64_be(b10 + 24U);
ws[3U] = u2;
- uint64_t u3 = load64_be(b10 + (uint32_t)32U);
+ uint64_t u3 = load64_be(b10 + 32U);
ws[4U] = u3;
- uint64_t u4 = load64_be(b10 + (uint32_t)40U);
+ uint64_t u4 = load64_be(b10 + 40U);
ws[5U] = u4;
- uint64_t u5 = load64_be(b10 + (uint32_t)48U);
+ uint64_t u5 = load64_be(b10 + 48U);
ws[6U] = u5;
- uint64_t u6 = load64_be(b10 + (uint32_t)56U);
+ uint64_t u6 = load64_be(b10 + 56U);
ws[7U] = u6;
- uint64_t u7 = load64_be(b10 + (uint32_t)64U);
+ uint64_t u7 = load64_be(b10 + 64U);
ws[8U] = u7;
- uint64_t u8 = load64_be(b10 + (uint32_t)72U);
+ uint64_t u8 = load64_be(b10 + 72U);
ws[9U] = u8;
- uint64_t u9 = load64_be(b10 + (uint32_t)80U);
+ uint64_t u9 = load64_be(b10 + 80U);
ws[10U] = u9;
- uint64_t u10 = load64_be(b10 + (uint32_t)88U);
+ uint64_t u10 = load64_be(b10 + 88U);
ws[11U] = u10;
- uint64_t u11 = load64_be(b10 + (uint32_t)96U);
+ uint64_t u11 = load64_be(b10 + 96U);
ws[12U] = u11;
- uint64_t u12 = load64_be(b10 + (uint32_t)104U);
+ uint64_t u12 = load64_be(b10 + 104U);
ws[13U] = u12;
- uint64_t u13 = load64_be(b10 + (uint32_t)112U);
+ uint64_t u13 = load64_be(b10 + 112U);
ws[14U] = u13;
- uint64_t u14 = load64_be(b10 + (uint32_t)120U);
+ uint64_t u14 = load64_be(b10 + 120U);
ws[15U] = u14;
KRML_MAYBE_FOR5(i0,
- (uint32_t)0U,
- (uint32_t)5U,
- (uint32_t)1U,
+ 0U,
+ 5U,
+ 1U,
KRML_MAYBE_FOR16(i,
- (uint32_t)0U,
- (uint32_t)16U,
- (uint32_t)1U,
- uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+ 0U,
+ 16U,
+ 1U,
+ uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
uint64_t ws_t = ws[i];
uint64_t a0 = hash[0U];
uint64_t b0 = hash[1U];
@@ -326,20 +299,13 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
uint64_t
t1 =
h02
- +
- ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U)
- ^
- ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U)
- ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U)))
+ + ((e0 << 50U | e0 >> 14U) ^ ((e0 << 46U | e0 >> 18U) ^ (e0 << 23U | e0 >> 41U)))
+ ((e0 & f0) ^ (~e0 & g0))
+ k_e_t
+ ws_t;
uint64_t
t2 =
- ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U)
- ^
- ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U)
- ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U)))
+ ((a0 << 36U | a0 >> 28U) ^ ((a0 << 30U | a0 >> 34U) ^ (a0 << 25U | a0 >> 39U)))
+ ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
uint64_t a1 = t1 + t2;
uint64_t b1 = a0;
@@ -357,48 +323,42 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
hash[5U] = f1;
hash[6U] = g1;
hash[7U] = h12;);
- if (i0 < (uint32_t)4U)
+ if (i0 < 4U)
{
KRML_MAYBE_FOR16(i,
- (uint32_t)0U,
- (uint32_t)16U,
- (uint32_t)1U,
+ 0U,
+ 16U,
+ 1U,
uint64_t t16 = ws[i];
- uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
- uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
- uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
- uint64_t
- s1 =
- (t2 << (uint32_t)45U | t2 >> (uint32_t)19U)
- ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U);
- uint64_t
- s0 =
- (t15 << (uint32_t)63U | t15 >> (uint32_t)1U)
- ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U);
+ uint64_t t15 = ws[(i + 1U) % 16U];
+ uint64_t t7 = ws[(i + 9U) % 16U];
+ uint64_t t2 = ws[(i + 14U) % 16U];
+ uint64_t s1 = (t2 << 45U | t2 >> 19U) ^ ((t2 << 3U | t2 >> 61U) ^ t2 >> 6U);
+ uint64_t s0 = (t15 << 63U | t15 >> 1U) ^ ((t15 << 56U | t15 >> 8U) ^ t15 >> 7U);
ws[i] = s1 + t7 + s0 + t16;);
});
KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
+ 0U,
+ 8U,
+ 1U,
uint64_t *os = hash;
uint64_t x = hash[i] + hash_old[i];
os[i] = x;);
}
-void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+void Hacl_Hash_SHA2_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
{
- uint32_t blocks = len / (uint32_t)128U;
- for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+ uint32_t blocks = len / 128U;
+ for (uint32_t i = 0U; i < blocks; i++)
{
uint8_t *b0 = b;
- uint8_t *mb = b0 + i * (uint32_t)128U;
+ uint8_t *mb = b0 + i * 128U;
sha512_update(mb, st);
}
}
void
-Hacl_SHA2_Scalar32_sha512_update_last(
+Hacl_Hash_SHA2_sha512_update_last(
FStar_UInt128_uint128 totlen,
uint32_t len,
uint8_t *b,
@@ -406,25 +366,25 @@ Hacl_SHA2_Scalar32_sha512_update_last(
)
{
uint32_t blocks;
- if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+ if (len + 16U + 1U <= 128U)
{
- blocks = (uint32_t)1U;
+ blocks = 1U;
}
else
{
- blocks = (uint32_t)2U;
+ blocks = 2U;
}
- uint32_t fin = blocks * (uint32_t)128U;
+ uint32_t fin = blocks * 128U;
uint8_t last[256U] = { 0U };
uint8_t totlen_buf[16U] = { 0U };
- FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+ FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
store128_be(totlen_buf, total_len_bits);
uint8_t *b0 = b;
memcpy(last, b0, len * sizeof (uint8_t));
- last[len] = (uint8_t)0x80U;
- memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+ last[len] = 0x80U;
+ memcpy(last + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
uint8_t *last00 = last;
- uint8_t *last10 = last + (uint32_t)128U;
+ uint8_t *last10 = last + 128U;
uint8_t *l0 = last00;
uint8_t *l1 = last10;
uint8_t *lb0 = l0;
@@ -432,76 +392,68 @@ Hacl_SHA2_Scalar32_sha512_update_last(
uint8_t *last0 = lb0;
uint8_t *last1 = lb1;
sha512_update(last0, hash);
- if (blocks > (uint32_t)1U)
+ if (blocks > 1U)
{
sha512_update(last1, hash);
return;
}
}
-void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha512_finish(uint64_t *st, uint8_t *h)
{
uint8_t hbuf[64U] = { 0U };
- KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
- store64_be(hbuf + i * (uint32_t)8U, st[i]););
- memcpy(h, hbuf, (uint32_t)64U * sizeof (uint8_t));
+ KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+ memcpy(h, hbuf, 64U * sizeof (uint8_t));
}
-void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash)
+void Hacl_Hash_SHA2_sha384_init(uint64_t *hash)
{
KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
+ 0U,
+ 8U,
+ 1U,
uint64_t *os = hash;
- uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+ uint64_t x = Hacl_Hash_SHA2_h384[i];
os[i] = x;);
}
-void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+void Hacl_Hash_SHA2_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
{
- Hacl_SHA2_Scalar32_sha512_update_nblocks(len, b, st);
+ Hacl_Hash_SHA2_sha512_update_nblocks(len, b, st);
}
void
-Hacl_SHA2_Scalar32_sha384_update_last(
+Hacl_Hash_SHA2_sha384_update_last(
FStar_UInt128_uint128 totlen,
uint32_t len,
uint8_t *b,
uint64_t *st
)
{
- Hacl_SHA2_Scalar32_sha512_update_last(totlen, len, b, st);
+ Hacl_Hash_SHA2_sha512_update_last(totlen, len, b, st);
}
-void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha384_finish(uint64_t *st, uint8_t *h)
{
uint8_t hbuf[64U] = { 0U };
- KRML_MAYBE_FOR8(i,
- (uint32_t)0U,
- (uint32_t)8U,
- (uint32_t)1U,
- store64_be(hbuf + i * (uint32_t)8U, st[i]););
- memcpy(h, hbuf, (uint32_t)48U * sizeof (uint8_t));
+ KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+ memcpy(h, hbuf, 48U * sizeof (uint8_t));
}
/**
Allocate initial state for the SHA2_256 hash. The state is to be freed by
calling `free_256`.
*/
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void)
{
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
Hacl_Streaming_MD_state_32
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
Hacl_Streaming_MD_state_32
*p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
p[0U] = s;
- Hacl_SHA2_Scalar32_sha256_init(block_state);
+ Hacl_Hash_SHA2_sha256_init(block_state);
return p;
}
@@ -511,16 +463,16 @@ The state is to be freed by calling `free_256`. Cloning the state this way is
useful, for instance, if your control-flow diverges and you need to feed
more (different) data into the hash in each branch.
*/
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s0;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state0 = scrut.block_state;
uint8_t *buf0 = scrut.buf;
uint64_t total_len0 = scrut.total_len;
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
- memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ memcpy(buf, buf0, 64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, 8U * sizeof (uint32_t));
Hacl_Streaming_MD_state_32
s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
Hacl_Streaming_MD_state_32
@@ -532,54 +484,54 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state
/**
Reset an existing state to the initial hash state with empty data.
*/
-void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
- Hacl_SHA2_Scalar32_sha256_init(block_state);
+ Hacl_Hash_SHA2_sha256_init(block_state);
Hacl_Streaming_MD_state_32
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
static inline Hacl_Streaming_Types_error_code
-update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
{
- Hacl_Streaming_MD_state_32 s = *p;
+ Hacl_Streaming_MD_state_32 s = *state;
uint64_t total_len = s.total_len;
- if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
{
return Hacl_Streaming_Types_MaximumLengthExceeded;
}
uint32_t sz;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- sz = (uint32_t)64U;
+ sz = 64U;
}
else
{
- sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ sz = (uint32_t)(total_len % (uint64_t)64U);
}
- if (len <= (uint32_t)64U - sz)
+ if (chunk_len <= 64U - sz)
{
- Hacl_Streaming_MD_state_32 s1 = *p;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
uint8_t *buf2 = buf + sz1;
- memcpy(buf2, data, len * sizeof (uint8_t));
- uint64_t total_len2 = total_len1 + (uint64_t)len;
- *p
+ memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+ *state
=
(
(Hacl_Streaming_MD_state_32){
@@ -589,76 +541,74 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
}
);
}
- else if (sz == (uint32_t)0U)
+ else if (sz == 0U)
{
- Hacl_Streaming_MD_state_32 s1 = *p;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+ Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1);
}
uint32_t ite;
- if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
}
- uint32_t n_blocks = (len - ite) / (uint32_t)64U;
- uint32_t data1_len = n_blocks * (uint32_t)64U;
- uint32_t data2_len = len - data1_len;
- uint8_t *data1 = data;
- uint8_t *data2 = data + data1_len;
- Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
- data1,
- block_state1);
+ uint32_t n_blocks = (chunk_len - ite) / 64U;
+ uint32_t data1_len = n_blocks * 64U;
+ uint32_t data2_len = chunk_len - data1_len;
+ uint8_t *data1 = chunk;
+ uint8_t *data2 = chunk + data1_len;
+ Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
uint8_t *dst = buf;
memcpy(dst, data2, data2_len * sizeof (uint8_t));
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_32){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)len
+ .total_len = total_len1 + (uint64_t)chunk_len
}
);
}
else
{
- uint32_t diff = (uint32_t)64U - sz;
- uint8_t *data1 = data;
- uint8_t *data2 = data + diff;
- Hacl_Streaming_MD_state_32 s1 = *p;
+ uint32_t diff = 64U - sz;
+ uint8_t *chunk1 = chunk;
+ uint8_t *chunk2 = chunk + diff;
+ Hacl_Streaming_MD_state_32 s1 = *state;
uint32_t *block_state10 = s1.block_state;
uint8_t *buf0 = s1.buf;
uint64_t total_len10 = s1.total_len;
uint32_t sz10;
- if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
{
- sz10 = (uint32_t)64U;
+ sz10 = 64U;
}
else
{
- sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
}
uint8_t *buf2 = buf0 + sz10;
- memcpy(buf2, data1, diff * sizeof (uint8_t));
+ memcpy(buf2, chunk1, diff * sizeof (uint8_t));
uint64_t total_len2 = total_len10 + (uint64_t)diff;
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_32){
@@ -667,55 +617,48 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
.total_len = total_len2
}
);
- Hacl_Streaming_MD_state_32 s10 = *p;
+ Hacl_Streaming_MD_state_32 s10 = *state;
uint32_t *block_state1 = s10.block_state;
uint8_t *buf = s10.buf;
uint64_t total_len1 = s10.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)64U;
+ sz1 = 64U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+ Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1);
}
uint32_t ite;
if
- (
- (uint64_t)(len - diff)
- % (uint64_t)(uint32_t)64U
- == (uint64_t)0U
- && (uint64_t)(len - diff) > (uint64_t)0U
- )
+ ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
}
- uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
- uint32_t data1_len = n_blocks * (uint32_t)64U;
- uint32_t data2_len = len - diff - data1_len;
- uint8_t *data11 = data2;
- uint8_t *data21 = data2 + data1_len;
- Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
- data11,
- block_state1);
+ uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+ uint32_t data1_len = n_blocks * 64U;
+ uint32_t data2_len = chunk_len - diff - data1_len;
+ uint8_t *data1 = chunk2;
+ uint8_t *data2 = chunk2 + data1_len;
+ Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
uint8_t *dst = buf;
- memcpy(dst, data21, data2_len * sizeof (uint8_t));
- *p
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *state
=
(
(Hacl_Streaming_MD_state_32){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)(len - diff)
+ .total_len = total_len1 + (uint64_t)(chunk_len - diff)
}
);
}
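
A minimal, self-contained sketch of the `sz` computation that update_224_256 repeats above; it is illustrative only and not part of the diff. The point it demonstrates: for a 64-byte block, a state whose absorbed length is a non-zero multiple of 64 reports a full 64-byte pending buffer rather than an empty one, so the buffered block is flushed lazily on the next call.

/* Illustrative only: mirrors the `sz` computation in update_224_256 above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t buffered_bytes(uint64_t total_len, uint32_t block)
{
    if (total_len % (uint64_t)block == 0ULL && total_len > 0ULL)
        return block;          /* buffer is exactly full, not empty */
    return (uint32_t)(total_len % (uint64_t)block);
}

int main(void)
{
    printf("%u\n", buffered_bytes(0U, 64U));    /* 0: nothing buffered yet      */
    printf("%u\n", buffered_bytes(10U, 64U));   /* 10 bytes pending             */
    printf("%u\n", buffered_bytes(64U, 64U));   /* 64: full block held back     */
    printf("%u\n", buffered_bytes(130U, 64U));  /* 2 bytes pending              */
    return 0;
}
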
@@ -725,209 +668,203 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
/**
Feed an arbitrary amount of data into the hash. This function returns 0 for
success, or 1 if the combined length of all of the data passed to `update_256`
-(since the last call to `init_256`) exceeds 2^61-1 bytes.
+(since the last call to `reset_256`) exceeds 2^61-1 bytes.
This function is identical to the update function for SHA2_224.
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_256(
- Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_256(
+ Hacl_Streaming_MD_state_32 *state,
uint8_t *input,
uint32_t input_len
)
{
- return update_224_256(p, input, input_len);
+ return update_224_256(state, input, input_len);
}
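
A hedged caller-side fragment, not part of the diff: the doc comment above says update_256 returns 0 on success and 1 once the combined input exceeds 2^61-1 bytes (2305843009213693951, the same bound tested in update_224_256), so callers can surface that condition. Only declarations visible in this diff's Hacl_Hash_SHA2.h are used; the `feed` helper name is invented for illustration.

/* Illustrative only: check the documented error code from update_256. */
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_SHA2.h"

static int feed(Hacl_Streaming_MD_state_32 *st, uint8_t *buf, uint32_t len)
{
    Hacl_Streaming_Types_error_code rc = Hacl_Hash_SHA2_update_256(st, buf, len);
    if (rc != 0) {             /* documented: 0 on success, 1 on overflow */
        fprintf(stderr, "SHA-256 input exceeded 2^61-1 bytes\n");
        return -1;
    }
    return 0;
}
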
/**
-Write the resulting hash into `dst`, an array of 32 bytes. The state remains
-valid after a call to `finish_256`, meaning the user may feed more data into
-the hash via `update_256`. (The finish_256 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 32 bytes. The state remains
+valid after a call to `digest_256`, meaning the user may feed more data into
+the hash via `update_256`. (The digest_256 function operates on an internal copy of
the state and therefore does not invalidate the client-held state `p`.)
*/
-void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
{
- Hacl_Streaming_MD_state_32 scrut = *p;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state = scrut.block_state;
uint8_t *buf_ = scrut.buf;
uint64_t total_len = scrut.total_len;
uint32_t r;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- r = (uint32_t)64U;
+ r = 64U;
}
else
{
- r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ r = (uint32_t)(total_len % (uint64_t)64U);
}
uint8_t *buf_1 = buf_;
uint32_t tmp_block_state[8U] = { 0U };
- memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+ memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
uint32_t ite;
- if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ if (r % 64U == 0U && r > 0U)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = r % (uint32_t)64U;
+ ite = r % 64U;
}
uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
- Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ Hacl_Hash_SHA2_sha256_update_nblocks(0U, buf_multi, tmp_block_state);
uint64_t prev_len_last = total_len - (uint64_t)r;
- Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r,
- r,
- buf_last,
- tmp_block_state);
- Hacl_SHA2_Scalar32_sha256_finish(tmp_block_state, dst);
+ Hacl_Hash_SHA2_sha256_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+ Hacl_Hash_SHA2_sha256_finish(tmp_block_state, output);
}
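
A small usage sketch, assuming only the functions renamed in this diff and that malloc_256 returns an initialized state (as its header comment states); it is not part of the change. It shows the property documented above: digest_256 works on an internal copy, so a caller can take an intermediate digest and keep feeding data into the same state.

/* Illustrative only: intermediate digest, then continue updating. */
#include <stdint.h>
#include "Hacl_Hash_SHA2.h"

int main(void)
{
    uint8_t h1[32], h2[32];
    Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();

    Hacl_Hash_SHA2_update_256(st, (uint8_t *)"ab", 2U);
    Hacl_Hash_SHA2_digest_256(st, h1);            /* SHA-256("ab")  */

    Hacl_Hash_SHA2_update_256(st, (uint8_t *)"c", 1U);
    Hacl_Hash_SHA2_digest_256(st, h2);            /* SHA-256("abc") */

    Hacl_Hash_SHA2_free_256(st);
    return 0;
}
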
/**
-Free a state allocated with `create_in_256`.
+Free a state allocated with `malloc_256`.
This function is identical to the free function for SHA2_224.
*/
-void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_free_256(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
KRML_HOST_FREE(block_state);
KRML_HOST_FREE(buf);
- KRML_HOST_FREE(s);
+ KRML_HOST_FREE(state);
}
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 32 bytes.
*/
-void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len)
{
uint8_t *ib = input;
- uint8_t *rb = dst;
+ uint8_t *rb = output;
uint32_t st[8U] = { 0U };
- Hacl_SHA2_Scalar32_sha256_init(st);
- uint32_t rem = input_len % (uint32_t)64U;
+ Hacl_Hash_SHA2_sha256_init(st);
+ uint32_t rem = input_len % 64U;
uint64_t len_ = (uint64_t)input_len;
- Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st);
- uint32_t rem1 = input_len % (uint32_t)64U;
+ Hacl_Hash_SHA2_sha256_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % 64U;
uint8_t *b0 = ib;
uint8_t *lb = b0 + input_len - rem1;
- Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st);
- Hacl_SHA2_Scalar32_sha256_finish(st, rb);
+ Hacl_Hash_SHA2_sha256_update_last(len_, rem, lb, st);
+ Hacl_Hash_SHA2_sha256_finish(st, rb);
}
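
A minimal end-to-end sketch of the renamed streaming API (malloc_256 / reset_256 / update_256 / digest_256 / free_256), reusing one state for two messages; illustrative only, using just the declarations present in this diff.

/* Illustrative only: one heap-allocated state, reset between messages. */
#include <stdint.h>
#include <string.h>
#include "Hacl_Hash_SHA2.h"

static void sha256_of(const char *msg, uint8_t out[32],
                      Hacl_Streaming_MD_state_32 *st)
{
    Hacl_Hash_SHA2_reset_256(st);                  /* formerly init_256 */
    Hacl_Hash_SHA2_update_256(st, (uint8_t *)msg, (uint32_t)strlen(msg));
    Hacl_Hash_SHA2_digest_256(st, out);            /* formerly finish_256 */
}

int main(void)
{
    uint8_t d1[32], d2[32];
    Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();
    sha256_of("first message", d1, st);
    sha256_of("second message", d2, st);
    Hacl_Hash_SHA2_free_256(st);
    return 0;
}
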
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void)
{
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
- uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
Hacl_Streaming_MD_state_32
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
Hacl_Streaming_MD_state_32
*p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
p[0U] = s;
- Hacl_SHA2_Scalar32_sha224_init(block_state);
+ Hacl_Hash_SHA2_sha224_init(block_state);
return p;
}
-void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_MD_state_32 scrut = *s;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint8_t *buf = scrut.buf;
uint32_t *block_state = scrut.block_state;
- Hacl_SHA2_Scalar32_sha224_init(block_state);
+ Hacl_Hash_SHA2_sha224_init(block_state);
Hacl_Streaming_MD_state_32
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_224(
- Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_224(
+ Hacl_Streaming_MD_state_32 *state,
uint8_t *input,
uint32_t input_len
)
{
- return update_224_256(p, input, input_len);
+ return update_224_256(state, input, input_len);
}
/**
-Write the resulting hash into `dst`, an array of 28 bytes. The state remains
-valid after a call to `finish_224`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 28 bytes. The state remains
+valid after a call to `digest_224`, meaning the user may feed more data into
the hash via `update_224`.
*/
-void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
{
- Hacl_Streaming_MD_state_32 scrut = *p;
+ Hacl_Streaming_MD_state_32 scrut = *state;
uint32_t *block_state = scrut.block_state;
uint8_t *buf_ = scrut.buf;
uint64_t total_len = scrut.total_len;
uint32_t r;
- if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
{
- r = (uint32_t)64U;
+ r = 64U;
}
else
{
- r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ r = (uint32_t)(total_len % (uint64_t)64U);
}
uint8_t *buf_1 = buf_;
uint32_t tmp_block_state[8U] = { 0U };
- memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+ memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
uint32_t ite;
- if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ if (r % 64U == 0U && r > 0U)
{
- ite = (uint32_t)64U;
+ ite = 64U;
}
else
{
- ite = r % (uint32_t)64U;
+ ite = r % 64U;
}
uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
- sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ sha224_update_nblocks(0U, buf_multi, tmp_block_state);
uint64_t prev_len_last = total_len - (uint64_t)r;
- Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r,
- r,
- buf_last,
- tmp_block_state);
- Hacl_SHA2_Scalar32_sha224_finish(tmp_block_state, dst);
+ Hacl_Hash_SHA2_sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+ Hacl_Hash_SHA2_sha224_finish(tmp_block_state, output);
}
-void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p)
+void Hacl_Hash_SHA2_free_224(Hacl_Streaming_MD_state_32 *state)
{
- Hacl_Streaming_SHA2_free_256(p);
+ Hacl_Hash_SHA2_free_256(state);
}
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 28 bytes.
*/
-void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len)
{
uint8_t *ib = input;
- uint8_t *rb = dst;
+ uint8_t *rb = output;
uint32_t st[8U] = { 0U };
- Hacl_SHA2_Scalar32_sha224_init(st);
- uint32_t rem = input_len % (uint32_t)64U;
+ Hacl_Hash_SHA2_sha224_init(st);
+ uint32_t rem = input_len % 64U;
uint64_t len_ = (uint64_t)input_len;
sha224_update_nblocks(input_len, ib, st);
- uint32_t rem1 = input_len % (uint32_t)64U;
+ uint32_t rem1 = input_len % 64U;
uint8_t *b0 = ib;
uint8_t *lb = b0 + input_len - rem1;
- Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st);
- Hacl_SHA2_Scalar32_sha224_finish(st, rb);
+ Hacl_Hash_SHA2_sha224_update_last(len_, rem, lb, st);
+ Hacl_Hash_SHA2_sha224_finish(st, rb);
}
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void)
{
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
- uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+ uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
Hacl_Streaming_MD_state_64
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
Hacl_Streaming_MD_state_64
*p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
p[0U] = s;
- Hacl_SHA2_Scalar32_sha512_init(block_state);
+ Hacl_Hash_SHA2_sha512_init(block_state);
return p;
}
@@ -937,16 +874,16 @@ The state is to be freed by calling `free_512`. Cloning the state this way is
useful, for instance, if your control-flow diverges and you need to feed
more (different) data into the hash in each branch.
*/
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state)
{
- Hacl_Streaming_MD_state_64 scrut = *s0;
+ Hacl_Streaming_MD_state_64 scrut = *state;
uint64_t *block_state0 = scrut.block_state;
uint8_t *buf0 = scrut.buf;
uint64_t total_len0 = scrut.total_len;
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
- memcpy(buf, buf0, (uint32_t)128U * sizeof (uint8_t));
- uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
- memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint64_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+ memcpy(buf, buf0, 128U * sizeof (uint8_t));
+ uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
+ memcpy(block_state, block_state0, 8U * sizeof (uint64_t));
Hacl_Streaming_MD_state_64
s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
Hacl_Streaming_MD_state_64
@@ -955,54 +892,54 @@ Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state
return p;
}
-void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state)
{
- Hacl_Streaming_MD_state_64 scrut = *s;
+ Hacl_Streaming_MD_state_64 scrut = *state;
uint8_t *buf = scrut.buf;
uint64_t *block_state = scrut.block_state;
- Hacl_SHA2_Scalar32_sha512_init(block_state);
+ Hacl_Hash_SHA2_sha512_init(block_state);
Hacl_Streaming_MD_state_64
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
static inline Hacl_Streaming_Types_error_code
-update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
+update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk_len)
{
- Hacl_Streaming_MD_state_64 s = *p;
+ Hacl_Streaming_MD_state_64 s = *state;
uint64_t total_len = s.total_len;
- if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len)
+ if ((uint64_t)chunk_len > 18446744073709551615ULL - total_len)
{
return Hacl_Streaming_Types_MaximumLengthExceeded;
}
uint32_t sz;
- if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
{
- sz = (uint32_t)128U;
+ sz = 128U;
}
else
{
- sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+ sz = (uint32_t)(total_len % (uint64_t)128U);
}
- if (len <= (uint32_t)128U - sz)
+ if (chunk_len <= 128U - sz)
{
- Hacl_Streaming_MD_state_64 s1 = *p;
+ Hacl_Streaming_MD_state_64 s1 = *state;
uint64_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)128U;
+ sz1 = 128U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
}
uint8_t *buf2 = buf + sz1;
- memcpy(buf2, data, len * sizeof (uint8_t));
- uint64_t total_len2 = total_len1 + (uint64_t)len;
- *p
+ memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+ *state
=
(
(Hacl_Streaming_MD_state_64){
@@ -1012,76 +949,74 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
}
);
}
- else if (sz == (uint32_t)0U)
+ else if (sz == 0U)
{
- Hacl_Streaming_MD_state_64 s1 = *p;
+ Hacl_Streaming_MD_state_64 s1 = *state;
uint64_t *block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)128U;
+ sz1 = 128U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+ Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1);
}
uint32_t ite;
- if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
{
- ite = (uint32_t)128U;
+ ite = 128U;
}
else
{
- ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+ ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
}
- uint32_t n_blocks = (len - ite) / (uint32_t)128U;
- uint32_t data1_len = n_blocks * (uint32_t)128U;
- uint32_t data2_len = len - data1_len;
- uint8_t *data1 = data;
- uint8_t *data2 = data + data1_len;
- Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
- data1,
- block_state1);
+ uint32_t n_blocks = (chunk_len - ite) / 128U;
+ uint32_t data1_len = n_blocks * 128U;
+ uint32_t data2_len = chunk_len - data1_len;
+ uint8_t *data1 = chunk;
+ uint8_t *data2 = chunk + data1_len;
+ Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
uint8_t *dst = buf;
memcpy(dst, data2, data2_len * sizeof (uint8_t));
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_64){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)len
+ .total_len = total_len1 + (uint64_t)chunk_len
}
);
}
else
{
- uint32_t diff = (uint32_t)128U - sz;
- uint8_t *data1 = data;
- uint8_t *data2 = data + diff;
- Hacl_Streaming_MD_state_64 s1 = *p;
+ uint32_t diff = 128U - sz;
+ uint8_t *chunk1 = chunk;
+ uint8_t *chunk2 = chunk + diff;
+ Hacl_Streaming_MD_state_64 s1 = *state;
uint64_t *block_state10 = s1.block_state;
uint8_t *buf0 = s1.buf;
uint64_t total_len10 = s1.total_len;
uint32_t sz10;
- if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
{
- sz10 = (uint32_t)128U;
+ sz10 = 128U;
}
else
{
- sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+ sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
}
uint8_t *buf2 = buf0 + sz10;
- memcpy(buf2, data1, diff * sizeof (uint8_t));
+ memcpy(buf2, chunk1, diff * sizeof (uint8_t));
uint64_t total_len2 = total_len10 + (uint64_t)diff;
- *p
+ *state
=
(
(Hacl_Streaming_MD_state_64){
@@ -1090,55 +1025,48 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
.total_len = total_len2
}
);
- Hacl_Streaming_MD_state_64 s10 = *p;
+ Hacl_Streaming_MD_state_64 s10 = *state;
uint64_t *block_state1 = s10.block_state;
uint8_t *buf = s10.buf;
uint64_t total_len1 = s10.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
{
- sz1 = (uint32_t)128U;
+ sz1 = 128U;
}
else
{
- sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+ sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
- Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+ Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1);
}
uint32_t ite;
if
- (
- (uint64_t)(len - diff)
- % (uint64_t)(uint32_t)128U
- == (uint64_t)0U
- && (uint64_t)(len - diff) > (uint64_t)0U
- )
+ ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
{
- ite = (uint32_t)128U;
+ ite = 128U;
}
else
{
- ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+ ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
}
- uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
- uint32_t data1_len = n_blocks * (uint32_t)128U;
- uint32_t data2_len = len - diff - data1_len;
- uint8_t *data11 = data2;
- uint8_t *data21 = data2 + data1_len;
- Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
- data11,
- block_state1);
+ uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+ uint32_t data1_len = n_blocks * 128U;
+ uint32_t data2_len = chunk_len - diff - data1_len;
+ uint8_t *data1 = chunk2;
+ uint8_t *data2 = chunk2 + data1_len;
+ Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
uint8_t *dst = buf;
- memcpy(dst, data21, data2_len * sizeof (uint8_t));
- *p
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *state
=
(
(Hacl_Streaming_MD_state_64){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)(len - diff)
+ .total_len = total_len1 + (uint64_t)(chunk_len - diff)
}
);
}
@@ -1148,198 +1076,198 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
/**
Feed an arbitrary amount of data into the hash. This function returns 0 for
success, or 1 if the combined length of all of the data passed to `update_512`
-(since the last call to `init_512`) exceeds 2^125-1 bytes.
+(since the last call to `reset_512`) exceeds 2^125-1 bytes.
This function is identical to the update function for SHA2_384.
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_512(
- Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_512(
+ Hacl_Streaming_MD_state_64 *state,
uint8_t *input,
uint32_t input_len
)
{
- return update_384_512(p, input, input_len);
+ return update_384_512(state, input, input_len);
}
/**
-Write the resulting hash into `dst`, an array of 64 bytes. The state remains
-valid after a call to `finish_512`, meaning the user may feed more data into
-the hash via `update_512`. (The finish_512 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 64 bytes. The state remains
+valid after a call to `digest_512`, meaning the user may feed more data into
+the hash via `update_512`. (The digest_512 function operates on an internal copy of
the state and therefore does not invalidate the client-held state `p`.)
*/
-void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output)
{
- Hacl_Streaming_MD_state_64 scrut = *p;
+ Hacl_Streaming_MD_state_64 scrut = *state;
uint64_t *block_state = scrut.block_state;
uint8_t *buf_ = scrut.buf;
uint64_t total_len = scrut.total_len;
uint32_t r;
- if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
{
- r = (uint32_t)128U;
+ r = 128U;
}
else
{
- r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+ r = (uint32_t)(total_len % (uint64_t)128U);
}
uint8_t *buf_1 = buf_;
uint64_t tmp_block_state[8U] = { 0U };
- memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+ memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
uint32_t ite;
- if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+ if (r % 128U == 0U && r > 0U)
{
- ite = (uint32_t)128U;
+ ite = 128U;
}
else
{
- ite = r % (uint32_t)128U;
+ ite = r % 128U;
}
uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
- Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ Hacl_Hash_SHA2_sha512_update_nblocks(0U, buf_multi, tmp_block_state);
uint64_t prev_len_last = total_len - (uint64_t)r;
- Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+ Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
FStar_UInt128_uint64_to_uint128((uint64_t)r)),
r,
buf_last,
tmp_block_state);
- Hacl_SHA2_Scalar32_sha512_finish(tmp_block_state, dst);
+ Hacl_Hash_SHA2_sha512_finish(tmp_block_state, output);
}
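
A side note with a portable stand-in, not part of the diff: SHA-384/512 encode the processed length as a 128-bit value, which is why digest_512 above widens its 64-bit byte counters with FStar_UInt128_uint64_to_uint128 and combines them with FStar_UInt128_add before calling update_last. The struct and helpers below are an assumed, simplified illustration of that widening add, not the FStar_UInt128 implementation itself.

/* Illustrative only: what the uint64 -> uint128 widening-add amounts to. */
#include <stdint.h>

typedef struct { uint64_t low; uint64_t high; } u128;   /* stand-in type */

static u128 u128_from_u64(uint64_t x) { u128 r = { x, 0U }; return r; }

static u128 u128_add(u128 a, u128 b)
{
    u128 r;
    r.low  = a.low + b.low;
    r.high = a.high + b.high + (r.low < a.low);          /* propagate carry */
    return r;
}
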
/**
-Free a state allocated with `create_in_512`.
+Free a state allocated with `malloc_512`.
This function is identical to the free function for SHA2_384.
*/
-void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_free_512(Hacl_Streaming_MD_state_64 *state)
{
- Hacl_Streaming_MD_state_64 scrut = *s;
+ Hacl_Streaming_MD_state_64 scrut = *state;
uint8_t *buf = scrut.buf;
uint64_t *block_state = scrut.block_state;
KRML_HOST_FREE(block_state);
KRML_HOST_FREE(buf);
- KRML_HOST_FREE(s);
+ KRML_HOST_FREE(state);
}
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 64 bytes.
*/
-void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_512(uint8_t *output, uint8_t *input, uint32_t input_len)
{
uint8_t *ib = input;
- uint8_t *rb = dst;
+ uint8_t *rb = output;
uint64_t st[8U] = { 0U };
- Hacl_SHA2_Scalar32_sha512_init(st);
- uint32_t rem = input_len % (uint32_t)128U;
+ Hacl_Hash_SHA2_sha512_init(st);
+ uint32_t rem = input_len % 128U;
FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
- Hacl_SHA2_Scalar32_sha512_update_nblocks(input_len, ib, st);
- uint32_t rem1 = input_len % (uint32_t)128U;
+ Hacl_Hash_SHA2_sha512_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % 128U;
uint8_t *b0 = ib;
uint8_t *lb = b0 + input_len - rem1;
- Hacl_SHA2_Scalar32_sha512_update_last(len_, rem, lb, st);
- Hacl_SHA2_Scalar32_sha512_finish(st, rb);
+ Hacl_Hash_SHA2_sha512_update_last(len_, rem, lb, st);
+ Hacl_Hash_SHA2_sha512_finish(st, rb);
}
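
A hedged call-site sketch, not part of the diff, showing the parameter reordering visible above: the one-shot helpers now take the output buffer first, where the old Hacl_Streaming_SHA2_hash_512 took `dst` last.

/* Illustrative only:
   old (removed above):  Hacl_Streaming_SHA2_hash_512(input, input_len, dst);
   new (added above):    Hacl_Hash_SHA2_hash_512(output, input, input_len);   */
#include <stdint.h>
#include <string.h>
#include "Hacl_Hash_SHA2.h"

int main(void)
{
    const char *msg = "abc";
    uint8_t digest[64];                                   /* SHA-512 output */
    Hacl_Hash_SHA2_hash_512(digest, (uint8_t *)msg, (uint32_t)strlen(msg));
    return 0;
}
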
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void)
{
- uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
- uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+ uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
Hacl_Streaming_MD_state_64
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
Hacl_Streaming_MD_state_64
*p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
p[0U] = s;
- Hacl_SHA2_Scalar32_sha384_init(block_state);
+ Hacl_Hash_SHA2_sha384_init(block_state);
return p;
}
-void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state)
{
- Hacl_Streaming_MD_state_64 scrut = *s;
+ Hacl_Streaming_MD_state_64 scrut = *state;
uint8_t *buf = scrut.buf;
uint64_t *block_state = scrut.block_state;
- Hacl_SHA2_Scalar32_sha384_init(block_state);
+ Hacl_Hash_SHA2_sha384_init(block_state);
Hacl_Streaming_MD_state_64
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_384(
- Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_384(
+ Hacl_Streaming_MD_state_64 *state,
uint8_t *input,
uint32_t input_len
)
{
- return update_384_512(p, input, input_len);
+ return update_384_512(state, input, input_len);
}
/**
-Write the resulting hash into `dst`, an array of 48 bytes. The state remains
-valid after a call to `finish_384`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 48 bytes. The state remains
+valid after a call to `digest_384`, meaning the user may feed more data into
the hash via `update_384`.
*/
-void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output)
{
- Hacl_Streaming_MD_state_64 scrut = *p;
+ Hacl_Streaming_MD_state_64 scrut = *state;
uint64_t *block_state = scrut.block_state;
uint8_t *buf_ = scrut.buf;
uint64_t total_len = scrut.total_len;
uint32_t r;
- if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
{
- r = (uint32_t)128U;
+ r = 128U;
}
else
{
- r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+ r = (uint32_t)(total_len % (uint64_t)128U);
}
uint8_t *buf_1 = buf_;
uint64_t tmp_block_state[8U] = { 0U };
- memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+ memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
uint32_t ite;
- if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+ if (r % 128U == 0U && r > 0U)
{
- ite = (uint32_t)128U;
+ ite = 128U;
}
else
{
- ite = r % (uint32_t)128U;
+ ite = r % 128U;
}
uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
- Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ Hacl_Hash_SHA2_sha384_update_nblocks(0U, buf_multi, tmp_block_state);
uint64_t prev_len_last = total_len - (uint64_t)r;
- Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+ Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
FStar_UInt128_uint64_to_uint128((uint64_t)r)),
r,
buf_last,
tmp_block_state);
- Hacl_SHA2_Scalar32_sha384_finish(tmp_block_state, dst);
+ Hacl_Hash_SHA2_sha384_finish(tmp_block_state, output);
}
-void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p)
+void Hacl_Hash_SHA2_free_384(Hacl_Streaming_MD_state_64 *state)
{
- Hacl_Streaming_SHA2_free_512(p);
+ Hacl_Hash_SHA2_free_512(state);
}
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 48 bytes.
*/
-void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_384(uint8_t *output, uint8_t *input, uint32_t input_len)
{
uint8_t *ib = input;
- uint8_t *rb = dst;
+ uint8_t *rb = output;
uint64_t st[8U] = { 0U };
- Hacl_SHA2_Scalar32_sha384_init(st);
- uint32_t rem = input_len % (uint32_t)128U;
+ Hacl_Hash_SHA2_sha384_init(st);
+ uint32_t rem = input_len % 128U;
FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
- Hacl_SHA2_Scalar32_sha384_update_nblocks(input_len, ib, st);
- uint32_t rem1 = input_len % (uint32_t)128U;
+ Hacl_Hash_SHA2_sha384_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % 128U;
uint8_t *b0 = ib;
uint8_t *lb = b0 + input_len - rem1;
- Hacl_SHA2_Scalar32_sha384_update_last(len_, rem, lb, st);
- Hacl_SHA2_Scalar32_sha384_finish(st, rb);
+ Hacl_Hash_SHA2_sha384_update_last(len_, rem, lb, st);
+ Hacl_Hash_SHA2_sha384_finish(st, rb);
}
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h
index a0e731094df..d8204b504ba 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA2.h
@@ -39,19 +39,19 @@ extern "C" {
#include "Hacl_Streaming_Types.h"
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_224;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA2_state_t_224;
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_256;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA2_state_t_256;
-typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_384;
+typedef Hacl_Streaming_MD_state_64 Hacl_Hash_SHA2_state_t_384;
-typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_512;
+typedef Hacl_Streaming_MD_state_64 Hacl_Hash_SHA2_state_t_512;
/**
Allocate initial state for the SHA2_256 hash. The state is to be freed by
calling `free_256`.
*/
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void);
/**
Copies the state passed as argument into a newly allocated state (deep copy).
@@ -59,73 +59,73 @@ The state is to be freed by calling `free_256`. Cloning the state this way is
useful, for instance, if your control-flow diverges and you need to feed
more (different) data into the hash in each branch.
*/
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state);
/**
Reset an existing state to the initial hash state with empty data.
*/
-void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state);
/**
Feed an arbitrary amount of data into the hash. This function returns 0 for
success, or 1 if the combined length of all of the data passed to `update_256`
-(since the last call to `init_256`) exceeds 2^61-1 bytes.
+(since the last call to `reset_256`) exceeds 2^61-1 bytes.
This function is identical to the update function for SHA2_224.
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_256(
- Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_256(
+ Hacl_Streaming_MD_state_32 *state,
uint8_t *input,
uint32_t input_len
);
/**
-Write the resulting hash into `dst`, an array of 32 bytes. The state remains
-valid after a call to `finish_256`, meaning the user may feed more data into
-the hash via `update_256`. (The finish_256 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 32 bytes. The state remains
+valid after a call to `digest_256`, meaning the user may feed more data into
+the hash via `update_256`. (The digest_256 function operates on an internal copy of
the state and therefore does not invalidate the client-held state `p`.)
*/
-void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
/**
-Free a state allocated with `create_in_256`.
+Free a state allocated with `malloc_256`.
This function is identical to the free function for SHA2_224.
*/
-void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_free_256(Hacl_Streaming_MD_state_32 *state);
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 32 bytes.
*/
-void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len);
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void);
-void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state);
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_224(
- Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_224(
+ Hacl_Streaming_MD_state_32 *state,
uint8_t *input,
uint32_t input_len
);
/**
-Write the resulting hash into `dst`, an array of 28 bytes. The state remains
-valid after a call to `finish_224`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 28 bytes. The state remains
+valid after a call to `digest_224`, meaning the user may feed more data into
the hash via `update_224`.
*/
-void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
-void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p);
+void Hacl_Hash_SHA2_free_224(Hacl_Streaming_MD_state_32 *state);
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 28 bytes.
*/
-void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len);
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void);
/**
Copies the state passed as argument into a newly allocated state (deep copy).
@@ -133,68 +133,68 @@ The state is to be freed by calling `free_512`. Cloning the state this way is
useful, for instance, if your control-flow diverges and you need to feed
more (different) data into the hash in each branch.
*/
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state);
-void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state);
/**
Feed an arbitrary amount of data into the hash. This function returns 0 for
success, or 1 if the combined length of all of the data passed to `update_512`
-(since the last call to `init_512`) exceeds 2^125-1 bytes.
+(since the last call to `reset_512`) exceeds 2^125-1 bytes.
This function is identical to the update function for SHA2_384.
*/
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_512(
- Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_512(
+ Hacl_Streaming_MD_state_64 *state,
uint8_t *input,
uint32_t input_len
);
/**
-Write the resulting hash into `dst`, an array of 64 bytes. The state remains
-valid after a call to `finish_512`, meaning the user may feed more data into
-the hash via `update_512`. (The finish_512 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 64 bytes. The state remains
+valid after a call to `digest_512`, meaning the user may feed more data into
+the hash via `update_512`. (The digest_512 function operates on an internal copy of
the state and therefore does not invalidate the client-held state `p`.)
*/
-void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output);
/**
-Free a state allocated with `create_in_512`.
+Free a state allocated with `malloc_512`.
This function is identical to the free function for SHA2_384.
*/
-void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_free_512(Hacl_Streaming_MD_state_64 *state);
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 64 bytes.
*/
-void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_512(uint8_t *output, uint8_t *input, uint32_t input_len);
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void);
-void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state);
Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_384(
- Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_384(
+ Hacl_Streaming_MD_state_64 *state,
uint8_t *input,
uint32_t input_len
);
/**
-Write the resulting hash into `dst`, an array of 48 bytes. The state remains
-valid after a call to `finish_384`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 48 bytes. The state remains
+valid after a call to `digest_384`, meaning the user may feed more data into
the hash via `update_384`.
*/
-void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output);
-void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p);
+void Hacl_Hash_SHA2_free_384(Hacl_Streaming_MD_state_64 *state);
/**
-Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 48 bytes.
*/
-void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_384(uint8_t *output, uint8_t *input, uint32_t input_len);
#if defined(__cplusplus)
}
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c
index b3febdfeb2b..4f502866fe0 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.c
@@ -31,27 +31,27 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
{
case Spec_Hash_Definitions_SHA3_224:
{
- return (uint32_t)144U;
+ return 144U;
}
case Spec_Hash_Definitions_SHA3_256:
{
- return (uint32_t)136U;
+ return 136U;
}
case Spec_Hash_Definitions_SHA3_384:
{
- return (uint32_t)104U;
+ return 104U;
}
case Spec_Hash_Definitions_SHA3_512:
{
- return (uint32_t)72U;
+ return 72U;
}
case Spec_Hash_Definitions_Shake128:
{
- return (uint32_t)168U;
+ return 168U;
}
case Spec_Hash_Definitions_Shake256:
{
- return (uint32_t)136U;
+ return 136U;
}
default:
{
@@ -67,19 +67,19 @@ static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
{
case Spec_Hash_Definitions_SHA3_224:
{
- return (uint32_t)28U;
+ return 28U;
}
case Spec_Hash_Definitions_SHA3_256:
{
- return (uint32_t)32U;
+ return 32U;
}
case Spec_Hash_Definitions_SHA3_384:
{
- return (uint32_t)48U;
+ return 48U;
}
case Spec_Hash_Definitions_SHA3_512:
{
- return (uint32_t)64U;
+ return 64U;
}
default:
{
@@ -97,10 +97,10 @@ Hacl_Hash_SHA3_update_multi_sha3(
uint32_t n_blocks
)
{
- for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ for (uint32_t i = 0U; i < n_blocks; i++)
{
uint8_t *block = blocks + i * block_len(a);
- Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s);
+ Hacl_Hash_SHA3_absorb_inner(block_len(a), block, s);
}
}
@@ -115,139 +115,139 @@ Hacl_Hash_SHA3_update_last_sha3(
uint8_t suffix;
if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
{
- suffix = (uint8_t)0x1fU;
+ suffix = 0x1fU;
}
else
{
- suffix = (uint8_t)0x06U;
+ suffix = 0x06U;
}
uint32_t len = block_len(a);
if (input_len == len)
{
- Hacl_Impl_SHA3_absorb_inner(len, input, s);
- uint8_t *uu____0 = input + input_len;
+ Hacl_Hash_SHA3_absorb_inner(len, input, s);
uint8_t lastBlock_[200U] = { 0U };
uint8_t *lastBlock = lastBlock_;
- memcpy(lastBlock, uu____0, (uint32_t)0U * sizeof (uint8_t));
+ memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t));
lastBlock[0U] = suffix;
- Hacl_Impl_SHA3_loadState(len, lastBlock, s);
- if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U)
+ Hacl_Hash_SHA3_loadState(len, lastBlock, s);
+ if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U)
{
- Hacl_Impl_SHA3_state_permute(s);
+ Hacl_Hash_SHA3_state_permute(s);
}
uint8_t nextBlock_[200U] = { 0U };
uint8_t *nextBlock = nextBlock_;
- nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
- Hacl_Impl_SHA3_loadState(len, nextBlock, s);
- Hacl_Impl_SHA3_state_permute(s);
+ nextBlock[len - 1U] = 0x80U;
+ Hacl_Hash_SHA3_loadState(len, nextBlock, s);
+ Hacl_Hash_SHA3_state_permute(s);
return;
}
uint8_t lastBlock_[200U] = { 0U };
uint8_t *lastBlock = lastBlock_;
memcpy(lastBlock, input, input_len * sizeof (uint8_t));
lastBlock[input_len] = suffix;
- Hacl_Impl_SHA3_loadState(len, lastBlock, s);
- if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && input_len == len - (uint32_t)1U)
+ Hacl_Hash_SHA3_loadState(len, lastBlock, s);
+ if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U)
{
- Hacl_Impl_SHA3_state_permute(s);
+ Hacl_Hash_SHA3_state_permute(s);
}
uint8_t nextBlock_[200U] = { 0U };
uint8_t *nextBlock = nextBlock_;
- nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
- Hacl_Impl_SHA3_loadState(len, nextBlock, s);
- Hacl_Impl_SHA3_state_permute(s);
+ nextBlock[len - 1U] = 0x80U;
+ Hacl_Hash_SHA3_loadState(len, nextBlock, s);
+ Hacl_Hash_SHA3_state_permute(s);
}
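
A short illustrative fragment, not part of the diff, isolating the domain-separation byte chosen in update_last_sha3 above: SHAKE128/256 use 0x1f, the fixed-output SHA-3 variants use 0x06, and the final 0x80 bit of the pad10*1 rule is written separately (with an extra permutation when the suffix byte already occupies the last position of the rate block). The include is assumed to expose the algorithm enumeration used by the SHA-3 API.

/* Illustrative only: suffix selection mirrored from update_last_sha3. */
#include <stdint.h>
#include "Hacl_Hash_SHA3.h"

static uint8_t sha3_suffix(Spec_Hash_Definitions_hash_alg a)
{
    if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
        return 0x1fU;          /* SHAKE domain separator */
    return 0x06U;              /* SHA3-224/256/384/512 domain separator */
}
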
typedef struct hash_buf2_s
{
- Hacl_Streaming_Keccak_hash_buf fst;
- Hacl_Streaming_Keccak_hash_buf snd;
+ Hacl_Hash_SHA3_hash_buf fst;
+ Hacl_Hash_SHA3_hash_buf snd;
}
hash_buf2;
-Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s)
+Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s)
{
- Hacl_Streaming_Keccak_state scrut = *s;
- Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
+ Hacl_Hash_SHA3_hash_buf block_state = (*s).block_state;
return block_state.fst;
}
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a)
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a)
{
KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
- uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
- Hacl_Streaming_Keccak_hash_buf block_state = { .fst = a, .snd = buf };
- Hacl_Streaming_Keccak_state
- s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)(uint32_t)0U };
- Hacl_Streaming_Keccak_state
- *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+ uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+ Hacl_Hash_SHA3_hash_buf block_state = { .fst = a, .snd = buf };
+ Hacl_Hash_SHA3_state_t
+ s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U };
+ Hacl_Hash_SHA3_state_t
+ *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t));
p[0U] = s;
uint64_t *s1 = block_state.snd;
- memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+ memset(s1, 0U, 25U * sizeof (uint64_t));
return p;
}
-void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s)
+void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state)
{
- Hacl_Streaming_Keccak_state scrut = *s;
+ Hacl_Hash_SHA3_state_t scrut = *state;
uint8_t *buf = scrut.buf;
- Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
- uint64_t *s1 = block_state.snd;
- KRML_HOST_FREE(s1);
- KRML_HOST_FREE(buf);
+ Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state;
+ uint64_t *s = block_state.snd;
KRML_HOST_FREE(s);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(state);
}
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0)
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state)
{
- Hacl_Streaming_Keccak_state scrut0 = *s0;
- Hacl_Streaming_Keccak_hash_buf block_state0 = scrut0.block_state;
+ Hacl_Hash_SHA3_state_t scrut0 = *state;
+ Hacl_Hash_SHA3_hash_buf block_state0 = scrut0.block_state;
uint8_t *buf0 = scrut0.buf;
uint64_t total_len0 = scrut0.total_len;
Spec_Hash_Definitions_hash_alg i = block_state0.fst;
KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i));
uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t));
memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t));
- uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
- Hacl_Streaming_Keccak_hash_buf block_state = { .fst = i, .snd = buf };
+ uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+ Hacl_Hash_SHA3_hash_buf block_state = { .fst = i, .snd = buf };
hash_buf2 scrut = { .fst = block_state0, .snd = block_state };
uint64_t *s_dst = scrut.snd.snd;
uint64_t *s_src = scrut.fst.snd;
- memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
- Hacl_Streaming_Keccak_state
+ memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
+ Hacl_Hash_SHA3_state_t
s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 };
- Hacl_Streaming_Keccak_state
- *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+ Hacl_Hash_SHA3_state_t
+ *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t));
p[0U] = s;
return p;
}
-void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s)
+void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state)
{
- Hacl_Streaming_Keccak_state scrut = *s;
+ Hacl_Hash_SHA3_state_t scrut = *state;
uint8_t *buf = scrut.buf;
- Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
- uint64_t *s1 = block_state.snd;
- memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
- Hacl_Streaming_Keccak_state
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
- s[0U] = tmp;
+ Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state;
+ Spec_Hash_Definitions_hash_alg i = block_state.fst;
+ KRML_MAYBE_UNUSED_VAR(i);
+ uint64_t *s = block_state.snd;
+ memset(s, 0U, 25U * sizeof (uint64_t));
+ Hacl_Hash_SHA3_state_t
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ state[0U] = tmp;
}
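
A minimal usage sketch of the renamed SHA-3 streaming state, illustrative only: the algorithm is fixed at allocation time and travels with the state, as Hacl_Hash_SHA3_get_alg shows. Only functions visible in this diff are used; the finalisation entry point is not shown in this hunk and is therefore omitted here.

/* Illustrative only: allocate, feed, query, reset, free. */
#include <stdint.h>
#include "Hacl_Hash_SHA3.h"

int main(void)
{
    Hacl_Hash_SHA3_state_t *st = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_SHA3_256);

    Hacl_Hash_SHA3_update(st, (uint8_t *)"abc", 3U);

    Spec_Hash_Definitions_hash_alg a = Hacl_Hash_SHA3_get_alg(st);
    (void)a;                         /* the algorithm chosen at malloc time */

    Hacl_Hash_SHA3_reset(st);        /* reuse the same state for another message */
    Hacl_Hash_SHA3_free(st);
    return 0;
}
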
Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len)
+Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len)
{
- Hacl_Streaming_Keccak_state s = *p;
- Hacl_Streaming_Keccak_hash_buf block_state = s.block_state;
+ Hacl_Hash_SHA3_state_t s = *state;
+ Hacl_Hash_SHA3_hash_buf block_state = s.block_state;
uint64_t total_len = s.total_len;
Spec_Hash_Definitions_hash_alg i = block_state.fst;
- if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len)
+ if ((uint64_t)chunk_len > 0xFFFFFFFFFFFFFFFFULL - total_len)
{
return Hacl_Streaming_Types_MaximumLengthExceeded;
}
uint32_t sz;
- if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)block_len(i) == 0ULL && total_len > 0ULL)
{
sz = block_len(i);
}
@@ -255,14 +255,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
{
sz = (uint32_t)(total_len % (uint64_t)block_len(i));
}
- if (len <= block_len(i) - sz)
+ if (chunk_len <= block_len(i) - sz)
{
- Hacl_Streaming_Keccak_state s1 = *p;
- Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+ Hacl_Hash_SHA3_state_t s1 = *state;
+ Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
{
sz1 = block_len(i);
}
@@ -271,26 +271,20 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
}
uint8_t *buf2 = buf + sz1;
- memcpy(buf2, data, len * sizeof (uint8_t));
- uint64_t total_len2 = total_len1 + (uint64_t)len;
- *p
+ memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+ *state
=
- (
- (Hacl_Streaming_Keccak_state){
- .block_state = block_state1,
- .buf = buf,
- .total_len = total_len2
- }
- );
+ ((Hacl_Hash_SHA3_state_t){ .block_state = block_state1, .buf = buf, .total_len = total_len2 });
}
- else if (sz == (uint32_t)0U)
+ else if (sz == 0U)
{
- Hacl_Streaming_Keccak_state s1 = *p;
- Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+ Hacl_Hash_SHA3_state_t s1 = *state;
+ Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state;
uint8_t *buf = s1.buf;
uint64_t total_len1 = s1.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
{
sz1 = block_len(i);
}
@@ -298,52 +292,52 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
{
sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
uint64_t *s2 = block_state1.snd;
Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
}
uint32_t ite;
- if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ if ((uint64_t)chunk_len % (uint64_t)block_len(i) == 0ULL && (uint64_t)chunk_len > 0ULL)
{
ite = block_len(i);
}
else
{
- ite = (uint32_t)((uint64_t)len % (uint64_t)block_len(i));
+ ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)block_len(i));
}
- uint32_t n_blocks = (len - ite) / block_len(i);
+ uint32_t n_blocks = (chunk_len - ite) / block_len(i);
uint32_t data1_len = n_blocks * block_len(i);
- uint32_t data2_len = len - data1_len;
- uint8_t *data1 = data;
- uint8_t *data2 = data + data1_len;
+ uint32_t data2_len = chunk_len - data1_len;
+ uint8_t *data1 = chunk;
+ uint8_t *data2 = chunk + data1_len;
Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
uint64_t *s2 = block_state1.snd;
Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
uint8_t *dst = buf;
memcpy(dst, data2, data2_len * sizeof (uint8_t));
- *p
+ *state
=
(
- (Hacl_Streaming_Keccak_state){
+ (Hacl_Hash_SHA3_state_t){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)len
+ .total_len = total_len1 + (uint64_t)chunk_len
}
);
}
else
{
uint32_t diff = block_len(i) - sz;
- uint8_t *data1 = data;
- uint8_t *data2 = data + diff;
- Hacl_Streaming_Keccak_state s1 = *p;
- Hacl_Streaming_Keccak_hash_buf block_state10 = s1.block_state;
+ uint8_t *chunk1 = chunk;
+ uint8_t *chunk2 = chunk + diff;
+ Hacl_Hash_SHA3_state_t s1 = *state;
+ Hacl_Hash_SHA3_hash_buf block_state10 = s1.block_state;
uint8_t *buf0 = s1.buf;
uint64_t total_len10 = s1.total_len;
uint32_t sz10;
- if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ if (total_len10 % (uint64_t)block_len(i) == 0ULL && total_len10 > 0ULL)
{
sz10 = block_len(i);
}
@@ -352,23 +346,23 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i));
}
uint8_t *buf2 = buf0 + sz10;
- memcpy(buf2, data1, diff * sizeof (uint8_t));
+ memcpy(buf2, chunk1, diff * sizeof (uint8_t));
uint64_t total_len2 = total_len10 + (uint64_t)diff;
- *p
+ *state
=
(
- (Hacl_Streaming_Keccak_state){
+ (Hacl_Hash_SHA3_state_t){
.block_state = block_state10,
.buf = buf0,
.total_len = total_len2
}
);
- Hacl_Streaming_Keccak_state s10 = *p;
- Hacl_Streaming_Keccak_hash_buf block_state1 = s10.block_state;
+ Hacl_Hash_SHA3_state_t s10 = *state;
+ Hacl_Hash_SHA3_hash_buf block_state1 = s10.block_state;
uint8_t *buf = s10.buf;
uint64_t total_len1 = s10.total_len;
uint32_t sz1;
- if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
{
sz1 = block_len(i);
}
@@ -376,7 +370,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
{
sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
}
- if (!(sz1 == (uint32_t)0U))
+ if (!(sz1 == 0U))
{
Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
uint64_t *s2 = block_state1.snd;
@@ -385,35 +379,35 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
uint32_t ite;
if
(
- (uint64_t)(len - diff)
+ (uint64_t)(chunk_len - diff)
% (uint64_t)block_len(i)
- == (uint64_t)0U
- && (uint64_t)(len - diff) > (uint64_t)0U
+ == 0ULL
+ && (uint64_t)(chunk_len - diff) > 0ULL
)
{
ite = block_len(i);
}
else
{
- ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)block_len(i));
+ ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)block_len(i));
}
- uint32_t n_blocks = (len - diff - ite) / block_len(i);
+ uint32_t n_blocks = (chunk_len - diff - ite) / block_len(i);
uint32_t data1_len = n_blocks * block_len(i);
- uint32_t data2_len = len - diff - data1_len;
- uint8_t *data11 = data2;
- uint8_t *data21 = data2 + data1_len;
+ uint32_t data2_len = chunk_len - diff - data1_len;
+ uint8_t *data1 = chunk2;
+ uint8_t *data2 = chunk2 + data1_len;
Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
uint64_t *s2 = block_state1.snd;
- Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data11, data1_len / block_len(a1));
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
uint8_t *dst = buf;
- memcpy(dst, data21, data2_len * sizeof (uint8_t));
- *p
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *state
=
(
- (Hacl_Streaming_Keccak_state){
+ (Hacl_Hash_SHA3_state_t){
.block_state = block_state1,
.buf = buf,
- .total_len = total_len1 + (uint64_t)(len - diff)
+ .total_len = total_len1 + (uint64_t)(chunk_len - diff)
}
);
}
@@ -421,19 +415,19 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
}
static void
-finish_(
+digest_(
Spec_Hash_Definitions_hash_alg a,
- Hacl_Streaming_Keccak_state *p,
- uint8_t *dst,
+ Hacl_Hash_SHA3_state_t *state,
+ uint8_t *output,
uint32_t l
)
{
- Hacl_Streaming_Keccak_state scrut0 = *p;
- Hacl_Streaming_Keccak_hash_buf block_state = scrut0.block_state;
+ Hacl_Hash_SHA3_state_t scrut0 = *state;
+ Hacl_Hash_SHA3_hash_buf block_state = scrut0.block_state;
uint8_t *buf_ = scrut0.buf;
uint64_t total_len = scrut0.total_len;
uint32_t r;
- if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && total_len > (uint64_t)0U)
+ if (total_len % (uint64_t)block_len(a) == 0ULL && total_len > 0ULL)
{
r = block_len(a);
}
@@ -443,25 +437,25 @@ finish_(
}
uint8_t *buf_1 = buf_;
uint64_t buf[25U] = { 0U };
- Hacl_Streaming_Keccak_hash_buf tmp_block_state = { .fst = a, .snd = buf };
+ Hacl_Hash_SHA3_hash_buf tmp_block_state = { .fst = a, .snd = buf };
hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state };
uint64_t *s_dst = scrut.snd.snd;
uint64_t *s_src = scrut.fst.snd;
- memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
- uint32_t ite0;
- if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U)
+ memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
+ uint32_t ite;
+ if (r % block_len(a) == 0U && r > 0U)
{
- ite0 = block_len(a);
+ ite = block_len(a);
}
else
{
- ite0 = r % block_len(a);
+ ite = r % block_len(a);
}
- uint8_t *buf_last = buf_1 + r - ite0;
+ uint8_t *buf_last = buf_1 + r - ite;
uint8_t *buf_multi = buf_1;
Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst;
uint64_t *s0 = tmp_block_state.snd;
- Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, (uint32_t)0U / block_len(a1));
+ Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, 0U / block_len(a1));
Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst;
uint64_t *s1 = tmp_block_state.snd;
Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r);
@@ -469,267 +463,182 @@ finish_(
uint64_t *s = tmp_block_state.snd;
if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256)
{
- uint32_t ite;
- if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256)
- {
- ite = l;
- }
- else
- {
- ite = hash_len(a11);
- }
- Hacl_Impl_SHA3_squeeze(s, block_len(a11), ite, dst);
+ Hacl_Hash_SHA3_squeeze0(s, block_len(a11), l, output);
return;
}
- Hacl_Impl_SHA3_squeeze(s, block_len(a11), hash_len(a11), dst);
+ Hacl_Hash_SHA3_squeeze0(s, block_len(a11), hash_len(a11), output);
}
Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst)
+Hacl_Hash_SHA3_digest(Hacl_Hash_SHA3_state_t *state, uint8_t *output)
{
- Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(state);
if (a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256)
{
return Hacl_Streaming_Types_InvalidAlgorithm;
}
- finish_(a1, s, dst, hash_len(a1));
+ digest_(a1, state, output, hash_len(a1));
return Hacl_Streaming_Types_Success;
}
Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l)
+Hacl_Hash_SHA3_squeeze(Hacl_Hash_SHA3_state_t *s, uint8_t *dst, uint32_t l)
{
- Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
if (!(a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256))
{
return Hacl_Streaming_Types_InvalidAlgorithm;
}
- if (l == (uint32_t)0U)
+ if (l == 0U)
{
return Hacl_Streaming_Types_InvalidLength;
}
- finish_(a1, s, dst, l);
+ digest_(a1, s, dst, l);
return Hacl_Streaming_Types_Success;
}
-uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s)
+uint32_t Hacl_Hash_SHA3_block_len(Hacl_Hash_SHA3_state_t *s)
{
- Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
return block_len(a1);
}
-uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s)
+uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s)
{
- Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+ Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
return hash_len(a1);
}
-bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s)
+bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s)
{
- Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Streaming_Keccak_get_alg(s);
+ Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Hash_SHA3_get_alg(s);
return uu____0 == Spec_Hash_Definitions_Shake128 || uu____0 == Spec_Hash_Definitions_Shake256;
}
void
-Hacl_SHA3_shake128_hacl(
+Hacl_Hash_SHA3_shake128_hacl(
uint32_t inputByteLen,
uint8_t *input,
uint32_t outputByteLen,
uint8_t *output
)
{
- Hacl_Impl_SHA3_keccak((uint32_t)1344U,
- (uint32_t)256U,
- inputByteLen,
- input,
- (uint8_t)0x1FU,
- outputByteLen,
- output);
+ Hacl_Hash_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, output);
}
void
-Hacl_SHA3_shake256_hacl(
+Hacl_Hash_SHA3_shake256_hacl(
uint32_t inputByteLen,
uint8_t *input,
uint32_t outputByteLen,
uint8_t *output
)
{
- Hacl_Impl_SHA3_keccak((uint32_t)1088U,
- (uint32_t)512U,
- inputByteLen,
- input,
- (uint8_t)0x1FU,
- outputByteLen,
- output);
+ Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output);
}
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- Hacl_Impl_SHA3_keccak((uint32_t)1152U,
- (uint32_t)448U,
- inputByteLen,
- input,
- (uint8_t)0x06U,
- (uint32_t)28U,
- output);
+ Hacl_Hash_SHA3_keccak(1152U, 448U, input_len, input, 0x06U, 28U, output);
}
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- Hacl_Impl_SHA3_keccak((uint32_t)1088U,
- (uint32_t)512U,
- inputByteLen,
- input,
- (uint8_t)0x06U,
- (uint32_t)32U,
- output);
+ Hacl_Hash_SHA3_keccak(1088U, 512U, input_len, input, 0x06U, 32U, output);
}
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- Hacl_Impl_SHA3_keccak((uint32_t)832U,
- (uint32_t)768U,
- inputByteLen,
- input,
- (uint8_t)0x06U,
- (uint32_t)48U,
- output);
+ Hacl_Hash_SHA3_keccak(832U, 768U, input_len, input, 0x06U, 48U, output);
}
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t input_len)
{
- Hacl_Impl_SHA3_keccak((uint32_t)576U,
- (uint32_t)1024U,
- inputByteLen,
- input,
- (uint8_t)0x06U,
- (uint32_t)64U,
- output);
+ Hacl_Hash_SHA3_keccak(576U, 1024U, input_len, input, 0x06U, 64U, output);
}
static const
uint32_t
keccak_rotc[24U] =
{
- (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U,
- (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U,
- (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U,
- (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U
+ 1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U,
+ 39U, 61U, 20U, 44U
};
static const
uint32_t
keccak_piln[24U] =
{
- (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U,
- (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U,
- (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U,
- (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U
+ 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U,
+ 22U, 9U, 6U, 1U
};
static const
uint64_t
keccak_rndc[24U] =
{
- (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU,
- (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U,
- (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU,
- (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU,
- (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U,
- (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U,
- (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U,
- (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U
+ 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+ 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+ 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+ 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+ 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL,
+ 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
-void Hacl_Impl_SHA3_state_permute(uint64_t *s)
+void Hacl_Hash_SHA3_state_permute(uint64_t *s)
{
- for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++)
+ for (uint32_t i0 = 0U; i0 < 24U; i0++)
{
uint64_t _C[5U] = { 0U };
KRML_MAYBE_FOR5(i,
- (uint32_t)0U,
- (uint32_t)5U,
- (uint32_t)1U,
- _C[i] =
- s[i
- + (uint32_t)0U]
- ^
- (s[i
- + (uint32_t)5U]
- ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U]))););
+ 0U,
+ 5U,
+ 1U,
+ _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U]))););
KRML_MAYBE_FOR5(i1,
- (uint32_t)0U,
- (uint32_t)5U,
- (uint32_t)1U,
- uint64_t uu____0 = _C[(i1 + (uint32_t)1U) % (uint32_t)5U];
- uint64_t
- _D =
- _C[(i1 + (uint32_t)4U)
- % (uint32_t)5U]
- ^ (uu____0 << (uint32_t)1U | uu____0 >> (uint32_t)63U);
- KRML_MAYBE_FOR5(i,
- (uint32_t)0U,
- (uint32_t)5U,
- (uint32_t)1U,
- s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;););
+ 0U,
+ 5U,
+ 1U,
+ uint64_t uu____0 = _C[(i1 + 1U) % 5U];
+ uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
+ KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;););
uint64_t x = s[1U];
uint64_t current = x;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+ for (uint32_t i = 0U; i < 24U; i++)
{
uint32_t _Y = keccak_piln[i];
uint32_t r = keccak_rotc[i];
uint64_t temp = s[_Y];
uint64_t uu____1 = current;
- s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r);
+ s[_Y] = uu____1 << r | uu____1 >> (64U - r);
current = temp;
}
KRML_MAYBE_FOR5(i,
- (uint32_t)0U,
- (uint32_t)5U,
- (uint32_t)1U,
- uint64_t
- v0 =
- s[(uint32_t)0U
- + (uint32_t)5U * i]
- ^ (~s[(uint32_t)1U + (uint32_t)5U * i] & s[(uint32_t)2U + (uint32_t)5U * i]);
- uint64_t
- v1 =
- s[(uint32_t)1U
- + (uint32_t)5U * i]
- ^ (~s[(uint32_t)2U + (uint32_t)5U * i] & s[(uint32_t)3U + (uint32_t)5U * i]);
- uint64_t
- v2 =
- s[(uint32_t)2U
- + (uint32_t)5U * i]
- ^ (~s[(uint32_t)3U + (uint32_t)5U * i] & s[(uint32_t)4U + (uint32_t)5U * i]);
- uint64_t
- v3 =
- s[(uint32_t)3U
- + (uint32_t)5U * i]
- ^ (~s[(uint32_t)4U + (uint32_t)5U * i] & s[(uint32_t)0U + (uint32_t)5U * i]);
- uint64_t
- v4 =
- s[(uint32_t)4U
- + (uint32_t)5U * i]
- ^ (~s[(uint32_t)0U + (uint32_t)5U * i] & s[(uint32_t)1U + (uint32_t)5U * i]);
- s[(uint32_t)0U + (uint32_t)5U * i] = v0;
- s[(uint32_t)1U + (uint32_t)5U * i] = v1;
- s[(uint32_t)2U + (uint32_t)5U * i] = v2;
- s[(uint32_t)3U + (uint32_t)5U * i] = v3;
- s[(uint32_t)4U + (uint32_t)5U * i] = v4;);
+ 0U,
+ 5U,
+ 1U,
+ uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]);
+ uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]);
+ uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]);
+ uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+ uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+ s[0U + 5U * i] = v0;
+ s[1U + 5U * i] = v1;
+ s[2U + 5U * i] = v2;
+ s[3U + 5U * i] = v3;
+ s[4U + 5U * i] = v4;);
uint64_t c = keccak_rndc[i0];
s[0U] = s[0U] ^ c;
}
}
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
+void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
{
uint8_t block[200U] = { 0U };
memcpy(block, input, rateInBytes * sizeof (uint8_t));
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+ for (uint32_t i = 0U; i < 25U; i++)
{
- uint64_t u = load64_le(block + i * (uint32_t)8U);
+ uint64_t u = load64_le(block + i * 8U);
uint64_t x = u;
s[i] = s[i] ^ x;
}
@@ -738,18 +647,18 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res)
{
uint8_t block[200U] = { 0U };
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+ for (uint32_t i = 0U; i < 25U; i++)
{
uint64_t sj = s[i];
- store64_le(block + i * (uint32_t)8U, sj);
+ store64_le(block + i * 8U, sj);
}
memcpy(res, block, rateInBytes * sizeof (uint8_t));
}
-void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
+void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
{
- Hacl_Impl_SHA3_loadState(rateInBytes, block, s);
- Hacl_Impl_SHA3_state_permute(s);
+ Hacl_Hash_SHA3_loadState(rateInBytes, block, s);
+ Hacl_Hash_SHA3_state_permute(s);
}
static void
@@ -763,30 +672,30 @@ absorb(
{
uint32_t n_blocks = inputByteLen / rateInBytes;
uint32_t rem = inputByteLen % rateInBytes;
- for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+ for (uint32_t i = 0U; i < n_blocks; i++)
{
uint8_t *block = input + i * rateInBytes;
- Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s);
+ Hacl_Hash_SHA3_absorb_inner(rateInBytes, block, s);
}
uint8_t *last = input + n_blocks * rateInBytes;
uint8_t lastBlock_[200U] = { 0U };
uint8_t *lastBlock = lastBlock_;
memcpy(lastBlock, last, rem * sizeof (uint8_t));
lastBlock[rem] = delimitedSuffix;
- Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s);
- if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U)
+ Hacl_Hash_SHA3_loadState(rateInBytes, lastBlock, s);
+ if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U)
{
- Hacl_Impl_SHA3_state_permute(s);
+ Hacl_Hash_SHA3_state_permute(s);
}
uint8_t nextBlock_[200U] = { 0U };
uint8_t *nextBlock = nextBlock_;
- nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U;
- Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s);
- Hacl_Impl_SHA3_state_permute(s);
+ nextBlock[rateInBytes - 1U] = 0x80U;
+ Hacl_Hash_SHA3_loadState(rateInBytes, nextBlock, s);
+ Hacl_Hash_SHA3_state_permute(s);
}
void
-Hacl_Impl_SHA3_squeeze(
+Hacl_Hash_SHA3_squeeze0(
uint64_t *s,
uint32_t rateInBytes,
uint32_t outputByteLen,
@@ -797,16 +706,16 @@ Hacl_Impl_SHA3_squeeze(
uint32_t remOut = outputByteLen % rateInBytes;
uint8_t *last = output + outputByteLen - remOut;
uint8_t *blocks = output;
- for (uint32_t i = (uint32_t)0U; i < outBlocks; i++)
+ for (uint32_t i = 0U; i < outBlocks; i++)
{
storeState(rateInBytes, s, blocks + i * rateInBytes);
- Hacl_Impl_SHA3_state_permute(s);
+ Hacl_Hash_SHA3_state_permute(s);
}
storeState(remOut, s, last);
}
void
-Hacl_Impl_SHA3_keccak(
+Hacl_Hash_SHA3_keccak(
uint32_t rate,
uint32_t capacity,
uint32_t inputByteLen,
@@ -816,9 +725,10 @@ Hacl_Impl_SHA3_keccak(
uint8_t *output
)
{
- uint32_t rateInBytes = rate / (uint32_t)8U;
+ KRML_MAYBE_UNUSED_VAR(capacity);
+ uint32_t rateInBytes = rate / 8U;
uint64_t s[25U] = { 0U };
absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix);
- Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output);
+ Hacl_Hash_SHA3_squeeze0(s, rateInBytes, outputByteLen, output);
}
diff --git a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h
index 681b6af4a80..678e9f2fbe1 100644
--- a/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h
+++ b/contrib/tools/python3/Modules/_hacl/Hacl_Hash_SHA3.h
@@ -31,54 +31,55 @@ extern "C" {
#endif
#include <string.h>
+#include "python_hacl_namespaces.h"
#include "krml/types.h"
#include "krml/lowstar_endianness.h"
#include "krml/internal/target.h"
#include "Hacl_Streaming_Types.h"
-typedef struct Hacl_Streaming_Keccak_hash_buf_s
+typedef struct Hacl_Hash_SHA3_hash_buf_s
{
Spec_Hash_Definitions_hash_alg fst;
uint64_t *snd;
}
-Hacl_Streaming_Keccak_hash_buf;
+Hacl_Hash_SHA3_hash_buf;
-typedef struct Hacl_Streaming_Keccak_state_s
+typedef struct Hacl_Hash_SHA3_state_t_s
{
- Hacl_Streaming_Keccak_hash_buf block_state;
+ Hacl_Hash_SHA3_hash_buf block_state;
uint8_t *buf;
uint64_t total_len;
}
-Hacl_Streaming_Keccak_state;
+Hacl_Hash_SHA3_state_t;
-Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s);
+Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s);
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a);
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a);
-void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s);
+void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state);
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0);
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state);
-void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s);
+void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state);
Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len);
+Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len);
Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst);
+Hacl_Hash_SHA3_digest(Hacl_Hash_SHA3_state_t *state, uint8_t *output);
Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l);
+Hacl_Hash_SHA3_squeeze(Hacl_Hash_SHA3_state_t *s, uint8_t *dst, uint32_t l);
-uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s);
+uint32_t Hacl_Hash_SHA3_block_len(Hacl_Hash_SHA3_state_t *s);
-uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s);
+uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s);
-bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s);
+bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s);
void
-Hacl_SHA3_shake128_hacl(
+Hacl_Hash_SHA3_shake128_hacl(
uint32_t inputByteLen,
uint8_t *input,
uint32_t outputByteLen,
@@ -86,25 +87,25 @@ Hacl_SHA3_shake128_hacl(
);
void
-Hacl_SHA3_shake256_hacl(
+Hacl_Hash_SHA3_shake256_hacl(
uint32_t inputByteLen,
uint8_t *input,
uint32_t outputByteLen,
uint8_t *output
);
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t input_len);
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t input_len);
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t input_len);
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t input_len);
-void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
+void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
void
-Hacl_Impl_SHA3_squeeze(
+Hacl_Hash_SHA3_squeeze0(
uint64_t *s,
uint32_t rateInBytes,
uint32_t outputByteLen,
@@ -112,7 +113,7 @@ Hacl_Impl_SHA3_squeeze(
);
void
-Hacl_Impl_SHA3_keccak(
+Hacl_Hash_SHA3_keccak(
uint32_t rate,
uint32_t capacity,
uint32_t inputByteLen,
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h
index 3d36d440735..bdf25898f2b 100644
--- a/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt128_Verified.h
@@ -15,7 +15,7 @@
static inline uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
{
- return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
+ return (a ^ ((a ^ b) | ((a - b) ^ b))) >> 63U;
}
static inline uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b)
@@ -118,7 +118,7 @@ static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a
return lit;
}
-static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
+static uint32_t FStar_UInt128_u32_64 = 64U;
static inline uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
{
@@ -134,7 +134,7 @@ FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
static inline FStar_UInt128_uint128
FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
{
- if (s == (uint32_t)0U)
+ if (s == 0U)
{
return a;
}
@@ -151,7 +151,7 @@ static inline FStar_UInt128_uint128
FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
{
FStar_UInt128_uint128 lit;
- lit.low = (uint64_t)0U;
+ lit.low = 0ULL;
lit.high = a.low << (s - FStar_UInt128_u32_64);
return lit;
}
@@ -183,7 +183,7 @@ FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
static inline FStar_UInt128_uint128
FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
{
- if (s == (uint32_t)0U)
+ if (s == 0U)
{
return a;
}
@@ -201,7 +201,7 @@ FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
{
FStar_UInt128_uint128 lit;
lit.low = a.high >> (s - FStar_UInt128_u32_64);
- lit.high = (uint64_t)0U;
+ lit.high = 0ULL;
return lit;
}
@@ -269,7 +269,7 @@ static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a)
{
FStar_UInt128_uint128 lit;
lit.low = a;
- lit.high = (uint64_t)0U;
+ lit.high = 0ULL;
return lit;
}
@@ -280,10 +280,10 @@ static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
static inline uint64_t FStar_UInt128_u64_mod_32(uint64_t a)
{
- return a & (uint64_t)0xffffffffU;
+ return a & 0xffffffffULL;
}
-static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
+static uint32_t FStar_UInt128_u32_32 = 32U;
static inline uint64_t FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
{
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
index a56c7d61349..1bdec972a2f 100644
--- a/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
@@ -14,16 +14,16 @@
#include "krml/types.h"
#include "krml/internal/target.h"
-static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
+static KRML_NOINLINE uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
{
uint64_t x = a ^ b;
- uint64_t minus_x = ~x + (uint64_t)1U;
+ uint64_t minus_x = ~x + 1ULL;
uint64_t x_or_minus_x = x | minus_x;
- uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
- return xnx - (uint64_t)1U;
+ uint64_t xnx = x_or_minus_x >> 63U;
+ return xnx - 1ULL;
}
-static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
+static KRML_NOINLINE uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
{
uint64_t x = a;
uint64_t y = b;
@@ -32,20 +32,20 @@ static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
uint64_t x_sub_y_xor_y = x_sub_y ^ y;
uint64_t q = x_xor_y | x_sub_y_xor_y;
uint64_t x_xor_q = x ^ q;
- uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
- return x_xor_q_ - (uint64_t)1U;
+ uint64_t x_xor_q_ = x_xor_q >> 63U;
+ return x_xor_q_ - 1ULL;
}
-static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
+static KRML_NOINLINE uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
{
uint32_t x = a ^ b;
- uint32_t minus_x = ~x + (uint32_t)1U;
+ uint32_t minus_x = ~x + 1U;
uint32_t x_or_minus_x = x | minus_x;
- uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
- return xnx - (uint32_t)1U;
+ uint32_t xnx = x_or_minus_x >> 31U;
+ return xnx - 1U;
}
-static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
+static KRML_NOINLINE uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
{
uint32_t x = a;
uint32_t y = b;
@@ -54,52 +54,52 @@ static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
uint32_t x_sub_y_xor_y = x_sub_y ^ y;
uint32_t q = x_xor_y | x_sub_y_xor_y;
uint32_t x_xor_q = x ^ q;
- uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
- return x_xor_q_ - (uint32_t)1U;
+ uint32_t x_xor_q_ = x_xor_q >> 31U;
+ return x_xor_q_ - 1U;
}
-static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
+static KRML_NOINLINE uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
{
- uint16_t x = a ^ b;
- uint16_t minus_x = ~x + (uint16_t)1U;
- uint16_t x_or_minus_x = x | minus_x;
- uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
- return xnx - (uint16_t)1U;
+ uint16_t x = (uint32_t)a ^ (uint32_t)b;
+ uint16_t minus_x = (uint32_t)~x + 1U;
+ uint16_t x_or_minus_x = (uint32_t)x | (uint32_t)minus_x;
+ uint16_t xnx = (uint32_t)x_or_minus_x >> 15U;
+ return (uint32_t)xnx - 1U;
}
-static inline uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
+static KRML_NOINLINE uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
{
uint16_t x = a;
uint16_t y = b;
- uint16_t x_xor_y = x ^ y;
- uint16_t x_sub_y = x - y;
- uint16_t x_sub_y_xor_y = x_sub_y ^ y;
- uint16_t q = x_xor_y | x_sub_y_xor_y;
- uint16_t x_xor_q = x ^ q;
- uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
- return x_xor_q_ - (uint16_t)1U;
+ uint16_t x_xor_y = (uint32_t)x ^ (uint32_t)y;
+ uint16_t x_sub_y = (uint32_t)x - (uint32_t)y;
+ uint16_t x_sub_y_xor_y = (uint32_t)x_sub_y ^ (uint32_t)y;
+ uint16_t q = (uint32_t)x_xor_y | (uint32_t)x_sub_y_xor_y;
+ uint16_t x_xor_q = (uint32_t)x ^ (uint32_t)q;
+ uint16_t x_xor_q_ = (uint32_t)x_xor_q >> 15U;
+ return (uint32_t)x_xor_q_ - 1U;
}
-static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
+static KRML_NOINLINE uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
{
- uint8_t x = a ^ b;
- uint8_t minus_x = ~x + (uint8_t)1U;
- uint8_t x_or_minus_x = x | minus_x;
- uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
- return xnx - (uint8_t)1U;
+ uint8_t x = (uint32_t)a ^ (uint32_t)b;
+ uint8_t minus_x = (uint32_t)~x + 1U;
+ uint8_t x_or_minus_x = (uint32_t)x | (uint32_t)minus_x;
+ uint8_t xnx = (uint32_t)x_or_minus_x >> 7U;
+ return (uint32_t)xnx - 1U;
}
-static inline uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
+static KRML_NOINLINE uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
{
uint8_t x = a;
uint8_t y = b;
- uint8_t x_xor_y = x ^ y;
- uint8_t x_sub_y = x - y;
- uint8_t x_sub_y_xor_y = x_sub_y ^ y;
- uint8_t q = x_xor_y | x_sub_y_xor_y;
- uint8_t x_xor_q = x ^ q;
- uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
- return x_xor_q_ - (uint8_t)1U;
+ uint8_t x_xor_y = (uint32_t)x ^ (uint32_t)y;
+ uint8_t x_sub_y = (uint32_t)x - (uint32_t)y;
+ uint8_t x_sub_y_xor_y = (uint32_t)x_sub_y ^ (uint32_t)y;
+ uint8_t q = (uint32_t)x_xor_y | (uint32_t)x_sub_y_xor_y;
+ uint8_t x_xor_q = (uint32_t)x ^ (uint32_t)q;
+ uint8_t x_xor_q_ = (uint32_t)x_xor_q >> 7U;
+ return (uint32_t)x_xor_q_ - 1U;
}
diff --git a/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h b/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h
index 5a2f94eb2ec..c7fcc0151e6 100644
--- a/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h
+++ b/contrib/tools/python3/Modules/_hacl/include/krml/internal/target.h
@@ -4,13 +4,13 @@
#ifndef __KRML_TARGET_H
#define __KRML_TARGET_H
-#include <stdlib.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <stdbool.h>
+#include <assert.h>
#include <inttypes.h>
#include <limits.h>
-#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
/* Since KaRaMeL emits the inline keyword unconditionally, we follow the
* guidelines at https://gcc.gnu.org/onlinedocs/gcc/Inline.html and make this
@@ -57,6 +57,31 @@
# define KRML_HOST_IGNORE(x) (void)(x)
#endif
+#ifndef KRML_MAYBE_UNUSED_VAR
+# define KRML_MAYBE_UNUSED_VAR(x) KRML_HOST_IGNORE(x)
+#endif
+
+#ifndef KRML_MAYBE_UNUSED
+# if defined(__GNUC__)
+# define KRML_MAYBE_UNUSED __attribute__((unused))
+# else
+# define KRML_MAYBE_UNUSED
+# endif
+#endif
+
+#ifndef KRML_NOINLINE
+# if defined(_MSC_VER)
+# define KRML_NOINLINE __declspec(noinline)
+# elif defined (__GNUC__)
+# define KRML_NOINLINE __attribute__((noinline,unused))
+# else
+# define KRML_NOINLINE
+# warning "The KRML_NOINLINE macro is not defined for this toolchain!"
+# warning "The compiler may defeat side-channel resistance with optimizations."
+# warning "Please locate target.h and try to fill it out with a suitable definition for this compiler."
+# endif
+#endif
+
/* In FStar.Buffer.fst, the size of arrays is uint32_t, but it's a number of
* *elements*. Do an ugly, run-time check (some of which KaRaMeL can eliminate).
*/
@@ -83,184 +108,186 @@
#define KRML_LOOP1(i, n, x) { \
x \
i += n; \
+ (void) i; \
}
-#define KRML_LOOP2(i, n, x) \
- KRML_LOOP1(i, n, x) \
+#define KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x) \
KRML_LOOP1(i, n, x)
-#define KRML_LOOP3(i, n, x) \
- KRML_LOOP2(i, n, x) \
+#define KRML_LOOP3(i, n, x) \
+ KRML_LOOP2(i, n, x) \
KRML_LOOP1(i, n, x)
-#define KRML_LOOP4(i, n, x) \
- KRML_LOOP2(i, n, x) \
+#define KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x) \
KRML_LOOP2(i, n, x)
-#define KRML_LOOP5(i, n, x) \
- KRML_LOOP4(i, n, x) \
+#define KRML_LOOP5(i, n, x) \
+ KRML_LOOP4(i, n, x) \
KRML_LOOP1(i, n, x)
-#define KRML_LOOP6(i, n, x) \
- KRML_LOOP4(i, n, x) \
+#define KRML_LOOP6(i, n, x) \
+ KRML_LOOP4(i, n, x) \
KRML_LOOP2(i, n, x)
-#define KRML_LOOP7(i, n, x) \
- KRML_LOOP4(i, n, x) \
+#define KRML_LOOP7(i, n, x) \
+ KRML_LOOP4(i, n, x) \
KRML_LOOP3(i, n, x)
-#define KRML_LOOP8(i, n, x) \
- KRML_LOOP4(i, n, x) \
+#define KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x) \
KRML_LOOP4(i, n, x)
-#define KRML_LOOP9(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP9(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP1(i, n, x)
-#define KRML_LOOP10(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP10(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP2(i, n, x)
-#define KRML_LOOP11(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP11(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP3(i, n, x)
-#define KRML_LOOP12(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP12(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP4(i, n, x)
-#define KRML_LOOP13(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP13(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP5(i, n, x)
-#define KRML_LOOP14(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP14(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP6(i, n, x)
-#define KRML_LOOP15(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP15(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP7(i, n, x)
-#define KRML_LOOP16(i, n, x) \
- KRML_LOOP8(i, n, x) \
+#define KRML_LOOP16(i, n, x) \
+ KRML_LOOP8(i, n, x) \
KRML_LOOP8(i, n, x)
-#define KRML_UNROLL_FOR(i, z, n, k, x) do { \
- uint32_t i = z; \
- KRML_LOOP##n(i, k, x) \
-} while (0)
+#define KRML_UNROLL_FOR(i, z, n, k, x) \
+ do { \
+ uint32_t i = z; \
+ KRML_LOOP##n(i, k, x) \
+ } while (0)
-#define KRML_ACTUAL_FOR(i, z, n, k, x) \
- do { \
- for (uint32_t i = z; i < n; i += k) { \
- x \
- } \
+#define KRML_ACTUAL_FOR(i, z, n, k, x) \
+ do { \
+ for (uint32_t i = z; i < n; i += k) { \
+ x \
+ } \
} while (0)
#ifndef KRML_UNROLL_MAX
-#define KRML_UNROLL_MAX 16
+# define KRML_UNROLL_MAX 16
#endif
/* 1 is the number of loop iterations, i.e. (n - z)/k as evaluated by krml */
#if 0 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR0(i, z, n, k, x)
+# define KRML_MAYBE_FOR0(i, z, n, k, x)
#else
-#define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 1 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x)
+# define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x)
#else
-#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 2 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x)
+# define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x)
#else
-#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 3 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x)
+# define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x)
#else
-#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 4 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x)
+# define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x)
#else
-#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 5 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x)
+# define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x)
#else
-#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 6 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x)
+# define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x)
#else
-#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 7 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x)
+# define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x)
#else
-#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 8 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x)
+# define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x)
#else
-#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 9 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x)
+# define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x)
#else
-#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 10 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x)
+# define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x)
#else
-#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 11 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x)
+# define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x)
#else
-#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 12 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x)
+# define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x)
#else
-#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 13 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x)
+# define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x)
#else
-#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 14 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x)
+# define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x)
#else
-#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 15 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x)
+# define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x)
#else
-#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#if 16 <= KRML_UNROLL_MAX
-#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x)
+# define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x)
#else
-#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+# define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
#endif
#endif
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h
index 87ad4cf228d..a50ec407f53 100644
--- a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_MD5.h
@@ -37,21 +37,16 @@ extern "C" {
#include "../Hacl_Hash_MD5.h"
-void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s);
+void Hacl_Hash_MD5_init(uint32_t *s);
-void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst);
+void Hacl_Hash_MD5_finish(uint32_t *s, uint8_t *dst);
-void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+void Hacl_Hash_MD5_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
void
-Hacl_Hash_MD5_legacy_update_last(
- uint32_t *s,
- uint64_t prev_len,
- uint8_t *input,
- uint32_t input_len
-);
-
-void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+Hacl_Hash_MD5_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len);
+
+void Hacl_Hash_MD5_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len);
#if defined(__cplusplus)
}
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h
index d2d9df44c6c..b39bad3f3b9 100644
--- a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA1.h
@@ -37,21 +37,16 @@ extern "C" {
#include "../Hacl_Hash_SHA1.h"
-void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s);
+void Hacl_Hash_SHA1_init(uint32_t *s);
-void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst);
+void Hacl_Hash_SHA1_finish(uint32_t *s, uint8_t *dst);
-void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+void Hacl_Hash_SHA1_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
void
-Hacl_Hash_SHA1_legacy_update_last(
- uint32_t *s,
- uint64_t prev_len,
- uint8_t *input,
- uint32_t input_len
-);
-
-void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+Hacl_Hash_SHA1_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len);
+
+void Hacl_Hash_SHA1_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len);
#if defined(__cplusplus)
}
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h
index 851f7dc60c9..0127f4373fb 100644
--- a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA2.h
@@ -40,141 +40,121 @@ extern "C" {
static const
uint32_t
-Hacl_Impl_SHA2_Generic_h224[8U] =
+Hacl_Hash_SHA2_h224[8U] =
{
- (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
- (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+ 0xc1059ed8U, 0x367cd507U, 0x3070dd17U, 0xf70e5939U, 0xffc00b31U, 0x68581511U, 0x64f98fa7U,
+ 0xbefa4fa4U
};
static const
uint32_t
-Hacl_Impl_SHA2_Generic_h256[8U] =
+Hacl_Hash_SHA2_h256[8U] =
{
- (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
- (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+ 0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU, 0x510e527fU, 0x9b05688cU, 0x1f83d9abU,
+ 0x5be0cd19U
};
static const
uint64_t
-Hacl_Impl_SHA2_Generic_h384[8U] =
+Hacl_Hash_SHA2_h384[8U] =
{
- (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
- (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
- (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+ 0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
+ 0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL
};
static const
uint64_t
-Hacl_Impl_SHA2_Generic_h512[8U] =
+Hacl_Hash_SHA2_h512[8U] =
{
- (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
- (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
- (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+ 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+ 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
};
static const
uint32_t
-Hacl_Impl_SHA2_Generic_k224_256[64U] =
+Hacl_Hash_SHA2_k224_256[64U] =
{
- (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
- (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
- (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
- (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
- (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
- (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
- (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
- (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
- (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
- (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
- (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
- (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
- (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
- (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
- (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
- (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+ 0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+ 0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+ 0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+ 0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+ 0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+ 0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+ 0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+ 0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+ 0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+ 0xc67178f2U
};
static const
uint64_t
-Hacl_Impl_SHA2_Generic_k384_512[80U] =
+Hacl_Hash_SHA2_k384_512[80U] =
{
- (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
- (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
- (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
- (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
- (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
- (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
- (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
- (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
- (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
- (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
- (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
- (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
- (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
- (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
- (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
- (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
- (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
- (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
- (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
- (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
- (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
- (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
- (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
- (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
- (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
- (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
- (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+ 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+ 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+ 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+ 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+ 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+ 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+ 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+ 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+ 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+ 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+ 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+ 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+ 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+ 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+ 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+ 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+ 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+ 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+ 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+ 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
};
-void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash);
+void Hacl_Hash_SHA2_sha256_init(uint32_t *hash);
-void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
+void Hacl_Hash_SHA2_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
void
-Hacl_SHA2_Scalar32_sha256_update_last(
- uint64_t totlen,
- uint32_t len,
- uint8_t *b,
- uint32_t *hash
-);
+Hacl_Hash_SHA2_sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash);
-void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h);
-void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash);
+void Hacl_Hash_SHA2_sha224_init(uint32_t *hash);
void
-Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
+Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
-void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha224_finish(uint32_t *st, uint8_t *h);
-void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash);
+void Hacl_Hash_SHA2_sha512_init(uint64_t *hash);
-void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+void Hacl_Hash_SHA2_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
void
-Hacl_SHA2_Scalar32_sha512_update_last(
+Hacl_Hash_SHA2_sha512_update_last(
FStar_UInt128_uint128 totlen,
uint32_t len,
uint8_t *b,
uint64_t *hash
);
-void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha512_finish(uint64_t *st, uint8_t *h);
-void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash);
+void Hacl_Hash_SHA2_sha384_init(uint64_t *hash);
-void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+void Hacl_Hash_SHA2_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
void
-Hacl_SHA2_Scalar32_sha384_update_last(
+Hacl_Hash_SHA2_sha384_update_last(
FStar_UInt128_uint128 totlen,
uint32_t len,
uint8_t *b,
uint64_t *st
);
-void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha384_finish(uint64_t *st, uint8_t *h);
#if defined(__cplusplus)
}
diff --git a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h
index 1c9808b8dd4..b80e81fafb9 100644
--- a/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h
+++ b/contrib/tools/python3/Modules/_hacl/internal/Hacl_Hash_SHA3.h
@@ -53,9 +53,9 @@ Hacl_Hash_SHA3_update_last_sha3(
uint32_t input_len
);
-void Hacl_Impl_SHA3_state_permute(uint64_t *s);
+void Hacl_Hash_SHA3_state_permute(uint64_t *s);
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
+void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
#if defined(__cplusplus)
}
diff --git a/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h b/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h
index 0df236282ac..684e7fd2fbe 100644
--- a/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h
+++ b/contrib/tools/python3/Modules/_hacl/python_hacl_namespaces.h
@@ -5,59 +5,61 @@
* C's excuse for namespaces: Use globally unique names to avoid linkage
* conflicts with builds linking or dynamically loading other code potentially
* using HACL* libraries.
+ *
+ * To make sure this is effective: cd Modules && nm -a *.o | grep Hacl
*/
-#define Hacl_Streaming_SHA2_state_sha2_224_s python_hashlib_Hacl_Streaming_SHA2_state_sha2_224_s
-#define Hacl_Streaming_SHA2_state_sha2_224 python_hashlib_Hacl_Streaming_SHA2_state_sha2_224
-#define Hacl_Streaming_SHA2_state_sha2_256 python_hashlib_Hacl_Streaming_SHA2_state_sha2_256
-#define Hacl_Streaming_SHA2_state_sha2_384_s python_hashlib_Hacl_Streaming_SHA2_state_sha2_384_s
-#define Hacl_Streaming_SHA2_state_sha2_384 python_hashlib_Hacl_Streaming_SHA2_state_sha2_384
-#define Hacl_Streaming_SHA2_state_sha2_512 python_hashlib_Hacl_Streaming_SHA2_state_sha2_512
-#define Hacl_Streaming_SHA2_create_in_256 python_hashlib_Hacl_Streaming_SHA2_create_in_256
-#define Hacl_Streaming_SHA2_create_in_224 python_hashlib_Hacl_Streaming_SHA2_create_in_224
-#define Hacl_Streaming_SHA2_create_in_512 python_hashlib_Hacl_Streaming_SHA2_create_in_512
-#define Hacl_Streaming_SHA2_create_in_384 python_hashlib_Hacl_Streaming_SHA2_create_in_384
-#define Hacl_Streaming_SHA2_copy_256 python_hashlib_Hacl_Streaming_SHA2_copy_256
-#define Hacl_Streaming_SHA2_copy_224 python_hashlib_Hacl_Streaming_SHA2_copy_224
-#define Hacl_Streaming_SHA2_copy_512 python_hashlib_Hacl_Streaming_SHA2_copy_512
-#define Hacl_Streaming_SHA2_copy_384 python_hashlib_Hacl_Streaming_SHA2_copy_384
-#define Hacl_Streaming_SHA2_init_256 python_hashlib_Hacl_Streaming_SHA2_init_256
-#define Hacl_Streaming_SHA2_init_224 python_hashlib_Hacl_Streaming_SHA2_init_224
-#define Hacl_Streaming_SHA2_init_512 python_hashlib_Hacl_Streaming_SHA2_init_512
-#define Hacl_Streaming_SHA2_init_384 python_hashlib_Hacl_Streaming_SHA2_init_384
+#define Hacl_Hash_SHA2_state_sha2_224_s python_hashlib_Hacl_Hash_SHA2_state_sha2_224_s
+#define Hacl_Hash_SHA2_state_sha2_224 python_hashlib_Hacl_Hash_SHA2_state_sha2_224
+#define Hacl_Hash_SHA2_state_sha2_256 python_hashlib_Hacl_Hash_SHA2_state_sha2_256
+#define Hacl_Hash_SHA2_state_sha2_384_s python_hashlib_Hacl_Hash_SHA2_state_sha2_384_s
+#define Hacl_Hash_SHA2_state_sha2_384 python_hashlib_Hacl_Hash_SHA2_state_sha2_384
+#define Hacl_Hash_SHA2_state_sha2_512 python_hashlib_Hacl_Hash_SHA2_state_sha2_512
+#define Hacl_Hash_SHA2_malloc_256 python_hashlib_Hacl_Hash_SHA2_malloc_256
+#define Hacl_Hash_SHA2_malloc_224 python_hashlib_Hacl_Hash_SHA2_malloc_224
+#define Hacl_Hash_SHA2_malloc_512 python_hashlib_Hacl_Hash_SHA2_malloc_512
+#define Hacl_Hash_SHA2_malloc_384 python_hashlib_Hacl_Hash_SHA2_malloc_384
+#define Hacl_Hash_SHA2_copy_256 python_hashlib_Hacl_Hash_SHA2_copy_256
+#define Hacl_Hash_SHA2_copy_224 python_hashlib_Hacl_Hash_SHA2_copy_224
+#define Hacl_Hash_SHA2_copy_512 python_hashlib_Hacl_Hash_SHA2_copy_512
+#define Hacl_Hash_SHA2_copy_384 python_hashlib_Hacl_Hash_SHA2_copy_384
+#define Hacl_Hash_SHA2_init_256 python_hashlib_Hacl_Hash_SHA2_init_256
+#define Hacl_Hash_SHA2_init_224 python_hashlib_Hacl_Hash_SHA2_init_224
+#define Hacl_Hash_SHA2_init_512 python_hashlib_Hacl_Hash_SHA2_init_512
+#define Hacl_Hash_SHA2_init_384 python_hashlib_Hacl_Hash_SHA2_init_384
#define Hacl_SHA2_Scalar32_sha512_init python_hashlib_Hacl_SHA2_Scalar32_sha512_init
-#define Hacl_Streaming_SHA2_update_256 python_hashlib_Hacl_Streaming_SHA2_update_256
-#define Hacl_Streaming_SHA2_update_224 python_hashlib_Hacl_Streaming_SHA2_update_224
-#define Hacl_Streaming_SHA2_update_512 python_hashlib_Hacl_Streaming_SHA2_update_512
-#define Hacl_Streaming_SHA2_update_384 python_hashlib_Hacl_Streaming_SHA2_update_384
-#define Hacl_Streaming_SHA2_finish_256 python_hashlib_Hacl_Streaming_SHA2_finish_256
-#define Hacl_Streaming_SHA2_finish_224 python_hashlib_Hacl_Streaming_SHA2_finish_224
-#define Hacl_Streaming_SHA2_finish_512 python_hashlib_Hacl_Streaming_SHA2_finish_512
-#define Hacl_Streaming_SHA2_finish_384 python_hashlib_Hacl_Streaming_SHA2_finish_384
-#define Hacl_Streaming_SHA2_free_256 python_hashlib_Hacl_Streaming_SHA2_free_256
-#define Hacl_Streaming_SHA2_free_224 python_hashlib_Hacl_Streaming_SHA2_free_224
-#define Hacl_Streaming_SHA2_free_512 python_hashlib_Hacl_Streaming_SHA2_free_512
-#define Hacl_Streaming_SHA2_free_384 python_hashlib_Hacl_Streaming_SHA2_free_384
-#define Hacl_Streaming_SHA2_sha256 python_hashlib_Hacl_Streaming_SHA2_sha256
-#define Hacl_Streaming_SHA2_sha224 python_hashlib_Hacl_Streaming_SHA2_sha224
-#define Hacl_Streaming_SHA2_sha512 python_hashlib_Hacl_Streaming_SHA2_sha512
-#define Hacl_Streaming_SHA2_sha384 python_hashlib_Hacl_Streaming_SHA2_sha384
+#define Hacl_Hash_SHA2_update_256 python_hashlib_Hacl_Hash_SHA2_update_256
+#define Hacl_Hash_SHA2_update_224 python_hashlib_Hacl_Hash_SHA2_update_224
+#define Hacl_Hash_SHA2_update_512 python_hashlib_Hacl_Hash_SHA2_update_512
+#define Hacl_Hash_SHA2_update_384 python_hashlib_Hacl_Hash_SHA2_update_384
+#define Hacl_Hash_SHA2_digest_256 python_hashlib_Hacl_Hash_SHA2_digest_256
+#define Hacl_Hash_SHA2_digest_224 python_hashlib_Hacl_Hash_SHA2_digest_224
+#define Hacl_Hash_SHA2_digest_512 python_hashlib_Hacl_Hash_SHA2_digest_512
+#define Hacl_Hash_SHA2_digest_384 python_hashlib_Hacl_Hash_SHA2_digest_384
+#define Hacl_Hash_SHA2_free_256 python_hashlib_Hacl_Hash_SHA2_free_256
+#define Hacl_Hash_SHA2_free_224 python_hashlib_Hacl_Hash_SHA2_free_224
+#define Hacl_Hash_SHA2_free_512 python_hashlib_Hacl_Hash_SHA2_free_512
+#define Hacl_Hash_SHA2_free_384 python_hashlib_Hacl_Hash_SHA2_free_384
+#define Hacl_Hash_SHA2_sha256 python_hashlib_Hacl_Hash_SHA2_sha256
+#define Hacl_Hash_SHA2_sha224 python_hashlib_Hacl_Hash_SHA2_sha224
+#define Hacl_Hash_SHA2_sha512 python_hashlib_Hacl_Hash_SHA2_sha512
+#define Hacl_Hash_SHA2_sha384 python_hashlib_Hacl_Hash_SHA2_sha384
-#define Hacl_Streaming_MD5_legacy_create_in python_hashlib_Hacl_Streaming_MD5_legacy_create_in
-#define Hacl_Streaming_MD5_legacy_init python_hashlib_Hacl_Streaming_MD5_legacy_init
-#define Hacl_Streaming_MD5_legacy_update python_hashlib_Hacl_Streaming_MD5_legacy_update
-#define Hacl_Streaming_MD5_legacy_finish python_hashlib_Hacl_Streaming_MD5_legacy_finish
-#define Hacl_Streaming_MD5_legacy_free python_hashlib_Hacl_Streaming_MD5_legacy_free
-#define Hacl_Streaming_MD5_legacy_copy python_hashlib_Hacl_Streaming_MD5_legacy_copy
-#define Hacl_Streaming_MD5_legacy_hash python_hashlib_Hacl_Streaming_MD5_legacy_hash
+#define Hacl_Hash_MD5_malloc python_hashlib_Hacl_Hash_MD5_malloc
+#define Hacl_Hash_MD5_init python_hashlib_Hacl_Hash_MD5_init
+#define Hacl_Hash_MD5_update python_hashlib_Hacl_Hash_MD5_update
+#define Hacl_Hash_MD5_digest python_hashlib_Hacl_Hash_MD5_digest
+#define Hacl_Hash_MD5_free python_hashlib_Hacl_Hash_MD5_free
+#define Hacl_Hash_MD5_copy python_hashlib_Hacl_Hash_MD5_copy
+#define Hacl_Hash_MD5_hash python_hashlib_Hacl_Hash_MD5_hash
-#define Hacl_Streaming_SHA1_legacy_create_in python_hashlib_Hacl_Streaming_SHA1_legacy_create_in
-#define Hacl_Streaming_SHA1_legacy_init python_hashlib_Hacl_Streaming_SHA1_legacy_init
-#define Hacl_Streaming_SHA1_legacy_update python_hashlib_Hacl_Streaming_SHA1_legacy_update
-#define Hacl_Streaming_SHA1_legacy_finish python_hashlib_Hacl_Streaming_SHA1_legacy_finish
-#define Hacl_Streaming_SHA1_legacy_free python_hashlib_Hacl_Streaming_SHA1_legacy_free
-#define Hacl_Streaming_SHA1_legacy_copy python_hashlib_Hacl_Streaming_SHA1_legacy_copy
-#define Hacl_Streaming_SHA1_legacy_hash python_hashlib_Hacl_Streaming_SHA1_legacy_hash
+#define Hacl_Hash_SHA1_malloc python_hashlib_Hacl_Hash_SHA1_malloc
+#define Hacl_Hash_SHA1_init python_hashlib_Hacl_Hash_SHA1_init
+#define Hacl_Hash_SHA1_update python_hashlib_Hacl_Hash_SHA1_update
+#define Hacl_Hash_SHA1_digest python_hashlib_Hacl_Hash_SHA1_digest
+#define Hacl_Hash_SHA1_free python_hashlib_Hacl_Hash_SHA1_free
+#define Hacl_Hash_SHA1_copy python_hashlib_Hacl_Hash_SHA1_copy
+#define Hacl_Hash_SHA1_hash python_hashlib_Hacl_Hash_SHA1_hash
#define Hacl_Hash_SHA3_update_last_sha3 python_hashlib_Hacl_Hash_SHA3_update_last_sha3
#define Hacl_Hash_SHA3_update_multi_sha3 python_hashlib_Hacl_Hash_SHA3_update_multi_sha3
@@ -72,15 +74,16 @@
#define Hacl_SHA3_sha3_512 python_hashlib_Hacl_SHA3_sha3_512
#define Hacl_SHA3_shake128_hacl python_hashlib_Hacl_SHA3_shake128_hacl
#define Hacl_SHA3_shake256_hacl python_hashlib_Hacl_SHA3_shake256_hacl
-#define Hacl_Streaming_Keccak_block_len python_hashlib_Hacl_Streaming_Keccak_block_len
-#define Hacl_Streaming_Keccak_copy python_hashlib_Hacl_Streaming_Keccak_copy
-#define Hacl_Streaming_Keccak_finish python_hashlib_Hacl_Streaming_Keccak_finish
-#define Hacl_Streaming_Keccak_free python_hashlib_Hacl_Streaming_Keccak_free
-#define Hacl_Streaming_Keccak_get_alg python_hashlib_Hacl_Streaming_Keccak_get_alg
-#define Hacl_Streaming_Keccak_hash_len python_hashlib_Hacl_Streaming_Keccak_hash_len
-#define Hacl_Streaming_Keccak_is_shake python_hashlib_Hacl_Streaming_Keccak_is_shake
-#define Hacl_Streaming_Keccak_malloc python_hashlib_Hacl_Streaming_Keccak_malloc
-#define Hacl_Streaming_Keccak_reset python_hashlib_Hacl_Streaming_Keccak_reset
-#define Hacl_Streaming_Keccak_update python_hashlib_Hacl_Streaming_Keccak_update
+#define Hacl_Hash_SHA3_block_len python_hashlib_Hacl_Hash_SHA3_block_len
+#define Hacl_Hash_SHA3_copy python_hashlib_Hacl_Hash_SHA3_copy
+#define Hacl_Hash_SHA3_digest python_hashlib_Hacl_Hash_SHA3_digest
+#define Hacl_Hash_SHA3_free python_hashlib_Hacl_Hash_SHA3_free
+#define Hacl_Hash_SHA3_get_alg python_hashlib_Hacl_Hash_SHA3_get_alg
+#define Hacl_Hash_SHA3_hash_len python_hashlib_Hacl_Hash_SHA3_hash_len
+#define Hacl_Hash_SHA3_is_shake python_hashlib_Hacl_Hash_SHA3_is_shake
+#define Hacl_Hash_SHA3_malloc python_hashlib_Hacl_Hash_SHA3_malloc
+#define Hacl_Hash_SHA3_reset python_hashlib_Hacl_Hash_SHA3_reset
+#define Hacl_Hash_SHA3_update python_hashlib_Hacl_Hash_SHA3_update
+#define Hacl_Hash_SHA3_squeeze python_hashlib_Hacl_Hash_SHA3_squeeze
#endif // _PYTHON_HACL_NAMESPACES_H
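
The renamed header above explains the trick in its comment, but a standalone illustration may help: a macro rewrites the public identifier before any declaration is parsed, so every translation unit that includes the prefix header both defines and calls the prefixed symbol, and the plain name never reaches the linker. The sketch below is not part of the patch; the names (mylib_digest, demo_app_) are invented.

/* namespacing_demo.c -- hypothetical example of #define-based symbol prefixing.
 * Compile: cc namespacing_demo.c && nm a.out | grep digest
 * Only the prefixed symbol appears in the binary. */
#include <stdio.h>

/* Normally this line lives in a "namespaces" header included before the
 * library's own headers, exactly like python_hacl_namespaces.h. */
#define mylib_digest demo_app_mylib_digest

/* The library code is written against the plain name... */
unsigned mylib_digest(unsigned x) {   /* ...but actually defines demo_app_mylib_digest */
    return x * 2654435761u;           /* stand-in for real hashing */
}

int main(void) {
    printf("%u\n", mylib_digest(42)); /* the call site is rewritten the same way */
    return 0;
}
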
diff --git a/contrib/tools/python3/Modules/_io/bufferedio.c b/contrib/tools/python3/Modules/_io/bufferedio.c
index f30d54a5e11..e87d04bd07a 100644
--- a/contrib/tools/python3/Modules/_io/bufferedio.c
+++ b/contrib/tools/python3/Modules/_io/bufferedio.c
@@ -1015,6 +1015,16 @@ _io__Buffered_read1_impl(buffered *self, Py_ssize_t n)
Py_DECREF(res);
return NULL;
}
+ /* Flush the write buffer if necessary */
+ if (self->writable) {
+ PyObject *r = buffered_flush_and_rewind_unlocked(self);
+ if (r == NULL) {
+ LEAVE_BUFFERED(self)
+ Py_DECREF(res);
+ return NULL;
+ }
+ Py_DECREF(r);
+ }
_bufferedreader_reset_buf(self);
r = _bufferedreader_raw_read(self, PyBytes_AS_STRING(res), n);
LEAVE_BUFFERED(self)
@@ -1276,7 +1286,11 @@ _io__Buffered_tell_impl(buffered *self)
if (pos == -1)
return NULL;
pos -= RAW_OFFSET(self);
- /* TODO: sanity check (pos >= 0) */
+
+ // GH-95782
+ if (pos < 0)
+ pos = 0;
+
return PyLong_FromOff_t(pos);
}
@@ -1345,6 +1359,11 @@ _io__Buffered_seek_impl(buffered *self, PyObject *targetobj, int whence)
offset = target;
if (offset >= -self->pos && offset <= avail) {
self->pos += offset;
+
+ // GH-95782
+ if (current - avail + offset < 0)
+ return PyLong_FromOff_t(0);
+
return PyLong_FromOff_t(current - avail + offset);
}
}
diff --git a/contrib/tools/python3/Modules/_multiprocessing/posixshmem.c b/contrib/tools/python3/Modules/_multiprocessing/posixshmem.c
index 88c93fe3137..7af995b396e 100644
--- a/contrib/tools/python3/Modules/_multiprocessing/posixshmem.c
+++ b/contrib/tools/python3/Modules/_multiprocessing/posixshmem.c
@@ -42,10 +42,15 @@ _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags,
{
int fd;
int async_err = 0;
- const char *name = PyUnicode_AsUTF8(path);
+ Py_ssize_t name_size;
+ const char *name = PyUnicode_AsUTF8AndSize(path, &name_size);
if (name == NULL) {
return -1;
}
+ if (strlen(name) != (size_t)name_size) {
+ PyErr_SetString(PyExc_ValueError, "embedded null character");
+ return -1;
+ }
do {
Py_BEGIN_ALLOW_THREADS
fd = shm_open(name, flags, mode);
@@ -81,10 +86,15 @@ _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path)
{
int rv;
int async_err = 0;
- const char *name = PyUnicode_AsUTF8(path);
+ Py_ssize_t name_size;
+ const char *name = PyUnicode_AsUTF8AndSize(path, &name_size);
if (name == NULL) {
return NULL;
}
+ if (strlen(name) != (size_t)name_size) {
+ PyErr_SetString(PyExc_ValueError, "embedded null character");
+ return NULL;
+ }
do {
Py_BEGIN_ALLOW_THREADS
rv = shm_unlink(name);
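
Both hunks in this file apply the same guard, so a minimal standalone sketch (not from the patch, names invented) may be useful: when a counted string is about to be handed to a NUL-terminated C API such as shm_open(), comparing strlen() with the known byte count catches embedded NUL characters that would otherwise silently truncate the name.

/* nul_check_demo.c -- hypothetical sketch of the embedded-NUL guard. */
#include <stdio.h>
#include <string.h>

/* Returns 0 if the 'size'-byte buffer is safe to pass to an API that expects
 * a NUL-terminated string, -1 if it contains an embedded '\0'. */
static int check_no_embedded_nul(const char *name, size_t size)
{
    if (strlen(name) != size) {        /* strlen stops at the first '\0' */
        fprintf(stderr, "embedded null character\n");
        return -1;
    }
    return 0;
}

int main(void) {
    const char good[] = "/shm_demo";
    const char bad[]  = {'/', 's', 'h', 'm', '\0', 'x', '\0'};
    printf("%d\n", check_no_embedded_nul(good, sizeof(good) - 1));  /* 0  */
    printf("%d\n", check_no_embedded_nul(bad, sizeof(bad) - 1));    /* -1 */
    return 0;
}
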
diff --git a/contrib/tools/python3/Modules/_posixsubprocess.c b/contrib/tools/python3/Modules/_posixsubprocess.c
index d75bb92757c..35ea2ac306a 100644
--- a/contrib/tools/python3/Modules/_posixsubprocess.c
+++ b/contrib/tools/python3/Modules/_posixsubprocess.c
@@ -946,7 +946,9 @@ subprocess_fork_exec_impl(PyObject *module, PyObject *process_args,
Py_ssize_t fds_to_keep_len = PyTuple_GET_SIZE(py_fds_to_keep);
PyInterpreterState *interp = PyInterpreterState_Get();
- if ((preexec_fn != Py_None) && interp->finalizing) {
+ if ((preexec_fn != Py_None) &&
+ _PyInterpreterState_GetFinalizing(interp) != NULL)
+ {
PyErr_SetString(PyExc_RuntimeError,
"preexec_fn not supported at interpreter shutdown");
return NULL;
diff --git a/contrib/tools/python3/Modules/_sqlite/ya.make b/contrib/tools/python3/Modules/_sqlite/ya.make
index c8e770686f0..49635c2d573 100644
--- a/contrib/tools/python3/Modules/_sqlite/ya.make
+++ b/contrib/tools/python3/Modules/_sqlite/ya.make
@@ -2,9 +2,9 @@
LIBRARY()
-VERSION(3.12.2)
+VERSION(3.12.3)
-ORIGINAL_SOURCE(https://github.com/python/cpython/archive/v3.12.2.tar.gz)
+ORIGINAL_SOURCE(https://github.com/python/cpython/archive/v3.12.3.tar.gz)
LICENSE(Python-2.0)
diff --git a/contrib/tools/python3/Modules/_ssl.c b/contrib/tools/python3/Modules/_ssl.c
index 7b4e9b5d696..e72675c6b59 100644
--- a/contrib/tools/python3/Modules/_ssl.c
+++ b/contrib/tools/python3/Modules/_ssl.c
@@ -3143,7 +3143,6 @@ _ssl__SSLContext_impl(PyTypeObject *type, int proto_version)
result = SSL_CTX_set_cipher_list(ctx, "HIGH:!aNULL:!eNULL");
}
if (result == 0) {
- Py_DECREF(self);
ERR_clear_error();
PyErr_SetString(get_state_ctx(self)->PySSLErrorObject,
"No cipher can be selected.");
@@ -4520,6 +4519,50 @@ set_sni_callback(PySSLContext *self, PyObject *arg, void *c)
return 0;
}
+#if OPENSSL_VERSION_NUMBER < 0x30300000L
+static X509_OBJECT *x509_object_dup(const X509_OBJECT *obj)
+{
+ int ok;
+ X509_OBJECT *ret = X509_OBJECT_new();
+ if (ret == NULL) {
+ return NULL;
+ }
+ switch (X509_OBJECT_get_type(obj)) {
+ case X509_LU_X509:
+ ok = X509_OBJECT_set1_X509(ret, X509_OBJECT_get0_X509(obj));
+ break;
+ case X509_LU_CRL:
+ /* X509_OBJECT_get0_X509_CRL was not const-correct prior to 3.0.*/
+ ok = X509_OBJECT_set1_X509_CRL(
+ ret, X509_OBJECT_get0_X509_CRL((X509_OBJECT *)obj));
+ break;
+ default:
+ /* We cannot duplicate unrecognized types in a polyfill, but it is
+ * safe to leave an empty object. The caller will ignore it. */
+ ok = 1;
+ break;
+ }
+ if (!ok) {
+ X509_OBJECT_free(ret);
+ return NULL;
+ }
+ return ret;
+}
+
+static STACK_OF(X509_OBJECT) *
+X509_STORE_get1_objects(X509_STORE *store)
+{
+ STACK_OF(X509_OBJECT) *ret;
+ if (!X509_STORE_lock(store)) {
+ return NULL;
+ }
+ ret = sk_X509_OBJECT_deep_copy(X509_STORE_get0_objects(store),
+ x509_object_dup, X509_OBJECT_free);
+ X509_STORE_unlock(store);
+ return ret;
+}
+#endif
+
PyDoc_STRVAR(PySSLContext_sni_callback_doc,
"Set a callback that will be called when a server name is provided by the SSL/TLS client in the SNI extension.\n\
\n\
@@ -4549,7 +4592,12 @@ _ssl__SSLContext_cert_store_stats_impl(PySSLContext *self)
int x509 = 0, crl = 0, ca = 0, i;
store = SSL_CTX_get_cert_store(self->ctx);
- objs = X509_STORE_get0_objects(store);
+ objs = X509_STORE_get1_objects(store);
+ if (objs == NULL) {
+ PyErr_SetString(PyExc_MemoryError, "failed to query cert store");
+ return NULL;
+ }
+
for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
obj = sk_X509_OBJECT_value(objs, i);
switch (X509_OBJECT_get_type(obj)) {
@@ -4563,12 +4611,11 @@ _ssl__SSLContext_cert_store_stats_impl(PySSLContext *self)
crl++;
break;
default:
- /* Ignore X509_LU_FAIL, X509_LU_RETRY, X509_LU_PKEY.
- * As far as I can tell they are internal states and never
- * stored in a cert store */
+ /* Ignore unrecognized types. */
break;
}
}
+ sk_X509_OBJECT_pop_free(objs, X509_OBJECT_free);
return Py_BuildValue("{sisisi}", "x509", x509, "crl", crl,
"x509_ca", ca);
}
@@ -4600,7 +4647,12 @@ _ssl__SSLContext_get_ca_certs_impl(PySSLContext *self, int binary_form)
}
store = SSL_CTX_get_cert_store(self->ctx);
- objs = X509_STORE_get0_objects(store);
+ objs = X509_STORE_get1_objects(store);
+ if (objs == NULL) {
+ PyErr_SetString(PyExc_MemoryError, "failed to query cert store");
+ goto error;
+ }
+
for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
X509_OBJECT *obj;
X509 *cert;
@@ -4628,9 +4680,11 @@ _ssl__SSLContext_get_ca_certs_impl(PySSLContext *self, int binary_form)
}
Py_CLEAR(ci);
}
+ sk_X509_OBJECT_pop_free(objs, X509_OBJECT_free);
return rlist;
error:
+ sk_X509_OBJECT_pop_free(objs, X509_OBJECT_free);
Py_XDECREF(ci);
Py_XDECREF(rlist);
return NULL;
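
The X509_STORE_get1_objects polyfill above takes a deep copy of the store's object stack while holding the store lock, so the callers changed further down iterate over a private snapshot and free it afterwards instead of reading the shared stack unlocked. A generic, OpenSSL-free sketch of that copy-under-lock pattern follows, assuming POSIX threads; the store_t type and store_snapshot() are invented for illustration.

/* snapshot_demo.c -- hypothetical "copy under lock, use unlocked, free" pattern.
 * Compile: cc snapshot_demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    pthread_mutex_t lock;
    char **items;
    size_t count;
} store_t;

/* Deep-copy the contents while holding the lock; the caller owns the result. */
static char **store_snapshot(store_t *s, size_t *count_out)
{
    pthread_mutex_lock(&s->lock);
    char **copy = calloc(s->count, sizeof(*copy));
    size_t n = 0;
    if (copy != NULL) {
        for (; n < s->count; n++) {
            copy[n] = strdup(s->items[n]);
            if (copy[n] == NULL) {
                break;                      /* partial copy: clean up below */
            }
        }
    }
    pthread_mutex_unlock(&s->lock);
    if (copy == NULL || n != s->count) {    /* an allocation failed midway */
        while (n > 0) free(copy[--n]);
        free(copy);
        return NULL;
    }
    *count_out = n;
    return copy;
}

int main(void) {
    char *data[] = {"cert-a", "cert-b", "crl-c"};
    store_t s = {PTHREAD_MUTEX_INITIALIZER, data, 3};
    size_t n = 0;
    char **snap = store_snapshot(&s, &n);
    if (snap == NULL) return 1;
    for (size_t i = 0; i < n; i++) {
        printf("%s\n", snap[i]);            /* iterate without touching s.lock */
    }
    for (size_t i = 0; i < n; i++) free(snap[i]);
    free(snap);
    return 0;
}
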
diff --git a/contrib/tools/python3/Modules/_threadmodule.c b/contrib/tools/python3/Modules/_threadmodule.c
index 568fe8375d1..365f4460088 100644
--- a/contrib/tools/python3/Modules/_threadmodule.c
+++ b/contrib/tools/python3/Modules/_threadmodule.c
@@ -1190,7 +1190,7 @@ thread_PyThread_start_new_thread(PyObject *self, PyObject *fargs)
"thread is not supported for isolated subinterpreters");
return NULL;
}
- if (interp->finalizing) {
+ if (_PyInterpreterState_GetFinalizing(interp) != NULL) {
PyErr_SetString(PyExc_RuntimeError,
"can't create new thread at interpreter shutdown");
return NULL;
diff --git a/contrib/tools/python3/Modules/arraymodule.c b/contrib/tools/python3/Modules/arraymodule.c
index 6680820d8e6..19ee83d24c8 100644
--- a/contrib/tools/python3/Modules/arraymodule.c
+++ b/contrib/tools/python3/Modules/arraymodule.c
@@ -244,7 +244,7 @@ BB_setitem(arrayobject *ap, Py_ssize_t i, PyObject *v)
if (!PyArg_Parse(v, "b;array item must be integer", &x))
return -1;
if (i >= 0)
- ((char *)ap->ob_item)[i] = x;
+ ((unsigned char *)ap->ob_item)[i] = x;
return 0;
}
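
The one-line change above stores the byte through an unsigned char pointer instead of a plain char pointer. A small illustrative sketch (not from the patch) of the difference this sidesteps: converting a value above 127 to a possibly signed plain char is implementation-defined, while a store through unsigned char keeps the 0..255 value exactly.

/* uchar_store_demo.c -- why byte stores go through an unsigned char pointer. */
#include <stdio.h>

int main(void) {
    unsigned char x = 200;      /* above CHAR_MAX on signed-char ABIs */
    char buf[1];

    ((char *)buf)[0] = x;            /* implementation-defined result if char is signed */
    printf("as char:          %d\n", buf[0]);

    ((unsigned char *)buf)[0] = x;   /* always well-defined, stores exactly 200 */
    printf("as unsigned char: %d\n", ((unsigned char *)buf)[0]);
    return 0;
}
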
diff --git a/contrib/tools/python3/Modules/clinic/_elementtree.c.h b/contrib/tools/python3/Modules/clinic/_elementtree.c.h
index 0b3a86159cc..d635d33d857 100644
--- a/contrib/tools/python3/Modules/clinic/_elementtree.c.h
+++ b/contrib/tools/python3/Modules/clinic/_elementtree.c.h
@@ -1168,6 +1168,23 @@ _elementtree_XMLParser_close(XMLParserObject *self, PyObject *Py_UNUSED(ignored)
return _elementtree_XMLParser_close_impl(self);
}
+PyDoc_STRVAR(_elementtree_XMLParser_flush__doc__,
+"flush($self, /)\n"
+"--\n"
+"\n");
+
+#define _ELEMENTTREE_XMLPARSER_FLUSH_METHODDEF \
+ {"flush", (PyCFunction)_elementtree_XMLParser_flush, METH_NOARGS, _elementtree_XMLParser_flush__doc__},
+
+static PyObject *
+_elementtree_XMLParser_flush_impl(XMLParserObject *self);
+
+static PyObject *
+_elementtree_XMLParser_flush(XMLParserObject *self, PyObject *Py_UNUSED(ignored))
+{
+ return _elementtree_XMLParser_flush_impl(self);
+}
+
PyDoc_STRVAR(_elementtree_XMLParser_feed__doc__,
"feed($self, data, /)\n"
"--\n"
@@ -1218,4 +1235,4 @@ skip_optional:
exit:
return return_value;
}
-/*[clinic end generated code: output=31c4780c4df68441 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=ec30550fd83b2791 input=a9049054013a1b77]*/
diff --git a/contrib/tools/python3/Modules/clinic/pyexpat.c.h b/contrib/tools/python3/Modules/clinic/pyexpat.c.h
index 34937c5d594..20a28dc6a75 100644
--- a/contrib/tools/python3/Modules/clinic/pyexpat.c.h
+++ b/contrib/tools/python3/Modules/clinic/pyexpat.c.h
@@ -8,6 +8,53 @@ preserve
#endif
+PyDoc_STRVAR(pyexpat_xmlparser_SetReparseDeferralEnabled__doc__,
+"SetReparseDeferralEnabled($self, enabled, /)\n"
+"--\n"
+"\n"
+"Enable/Disable reparse deferral; enabled by default with Expat >=2.6.0.");
+
+#define PYEXPAT_XMLPARSER_SETREPARSEDEFERRALENABLED_METHODDEF \
+ {"SetReparseDeferralEnabled", (PyCFunction)pyexpat_xmlparser_SetReparseDeferralEnabled, METH_O, pyexpat_xmlparser_SetReparseDeferralEnabled__doc__},
+
+static PyObject *
+pyexpat_xmlparser_SetReparseDeferralEnabled_impl(xmlparseobject *self,
+ int enabled);
+
+static PyObject *
+pyexpat_xmlparser_SetReparseDeferralEnabled(xmlparseobject *self, PyObject *arg)
+{
+ PyObject *return_value = NULL;
+ int enabled;
+
+ enabled = PyObject_IsTrue(arg);
+ if (enabled < 0) {
+ goto exit;
+ }
+ return_value = pyexpat_xmlparser_SetReparseDeferralEnabled_impl(self, enabled);
+
+exit:
+ return return_value;
+}
+
+PyDoc_STRVAR(pyexpat_xmlparser_GetReparseDeferralEnabled__doc__,
+"GetReparseDeferralEnabled($self, /)\n"
+"--\n"
+"\n"
+"Retrieve reparse deferral enabled status; always returns false with Expat <2.6.0.");
+
+#define PYEXPAT_XMLPARSER_GETREPARSEDEFERRALENABLED_METHODDEF \
+ {"GetReparseDeferralEnabled", (PyCFunction)pyexpat_xmlparser_GetReparseDeferralEnabled, METH_NOARGS, pyexpat_xmlparser_GetReparseDeferralEnabled__doc__},
+
+static PyObject *
+pyexpat_xmlparser_GetReparseDeferralEnabled_impl(xmlparseobject *self);
+
+static PyObject *
+pyexpat_xmlparser_GetReparseDeferralEnabled(xmlparseobject *self, PyObject *Py_UNUSED(ignored))
+{
+ return pyexpat_xmlparser_GetReparseDeferralEnabled_impl(self);
+}
+
PyDoc_STRVAR(pyexpat_xmlparser_Parse__doc__,
"Parse($self, data, isfinal=False, /)\n"
"--\n"
@@ -498,4 +545,4 @@ exit:
#ifndef PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF
#define PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF
#endif /* !defined(PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF) */
-/*[clinic end generated code: output=63efc62e24a7b5a7 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=8625852bb44a5e56 input=a9049054013a1b77]*/
diff --git a/contrib/tools/python3/Modules/gcmodule.c b/contrib/tools/python3/Modules/gcmodule.c
index b7cb30ab7ed..5f2820c8489 100644
--- a/contrib/tools/python3/Modules/gcmodule.c
+++ b/contrib/tools/python3/Modules/gcmodule.c
@@ -2290,6 +2290,9 @@ void
_Py_RunGC(PyThreadState *tstate)
{
GCState *gcstate = &tstate->interp->gc;
+ if (!gcstate->enabled) {
+ return;
+ }
gcstate->collecting = 1;
gc_collect_generations(tstate);
gcstate->collecting = 0;
diff --git a/contrib/tools/python3/Modules/getpath.c b/contrib/tools/python3/Modules/getpath.c
index bd6920cf068..9dbe2c25051 100644
--- a/contrib/tools/python3/Modules/getpath.c
+++ b/contrib/tools/python3/Modules/getpath.c
@@ -260,6 +260,10 @@ getpath_joinpath(PyObject *Py_UNUSED(self), PyObject *args)
}
/* Convert all parts to wchar and accumulate max final length */
wchar_t **parts = (wchar_t **)PyMem_Malloc(n * sizeof(wchar_t *));
+ if (parts == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
memset(parts, 0, n * sizeof(wchar_t *));
Py_ssize_t cchFinal = 0;
Py_ssize_t first = 0;
diff --git a/contrib/tools/python3/Modules/itertoolsmodule.c b/contrib/tools/python3/Modules/itertoolsmodule.c
index 24e77c485db..d42f9dd0768 100644
--- a/contrib/tools/python3/Modules/itertoolsmodule.c
+++ b/contrib/tools/python3/Modules/itertoolsmodule.c
@@ -810,10 +810,9 @@ teedataobject_traverse(teedataobject *tdo, visitproc visit, void * arg)
}
static void
-teedataobject_safe_decref(PyObject *obj, PyTypeObject *tdo_type)
+teedataobject_safe_decref(PyObject *obj)
{
- while (obj && Py_IS_TYPE(obj, tdo_type) &&
- Py_REFCNT(obj) == 1) {
+ while (obj && Py_REFCNT(obj) == 1) {
PyObject *nextlink = ((teedataobject *)obj)->nextlink;
((teedataobject *)obj)->nextlink = NULL;
Py_SETREF(obj, nextlink);
@@ -832,8 +831,7 @@ teedataobject_clear(teedataobject *tdo)
Py_CLEAR(tdo->values[i]);
tmp = tdo->nextlink;
tdo->nextlink = NULL;
- itertools_state *state = get_module_state_by_cls(Py_TYPE(tdo));
- teedataobject_safe_decref(tmp, state->teedataobject_type);
+ teedataobject_safe_decref(tmp);
return 0;
}
@@ -4618,15 +4616,15 @@ batched(p, n) --> [p0, p1, ..., p_n-1], [p_n, p_n+1, ..., p_2n-1], ...\n\
chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ...\n\
chain.from_iterable([p, q, ...]) --> p0, p1, ... plast, q0, q1, ...\n\
compress(data, selectors) --> (d[0] if s[0]), (d[1] if s[1]), ...\n\
-dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails\n\
+dropwhile(predicate, seq) --> seq[n], seq[n+1], starting when predicate fails\n\
groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v)\n\
-filterfalse(pred, seq) --> elements of seq where pred(elem) is False\n\
+filterfalse(predicate, seq) --> elements of seq where predicate(elem) is False\n\
islice(seq, [start,] stop [, step]) --> elements from\n\
seq[start:stop:step]\n\
pairwise(s) --> (s[0],s[1]), (s[1],s[2]), (s[2], s[3]), ...\n\
starmap(fun, seq) --> fun(*seq[0]), fun(*seq[1]), ...\n\
tee(it, n=2) --> (it1, it2 , ... itn) splits one iterator into n\n\
-takewhile(pred, seq) --> seq[0], seq[1], until pred fails\n\
+takewhile(predicate, seq) --> seq[0], seq[1], until predicate fails\n\
zip_longest(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ...\n\
\n\
Combinatoric generators:\n\
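
teedataobject_safe_decref() exists so that a long chain of linked tee buffers is torn down in a loop rather than by one deallocation recursing into the next; the hunk above simplifies it by dropping the type check. A generic sketch of that iterative-teardown pattern on a plain singly linked list is below (names invented, not CPython code).

/* list_teardown_demo.c -- iterative teardown of a long chain to avoid deep recursion. */
#include <stdio.h>
#include <stdlib.h>

typedef struct node {
    struct node *next;
    int value;
} node;

/* Recursive teardown would use one stack frame per node and can overflow for
 * very long chains; this loop detaches and frees one node at a time, much as
 * the nextlink chain is unhooked before each decref. */
static void free_chain(node *head)
{
    while (head != NULL) {
        node *next = head->next;
        head->next = NULL;      /* unhook first, then release */
        free(head);
        head = next;
    }
}

int main(void) {
    node *head = NULL;
    for (int i = 0; i < 1000000; i++) {     /* long enough to break naive recursion */
        node *n = malloc(sizeof(*n));
        if (n == NULL) { free_chain(head); return 1; }
        n->next = head;
        n->value = i;
        head = n;
    }
    free_chain(head);
    puts("freed");
    return 0;
}
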
diff --git a/contrib/tools/python3/Modules/md5module.c b/contrib/tools/python3/Modules/md5module.c
index 2122f8b18ba..dcc9da38d75 100644
--- a/contrib/tools/python3/Modules/md5module.c
+++ b/contrib/tools/python3/Modules/md5module.c
@@ -52,7 +52,7 @@ typedef struct {
// Prevents undefined behavior via multiple threads entering the C API.
// The lock will be NULL before threaded access has been enabled.
PyThread_type_lock lock;
- Hacl_Streaming_MD5_state *hash_state;
+ Hacl_Hash_MD5_state_t *hash_state;
} MD5object;
#include "clinic/md5module.c.h"
@@ -90,11 +90,11 @@ MD5_traverse(PyObject *ptr, visitproc visit, void *arg)
static void
MD5_dealloc(MD5object *ptr)
{
- Hacl_Streaming_MD5_legacy_free(ptr->hash_state);
+ Hacl_Hash_MD5_free(ptr->hash_state);
if (ptr->lock != NULL) {
PyThread_free_lock(ptr->lock);
}
- PyTypeObject *tp = Py_TYPE(ptr);
+ PyTypeObject *tp = Py_TYPE((PyObject*)ptr);
PyObject_GC_UnTrack(ptr);
PyObject_GC_Del(ptr);
Py_DECREF(tp);
@@ -122,7 +122,7 @@ MD5Type_copy_impl(MD5object *self, PyTypeObject *cls)
return NULL;
ENTER_HASHLIB(self);
- newobj->hash_state = Hacl_Streaming_MD5_legacy_copy(self->hash_state);
+ newobj->hash_state = Hacl_Hash_MD5_copy(self->hash_state);
LEAVE_HASHLIB(self);
return (PyObject *)newobj;
}
@@ -139,7 +139,7 @@ MD5Type_digest_impl(MD5object *self)
{
unsigned char digest[MD5_DIGESTSIZE];
ENTER_HASHLIB(self);
- Hacl_Streaming_MD5_legacy_finish(self->hash_state, digest);
+ Hacl_Hash_MD5_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
return PyBytes_FromStringAndSize((const char *)digest, MD5_DIGESTSIZE);
}
@@ -156,20 +156,20 @@ MD5Type_hexdigest_impl(MD5object *self)
{
unsigned char digest[MD5_DIGESTSIZE];
ENTER_HASHLIB(self);
- Hacl_Streaming_MD5_legacy_finish(self->hash_state, digest);
+ Hacl_Hash_MD5_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
return _Py_strhex((const char*)digest, MD5_DIGESTSIZE);
}
-static void update(Hacl_Streaming_MD5_state *state, uint8_t *buf, Py_ssize_t len) {
+static void update(Hacl_Hash_MD5_state_t *state, uint8_t *buf, Py_ssize_t len) {
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
- Hacl_Streaming_MD5_legacy_update(state, buf, UINT32_MAX);
+ Hacl_Hash_MD5_update(state, buf, UINT32_MAX);
len -= UINT32_MAX;
buf += UINT32_MAX;
}
#endif
- Hacl_Streaming_MD5_legacy_update(state, buf, (uint32_t) len);
+ Hacl_Hash_MD5_update(state, buf, (uint32_t) len);
}
/*[clinic input]
@@ -293,7 +293,7 @@ _md5_md5_impl(PyObject *module, PyObject *string, int usedforsecurity)
return NULL;
}
- new->hash_state = Hacl_Streaming_MD5_legacy_create_in();
+ new->hash_state = Hacl_Hash_MD5_malloc();
if (PyErr_Occurred()) {
Py_DECREF(new);
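
The rewritten update() helper feeds a Py_ssize_t-sized buffer to a HACL* call whose length parameter is uint32_t by looping in UINT32_MAX-sized chunks; the SHA-1, SHA-2 and SHA-3 modules later in the patch use the same shape. A self-contained sketch of the chunking pattern follows, with a hypothetical absorb32() standing in for the 32-bit-length API.

/* chunked_update_demo.c -- feeding a large buffer to a 32-bit-length API. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for a hash-update routine that only takes uint32_t. */
static void absorb32(uint64_t *state, const uint8_t *buf, uint32_t len)
{
    for (uint32_t i = 0; i < len; i++) {
        *state = (*state * 31) + buf[i];
    }
}

/* Mirrors the update() helpers: loop in <4 GiB chunks when size_t is wider. */
static void absorb(uint64_t *state, const uint8_t *buf, size_t len)
{
#if SIZE_MAX > UINT32_MAX
    while (len > UINT32_MAX) {
        absorb32(state, buf, UINT32_MAX);
        len -= UINT32_MAX;
        buf += UINT32_MAX;
    }
#endif
    absorb32(state, buf, (uint32_t)len);   /* safe: len <= UINT32_MAX here */
}

int main(void) {
    uint8_t data[] = "hello world";
    uint64_t state = 0;
    absorb(&state, data, sizeof(data) - 1);
    printf("%llu\n", (unsigned long long)state);
    return 0;
}
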
diff --git a/contrib/tools/python3/Modules/overlapped.c b/contrib/tools/python3/Modules/overlapped.c
index afdd78d1bc9..686ecf8b33b 100644
--- a/contrib/tools/python3/Modules/overlapped.c
+++ b/contrib/tools/python3/Modules/overlapped.c
@@ -719,6 +719,24 @@ Overlapped_dealloc(OverlappedObject *self)
if (!HasOverlappedIoCompleted(&self->overlapped) &&
self->type != TYPE_NOT_STARTED)
{
+ // NOTE: We should not get here, if we do then something is wrong in
+ // the IocpProactor or ProactorEventLoop. Since everything uses IOCP if
+ // the overlapped IO hasn't completed yet then we should not be
+ // deallocating!
+ //
+ // The problem is likely that this OverlappedObject was removed from
+ // the IocpProactor._cache before it was complete. The _cache holds a
+ // reference while IO is pending so that it does not get deallocated
+ // while the kernel has retained the OVERLAPPED structure.
+ //
+ // CancelIoEx (likely called from self.cancel()) may have successfully
+ // completed, but the OVERLAPPED is still in use until either
+ // HasOverlappedIoCompleted() is true or GetQueuedCompletionStatus has
+ // returned this OVERLAPPED object.
+ //
+ // NOTE: Waiting when IOCP is in use can hang indefinitely, but this
+ // CancelIoEx is superfluous in that self.cancel() was already called,
+ // so I've only ever seen this return FALSE with GLE=ERROR_NOT_FOUND
Py_BEGIN_ALLOW_THREADS
if (CancelIoEx(self->handle, &self->overlapped))
wait = TRUE;
@@ -2039,6 +2057,7 @@ overlapped_exec(PyObject *module)
WINAPI_CONSTANT(F_DWORD, ERROR_OPERATION_ABORTED);
WINAPI_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
WINAPI_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
+ WINAPI_CONSTANT(F_DWORD, ERROR_PORT_UNREACHABLE);
WINAPI_CONSTANT(F_DWORD, INFINITE);
WINAPI_CONSTANT(F_HANDLE, INVALID_HANDLE_VALUE);
WINAPI_CONSTANT(F_HANDLE, NULL);
diff --git a/contrib/tools/python3/Modules/posixmodule.c b/contrib/tools/python3/Modules/posixmodule.c
index c99c69d018b..8fb0d3cedad 100644
--- a/contrib/tools/python3/Modules/posixmodule.c
+++ b/contrib/tools/python3/Modules/posixmodule.c
@@ -7664,7 +7664,7 @@ os_fork1_impl(PyObject *module)
pid_t pid;
PyInterpreterState *interp = _PyInterpreterState_GET();
- if (interp->finalizing) {
+ if (_PyInterpreterState_GetFinalizing(interp) != NULL) {
PyErr_SetString(PyExc_RuntimeError,
"can't fork at interpreter shutdown");
return NULL;
@@ -7708,7 +7708,7 @@ os_fork_impl(PyObject *module)
{
pid_t pid;
PyInterpreterState *interp = _PyInterpreterState_GET();
- if (interp->finalizing) {
+ if (_PyInterpreterState_GetFinalizing(interp) != NULL) {
PyErr_SetString(PyExc_RuntimeError,
"can't fork at interpreter shutdown");
return NULL;
@@ -8395,7 +8395,7 @@ os_forkpty_impl(PyObject *module)
pid_t pid;
PyInterpreterState *interp = _PyInterpreterState_GET();
- if (interp->finalizing) {
+ if (_PyInterpreterState_GetFinalizing(interp) != NULL) {
PyErr_SetString(PyExc_RuntimeError,
"can't fork at interpreter shutdown");
return NULL;
@@ -9249,36 +9249,39 @@ wait_helper(PyObject *module, pid_t pid, int status, struct rusage *ru)
if (!result)
return NULL;
+ int pos = 0;
+
#ifndef doubletime
#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
#endif
- PyStructSequence_SET_ITEM(result, 0,
- PyFloat_FromDouble(doubletime(ru->ru_utime)));
- PyStructSequence_SET_ITEM(result, 1,
- PyFloat_FromDouble(doubletime(ru->ru_stime)));
-#define SET_INT(result, index, value)\
- PyStructSequence_SET_ITEM(result, index, PyLong_FromLong(value))
- SET_INT(result, 2, ru->ru_maxrss);
- SET_INT(result, 3, ru->ru_ixrss);
- SET_INT(result, 4, ru->ru_idrss);
- SET_INT(result, 5, ru->ru_isrss);
- SET_INT(result, 6, ru->ru_minflt);
- SET_INT(result, 7, ru->ru_majflt);
- SET_INT(result, 8, ru->ru_nswap);
- SET_INT(result, 9, ru->ru_inblock);
- SET_INT(result, 10, ru->ru_oublock);
- SET_INT(result, 11, ru->ru_msgsnd);
- SET_INT(result, 12, ru->ru_msgrcv);
- SET_INT(result, 13, ru->ru_nsignals);
- SET_INT(result, 14, ru->ru_nvcsw);
- SET_INT(result, 15, ru->ru_nivcsw);
-#undef SET_INT
-
- if (PyErr_Occurred()) {
- Py_DECREF(result);
- return NULL;
- }
+#define SET_RESULT(CALL) \
+ do { \
+ PyObject *item = (CALL); \
+ if (item == NULL) { \
+ Py_DECREF(result); \
+ return NULL; \
+ } \
+ PyStructSequence_SET_ITEM(result, pos++, item); \
+ } while(0)
+
+ SET_RESULT(PyFloat_FromDouble(doubletime(ru->ru_utime)));
+ SET_RESULT(PyFloat_FromDouble(doubletime(ru->ru_stime)));
+ SET_RESULT(PyLong_FromLong(ru->ru_maxrss));
+ SET_RESULT(PyLong_FromLong(ru->ru_ixrss));
+ SET_RESULT(PyLong_FromLong(ru->ru_idrss));
+ SET_RESULT(PyLong_FromLong(ru->ru_isrss));
+ SET_RESULT(PyLong_FromLong(ru->ru_minflt));
+ SET_RESULT(PyLong_FromLong(ru->ru_majflt));
+ SET_RESULT(PyLong_FromLong(ru->ru_nswap));
+ SET_RESULT(PyLong_FromLong(ru->ru_inblock));
+ SET_RESULT(PyLong_FromLong(ru->ru_oublock));
+ SET_RESULT(PyLong_FromLong(ru->ru_msgsnd));
+ SET_RESULT(PyLong_FromLong(ru->ru_msgrcv));
+ SET_RESULT(PyLong_FromLong(ru->ru_nsignals));
+ SET_RESULT(PyLong_FromLong(ru->ru_nvcsw));
+ SET_RESULT(PyLong_FromLong(ru->ru_nivcsw));
+#undef SET_RESULT
return Py_BuildValue("NiN", PyLong_FromPid(pid), status, result);
}
@@ -9401,15 +9404,25 @@ os_waitid_impl(PyObject *module, idtype_t idtype, id_t id, int options)
if (!result)
return NULL;
- PyStructSequence_SET_ITEM(result, 0, PyLong_FromPid(si.si_pid));
- PyStructSequence_SET_ITEM(result, 1, _PyLong_FromUid(si.si_uid));
- PyStructSequence_SET_ITEM(result, 2, PyLong_FromLong((long)(si.si_signo)));
- PyStructSequence_SET_ITEM(result, 3, PyLong_FromLong((long)(si.si_status)));
- PyStructSequence_SET_ITEM(result, 4, PyLong_FromLong((long)(si.si_code)));
- if (PyErr_Occurred()) {
- Py_DECREF(result);
- return NULL;
- }
+ int pos = 0;
+
+#define SET_RESULT(CALL) \
+ do { \
+ PyObject *item = (CALL); \
+ if (item == NULL) { \
+ Py_DECREF(result); \
+ return NULL; \
+ } \
+ PyStructSequence_SET_ITEM(result, pos++, item); \
+ } while(0)
+
+ SET_RESULT(PyLong_FromPid(si.si_pid));
+ SET_RESULT(_PyLong_FromUid(si.si_uid));
+ SET_RESULT(PyLong_FromLong((long)(si.si_signo)));
+ SET_RESULT(PyLong_FromLong((long)(si.si_status)));
+ SET_RESULT(PyLong_FromLong((long)(si.si_code)));
+
+#undef SET_RESULT
return result;
}
@@ -12362,46 +12375,50 @@ _pystatvfs_fromstructstatvfs(PyObject *module, struct statvfs st) {
if (v == NULL)
return NULL;
+ int pos = 0;
+
+#define SET_RESULT(CALL) \
+ do { \
+ PyObject *item = (CALL); \
+ if (item == NULL) { \
+ Py_DECREF(v); \
+ return NULL; \
+ } \
+ PyStructSequence_SET_ITEM(v, pos++, item); \
+ } while(0)
+
#if !defined(HAVE_LARGEFILE_SUPPORT)
- PyStructSequence_SET_ITEM(v, 0, PyLong_FromLong((long) st.f_bsize));
- PyStructSequence_SET_ITEM(v, 1, PyLong_FromLong((long) st.f_frsize));
- PyStructSequence_SET_ITEM(v, 2, PyLong_FromLong((long) st.f_blocks));
- PyStructSequence_SET_ITEM(v, 3, PyLong_FromLong((long) st.f_bfree));
- PyStructSequence_SET_ITEM(v, 4, PyLong_FromLong((long) st.f_bavail));
- PyStructSequence_SET_ITEM(v, 5, PyLong_FromLong((long) st.f_files));
- PyStructSequence_SET_ITEM(v, 6, PyLong_FromLong((long) st.f_ffree));
- PyStructSequence_SET_ITEM(v, 7, PyLong_FromLong((long) st.f_favail));
- PyStructSequence_SET_ITEM(v, 8, PyLong_FromLong((long) st.f_flag));
- PyStructSequence_SET_ITEM(v, 9, PyLong_FromLong((long) st.f_namemax));
+ SET_RESULT(PyLong_FromLong((long) st.f_bsize));
+ SET_RESULT(PyLong_FromLong((long) st.f_frsize));
+ SET_RESULT(PyLong_FromLong((long) st.f_blocks));
+ SET_RESULT(PyLong_FromLong((long) st.f_bfree));
+ SET_RESULT(PyLong_FromLong((long) st.f_bavail));
+ SET_RESULT(PyLong_FromLong((long) st.f_files));
+ SET_RESULT(PyLong_FromLong((long) st.f_ffree));
+ SET_RESULT(PyLong_FromLong((long) st.f_favail));
+ SET_RESULT(PyLong_FromLong((long) st.f_flag));
+ SET_RESULT(PyLong_FromLong((long) st.f_namemax));
#else
- PyStructSequence_SET_ITEM(v, 0, PyLong_FromLong((long) st.f_bsize));
- PyStructSequence_SET_ITEM(v, 1, PyLong_FromLong((long) st.f_frsize));
- PyStructSequence_SET_ITEM(v, 2,
- PyLong_FromLongLong((long long) st.f_blocks));
- PyStructSequence_SET_ITEM(v, 3,
- PyLong_FromLongLong((long long) st.f_bfree));
- PyStructSequence_SET_ITEM(v, 4,
- PyLong_FromLongLong((long long) st.f_bavail));
- PyStructSequence_SET_ITEM(v, 5,
- PyLong_FromLongLong((long long) st.f_files));
- PyStructSequence_SET_ITEM(v, 6,
- PyLong_FromLongLong((long long) st.f_ffree));
- PyStructSequence_SET_ITEM(v, 7,
- PyLong_FromLongLong((long long) st.f_favail));
- PyStructSequence_SET_ITEM(v, 8, PyLong_FromLong((long) st.f_flag));
- PyStructSequence_SET_ITEM(v, 9, PyLong_FromLong((long) st.f_namemax));
+ SET_RESULT(PyLong_FromLong((long) st.f_bsize));
+ SET_RESULT(PyLong_FromLong((long) st.f_frsize));
+ SET_RESULT(PyLong_FromLongLong((long long) st.f_blocks));
+ SET_RESULT(PyLong_FromLongLong((long long) st.f_bfree));
+ SET_RESULT(PyLong_FromLongLong((long long) st.f_bavail));
+ SET_RESULT(PyLong_FromLongLong((long long) st.f_files));
+ SET_RESULT(PyLong_FromLongLong((long long) st.f_ffree));
+ SET_RESULT(PyLong_FromLongLong((long long) st.f_favail));
+ SET_RESULT(PyLong_FromLong((long) st.f_flag));
+ SET_RESULT(PyLong_FromLong((long) st.f_namemax));
#endif
/* The _ALL_SOURCE feature test macro defines f_fsid as a structure
* (issue #32390). */
#if defined(_AIX) && defined(_ALL_SOURCE)
- PyStructSequence_SET_ITEM(v, 10, PyLong_FromUnsignedLong(st.f_fsid.val[0]));
+ SET_RESULT(PyLong_FromUnsignedLong(st.f_fsid.val[0]));
#else
- PyStructSequence_SET_ITEM(v, 10, PyLong_FromUnsignedLong(st.f_fsid));
+ SET_RESULT(PyLong_FromUnsignedLong(st.f_fsid));
#endif
- if (PyErr_Occurred()) {
- Py_DECREF(v);
- return NULL;
- }
+
+#undef SET_RESULT
return v;
}
@@ -14331,12 +14348,23 @@ os_get_terminal_size_impl(PyObject *module, int fd)
termsize = PyStructSequence_New((PyTypeObject *)TerminalSizeType);
if (termsize == NULL)
return NULL;
- PyStructSequence_SET_ITEM(termsize, 0, PyLong_FromLong(columns));
- PyStructSequence_SET_ITEM(termsize, 1, PyLong_FromLong(lines));
- if (PyErr_Occurred()) {
- Py_DECREF(termsize);
- return NULL;
- }
+
+ int pos = 0;
+
+#define SET_TERMSIZE(CALL) \
+ do { \
+ PyObject *item = (CALL); \
+ if (item == NULL) { \
+ Py_DECREF(termsize); \
+ return NULL; \
+ } \
+ PyStructSequence_SET_ITEM(termsize, pos++, item); \
+ } while(0)
+
+ SET_TERMSIZE(PyLong_FromLong(columns));
+ SET_TERMSIZE(PyLong_FromLong(lines));
+#undef SET_TERMSIZE
+
return termsize;
}
#endif /* defined(TERMSIZE_USE_CONIO) || defined(TERMSIZE_USE_IOCTL) */
@@ -15026,6 +15054,10 @@ DirEntry_from_find_data(PyObject *module, path_t *path, WIN32_FIND_DATAW *dataW)
find_data_to_file_info(dataW, &file_info, &reparse_tag);
_Py_attribute_data_to_stat(&file_info, reparse_tag, NULL, NULL, &entry->win32_lstat);
+ /* ctime is only deprecated from 3.12, so we copy birthtime across */
+ entry->win32_lstat.st_ctime = entry->win32_lstat.st_birthtime;
+ entry->win32_lstat.st_ctime_nsec = entry->win32_lstat.st_birthtime_nsec;
+
return (PyObject *)entry;
error:
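
The SET_RESULT macro introduced throughout posixmodule.c (and again in pwdmodule.c just below) replaces the old fill-everything-then-check-PyErr_Occurred() approach with an immediate per-item check that tears the partially built result down on the first failure. A CPython-free sketch of the same do { ... } while (0) fill-or-fail pattern follows; build_record() and dup_or_null() are invented stand-ins.

/* fill_or_fail_demo.c -- hypothetical sketch of the SET_RESULT pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_or_null(const char *s)      /* stand-in for PyLong_FromLong etc. */
{
    return s ? strdup(s) : NULL;
}

static char **build_record(const char *name, const char *dir, const char *shell)
{
    char **rec = calloc(3, sizeof(*rec));
    if (rec == NULL) {
        return NULL;
    }
    int pos = 0;

/* Each produced item is checked immediately; on failure the partial record
 * is freed and the function bails out, so no half-filled result escapes. */
#define SET_RESULT(CALL)                                   \
    do {                                                   \
        char *item = (CALL);                               \
        if (item == NULL) {                                \
            while (pos > 0) free(rec[--pos]);              \
            free(rec);                                     \
            return NULL;                                   \
        }                                                  \
        rec[pos++] = item;                                 \
    } while (0)

    SET_RESULT(dup_or_null(name));
    SET_RESULT(dup_or_null(dir));
    SET_RESULT(dup_or_null(shell));
#undef SET_RESULT

    return rec;
}

int main(void) {
    char **rec = build_record("demo", "/home/demo", "/bin/sh");
    if (rec == NULL) return 1;
    for (int i = 0; i < 3; i++) {
        printf("%s\n", rec[i]);
        free(rec[i]);
    }
    free(rec);
    return 0;
}
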
diff --git a/contrib/tools/python3/Modules/pwdmodule.c b/contrib/tools/python3/Modules/pwdmodule.c
index cc2e2a43893..920259a62c4 100644
--- a/contrib/tools/python3/Modules/pwdmodule.c
+++ b/contrib/tools/python3/Modules/pwdmodule.c
@@ -63,53 +63,52 @@ static struct PyModuleDef pwdmodule;
#define DEFAULT_BUFFER_SIZE 1024
-static void
-sets(PyObject *v, int i, const char* val)
-{
- if (val) {
- PyObject *o = PyUnicode_DecodeFSDefault(val);
- PyStructSequence_SET_ITEM(v, i, o);
- }
- else {
- PyStructSequence_SET_ITEM(v, i, Py_None);
- Py_INCREF(Py_None);
- }
-}
-
static PyObject *
mkpwent(PyObject *module, struct passwd *p)
{
- int setIndex = 0;
PyObject *v = PyStructSequence_New(get_pwd_state(module)->StructPwdType);
- if (v == NULL)
+ if (v == NULL) {
return NULL;
+ }
+
+ int setIndex = 0;
+
+#define SET_STRING(VAL) \
+ SET_RESULT((VAL) ? PyUnicode_DecodeFSDefault((VAL)) : Py_NewRef(Py_None))
-#define SETS(i,val) sets(v, i, val)
+#define SET_RESULT(CALL) \
+ do { \
+ PyObject *item = (CALL); \
+ if (item == NULL) { \
+ goto error; \
+ } \
+ PyStructSequence_SET_ITEM(v, setIndex++, item); \
+ } while(0)
- SETS(setIndex++, p->pw_name);
+ SET_STRING(p->pw_name);
#if defined(HAVE_STRUCT_PASSWD_PW_PASSWD) && !defined(__ANDROID__)
- SETS(setIndex++, p->pw_passwd);
+ SET_STRING(p->pw_passwd);
#else
- SETS(setIndex++, "");
+ SET_STRING("");
#endif
- PyStructSequence_SET_ITEM(v, setIndex++, _PyLong_FromUid(p->pw_uid));
- PyStructSequence_SET_ITEM(v, setIndex++, _PyLong_FromGid(p->pw_gid));
+ SET_RESULT(_PyLong_FromUid(p->pw_uid));
+ SET_RESULT(_PyLong_FromGid(p->pw_gid));
#if defined(HAVE_STRUCT_PASSWD_PW_GECOS)
- SETS(setIndex++, p->pw_gecos);
+ SET_STRING(p->pw_gecos);
#else
- SETS(setIndex++, "");
+ SET_STRING("");
#endif
- SETS(setIndex++, p->pw_dir);
- SETS(setIndex++, p->pw_shell);
-
-#undef SETS
+ SET_STRING(p->pw_dir);
+ SET_STRING(p->pw_shell);
- if (PyErr_Occurred()) {
- Py_XDECREF(v);
- return NULL;
- }
+#undef SET_STRING
+#undef SET_RESULT
return v;
+
+error:
+ Py_DECREF(v);
+ return NULL;
}
/*[clinic input]
diff --git a/contrib/tools/python3/Modules/pyexpat.c b/contrib/tools/python3/Modules/pyexpat.c
index b21360419d6..be31c637fcc 100644
--- a/contrib/tools/python3/Modules/pyexpat.c
+++ b/contrib/tools/python3/Modules/pyexpat.c
@@ -1,6 +1,7 @@
#include "Python.h"
#include <ctype.h>
+#include <stdbool.h>
#include "structmember.h" // PyMemberDef
#include "expat.h"
@@ -76,6 +77,12 @@ typedef struct {
/* NULL if not enabled */
int buffer_size; /* Size of buffer, in XML_Char units */
int buffer_used; /* Buffer units in use */
+ bool reparse_deferral_enabled; /* Whether to defer reparsing of
+ unfinished XML tokens; a de-facto cache of
+ what Expat has the authority on, for lack
+ of a getter API function
+ "XML_GetReparseDeferralEnabled" in Expat
+ 2.6.0 */
PyObject *intern; /* Dictionary to intern strings */
PyObject **handlers;
} xmlparseobject;
@@ -706,6 +713,40 @@ get_parse_result(pyexpat_state *state, xmlparseobject *self, int rv)
#define MAX_CHUNK_SIZE (1 << 20)
/*[clinic input]
+pyexpat.xmlparser.SetReparseDeferralEnabled
+
+ enabled: bool
+ /
+
+Enable/Disable reparse deferral; enabled by default with Expat >=2.6.0.
+[clinic start generated code]*/
+
+static PyObject *
+pyexpat_xmlparser_SetReparseDeferralEnabled_impl(xmlparseobject *self,
+ int enabled)
+/*[clinic end generated code: output=5ec539e3b63c8c49 input=021eb9e0bafc32c5]*/
+{
+#if XML_COMBINED_VERSION >= 20600
+ XML_SetReparseDeferralEnabled(self->itself, enabled ? XML_TRUE : XML_FALSE);
+ self->reparse_deferral_enabled = (bool)enabled;
+#endif
+ Py_RETURN_NONE;
+}
+
+/*[clinic input]
+pyexpat.xmlparser.GetReparseDeferralEnabled
+
+Retrieve reparse deferral enabled status; always returns false with Expat <2.6.0.
+[clinic start generated code]*/
+
+static PyObject *
+pyexpat_xmlparser_GetReparseDeferralEnabled_impl(xmlparseobject *self)
+/*[clinic end generated code: output=4e91312e88a595a8 input=54b5f11d32b20f3e]*/
+{
+ return PyBool_FromLong(self->reparse_deferral_enabled);
+}
+
+/*[clinic input]
pyexpat.xmlparser.Parse
cls: defining_class
@@ -1065,6 +1106,8 @@ static struct PyMethodDef xmlparse_methods[] = {
#if XML_COMBINED_VERSION >= 19505
PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF
#endif
+ PYEXPAT_XMLPARSER_SETREPARSEDEFERRALENABLED_METHODDEF
+ PYEXPAT_XMLPARSER_GETREPARSEDEFERRALENABLED_METHODDEF
{NULL, NULL} /* sentinel */
};
@@ -1160,6 +1203,11 @@ newxmlparseobject(pyexpat_state *state, const char *encoding,
self->ns_prefixes = 0;
self->handlers = NULL;
self->intern = Py_XNewRef(intern);
+#if XML_COMBINED_VERSION >= 20600
+ self->reparse_deferral_enabled = true;
+#else
+ self->reparse_deferral_enabled = false;
+#endif
/* namespace_separator is either NULL or contains one char + \0 */
self->itself = XML_ParserCreate_MM(encoding, &ExpatMemoryHandler,
@@ -2028,6 +2076,11 @@ pyexpat_exec(PyObject *mod)
#else
capi->SetHashSalt = NULL;
#endif
+#if XML_COMBINED_VERSION >= 20600
+ capi->SetReparseDeferralEnabled = XML_SetReparseDeferralEnabled;
+#else
+ capi->SetReparseDeferralEnabled = NULL;
+#endif
/* export using capsule */
PyObject *capi_object = PyCapsule_New(capi, PyExpat_CAPSULE_NAME,
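
The new reparse_deferral_enabled field is, as the comment in the hunk says, a cache kept in the wrapper because Expat before 2.6.0 offers no getter for the setting, and both the setter call and the default are gated on XML_COMBINED_VERSION. A standalone sketch of that wrap-a-versioned-library pattern follows; LIB_COMBINED_VERSION, lib_set_deferral() and the wrapper type are all hypothetical.

/* version_gate_demo.c -- hypothetical sketch: cache a toggle in the wrapper
 * because the wrapped library has a setter but no getter, and the setter only
 * exists from a certain version onwards. */
#include <stdbool.h>
#include <stdio.h>

#define LIB_COMBINED_VERSION 20600      /* pretend version of the wrapped library */

#if LIB_COMBINED_VERSION >= 20600
static void lib_set_deferral(bool on) { (void)on; /* would call into the library */ }
#endif

typedef struct {
    bool deferral_enabled;              /* de-facto cache: the library has no getter */
} wrapper;

static void wrapper_init(wrapper *w)
{
#if LIB_COMBINED_VERSION >= 20600
    w->deferral_enabled = true;         /* the library default from 2.6.0 onwards */
#else
    w->deferral_enabled = false;        /* feature absent: report it as off */
#endif
}

static void wrapper_set(wrapper *w, bool on)
{
#if LIB_COMBINED_VERSION >= 20600
    lib_set_deferral(on);
    w->deferral_enabled = on;           /* keep the cache in sync with the setter */
#else
    (void)on;                           /* silently ignored on old libraries */
#endif
}

int main(void) {
    wrapper w;
    wrapper_init(&w);
    printf("%d\n", w.deferral_enabled);
    wrapper_set(&w, false);
    printf("%d\n", w.deferral_enabled);
    return 0;
}
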
diff --git a/contrib/tools/python3/Modules/selectmodule.c b/contrib/tools/python3/Modules/selectmodule.c
index b7c6b1b5399..97f1db20f67 100644
--- a/contrib/tools/python3/Modules/selectmodule.c
+++ b/contrib/tools/python3/Modules/selectmodule.c
@@ -818,10 +818,10 @@ static int devpoll_flush(devpollObject *self)
** clear what to do if a partial write occurred. For now, raise
** an exception and see if we actually found this problem in
** the wild.
- ** See http://bugs.python.org/issue6397.
+ ** See https://github.com/python/cpython/issues/50646.
*/
PyErr_Format(PyExc_OSError, "failed to write all pollfds. "
- "Please, report at http://bugs.python.org/. "
+ "Please, report at https://github.com/python/cpython/issues/. "
"Data to report: Size tried: %d, actual size written: %d.",
size, n);
return -1;
diff --git a/contrib/tools/python3/Modules/sha1module.c b/contrib/tools/python3/Modules/sha1module.c
index c66269b5f5c..624dc57b0a3 100644
--- a/contrib/tools/python3/Modules/sha1module.c
+++ b/contrib/tools/python3/Modules/sha1module.c
@@ -51,7 +51,7 @@ typedef struct {
// Prevents undefined behavior via multiple threads entering the C API.
// The lock will be NULL before threaded access has been enabled.
PyThread_type_lock lock;
- Hacl_Streaming_SHA1_state *hash_state;
+ Hacl_Hash_SHA1_state_t *hash_state;
} SHA1object;
#include "clinic/sha1module.c.h"
@@ -90,7 +90,7 @@ SHA1_traverse(PyObject *ptr, visitproc visit, void *arg)
static void
SHA1_dealloc(SHA1object *ptr)
{
- Hacl_Streaming_SHA1_legacy_free(ptr->hash_state);
+ Hacl_Hash_SHA1_free(ptr->hash_state);
if (ptr->lock != NULL) {
PyThread_free_lock(ptr->lock);
}
@@ -122,7 +122,7 @@ SHA1Type_copy_impl(SHA1object *self, PyTypeObject *cls)
return NULL;
ENTER_HASHLIB(self);
- newobj->hash_state = Hacl_Streaming_SHA1_legacy_copy(self->hash_state);
+ newobj->hash_state = Hacl_Hash_SHA1_copy(self->hash_state);
LEAVE_HASHLIB(self);
return (PyObject *)newobj;
}
@@ -139,7 +139,7 @@ SHA1Type_digest_impl(SHA1object *self)
{
unsigned char digest[SHA1_DIGESTSIZE];
ENTER_HASHLIB(self);
- Hacl_Streaming_SHA1_legacy_finish(self->hash_state, digest);
+ Hacl_Hash_SHA1_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
return PyBytes_FromStringAndSize((const char *)digest, SHA1_DIGESTSIZE);
}
@@ -156,20 +156,20 @@ SHA1Type_hexdigest_impl(SHA1object *self)
{
unsigned char digest[SHA1_DIGESTSIZE];
ENTER_HASHLIB(self);
- Hacl_Streaming_SHA1_legacy_finish(self->hash_state, digest);
+ Hacl_Hash_SHA1_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
return _Py_strhex((const char *)digest, SHA1_DIGESTSIZE);
}
-static void update(Hacl_Streaming_SHA1_state *state, uint8_t *buf, Py_ssize_t len) {
+static void update(Hacl_Hash_SHA1_state_t *state, uint8_t *buf, Py_ssize_t len) {
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
- Hacl_Streaming_SHA1_legacy_update(state, buf, UINT32_MAX);
+ Hacl_Hash_SHA1_update(state, buf, UINT32_MAX);
len -= UINT32_MAX;
buf += UINT32_MAX;
}
#endif
- Hacl_Streaming_SHA1_legacy_update(state, buf, (uint32_t) len);
+ Hacl_Hash_SHA1_update(state, buf, (uint32_t) len);
}
/*[clinic input]
@@ -293,7 +293,7 @@ _sha1_sha1_impl(PyObject *module, PyObject *string, int usedforsecurity)
return NULL;
}
- new->hash_state = Hacl_Streaming_SHA1_legacy_create_in();
+ new->hash_state = Hacl_Hash_SHA1_malloc();
if (PyErr_Occurred()) {
Py_DECREF(new);
diff --git a/contrib/tools/python3/Modules/sha2module.c b/contrib/tools/python3/Modules/sha2module.c
index db3774c81e2..f5dab39a8c5 100644
--- a/contrib/tools/python3/Modules/sha2module.c
+++ b/contrib/tools/python3/Modules/sha2module.c
@@ -55,7 +55,7 @@ typedef struct {
// Prevents undefined behavior via multiple threads entering the C API.
// The lock will be NULL before threaded access has been enabled.
PyThread_type_lock lock;
- Hacl_Streaming_SHA2_state_sha2_256 *state;
+ Hacl_Hash_SHA2_state_t_256 *state;
} SHA256object;
typedef struct {
@@ -64,7 +64,7 @@ typedef struct {
// Prevents undefined behavior via multiple threads entering the C API.
// The lock will be NULL before threaded access has been enabled.
PyThread_type_lock lock;
- Hacl_Streaming_SHA2_state_sha2_512 *state;
+ Hacl_Hash_SHA2_state_t_512 *state;
} SHA512object;
#include "clinic/sha2module.c.h"
@@ -89,13 +89,13 @@ sha2_get_state(PyObject *module)
static void SHA256copy(SHA256object *src, SHA256object *dest)
{
dest->digestsize = src->digestsize;
- dest->state = Hacl_Streaming_SHA2_copy_256(src->state);
+ dest->state = Hacl_Hash_SHA2_copy_256(src->state);
}
static void SHA512copy(SHA512object *src, SHA512object *dest)
{
dest->digestsize = src->digestsize;
- dest->state = Hacl_Streaming_SHA2_copy_512(src->state);
+ dest->state = Hacl_Hash_SHA2_copy_512(src->state);
}
static SHA256object *
@@ -162,7 +162,7 @@ SHA2_traverse(PyObject *ptr, visitproc visit, void *arg)
static void
SHA256_dealloc(SHA256object *ptr)
{
- Hacl_Streaming_SHA2_free_256(ptr->state);
+ Hacl_Hash_SHA2_free_256(ptr->state);
if (ptr->lock != NULL) {
PyThread_free_lock(ptr->lock);
}
@@ -175,7 +175,7 @@ SHA256_dealloc(SHA256object *ptr)
static void
SHA512_dealloc(SHA512object *ptr)
{
- Hacl_Streaming_SHA2_free_512(ptr->state);
+ Hacl_Hash_SHA2_free_512(ptr->state);
if (ptr->lock != NULL) {
PyThread_free_lock(ptr->lock);
}
@@ -188,34 +188,34 @@ SHA512_dealloc(SHA512object *ptr)
/* HACL* takes a uint32_t for the length of its parameter, but Py_ssize_t can be
* 64 bits so we loop in <4gig chunks when needed. */
-static void update_256(Hacl_Streaming_SHA2_state_sha2_256 *state, uint8_t *buf, Py_ssize_t len) {
+static void update_256(Hacl_Hash_SHA2_state_t_256 *state, uint8_t *buf, Py_ssize_t len) {
/* Note: we explicitly ignore the error code on the basis that it would take >
* 1 billion years to overflow the maximum admissible length for SHA2-256
* (namely, 2^61-1 bytes). */
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
- Hacl_Streaming_SHA2_update_256(state, buf, UINT32_MAX);
+ Hacl_Hash_SHA2_update_256(state, buf, UINT32_MAX);
len -= UINT32_MAX;
buf += UINT32_MAX;
}
#endif
/* Cast to uint32_t is safe: len <= UINT32_MAX at this point. */
- Hacl_Streaming_SHA2_update_256(state, buf, (uint32_t) len);
+ Hacl_Hash_SHA2_update_256(state, buf, (uint32_t) len);
}
-static void update_512(Hacl_Streaming_SHA2_state_sha2_512 *state, uint8_t *buf, Py_ssize_t len) {
+static void update_512(Hacl_Hash_SHA2_state_t_512 *state, uint8_t *buf, Py_ssize_t len) {
/* Note: we explicitly ignore the error code on the basis that it would take >
* 1 billion years to overflow the maximum admissible length for this API
* (namely, 2^64-1 bytes). */
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
- Hacl_Streaming_SHA2_update_512(state, buf, UINT32_MAX);
+ Hacl_Hash_SHA2_update_512(state, buf, UINT32_MAX);
len -= UINT32_MAX;
buf += UINT32_MAX;
}
#endif
/* Cast to uint32_t is safe: len <= UINT32_MAX at this point. */
- Hacl_Streaming_SHA2_update_512(state, buf, (uint32_t) len);
+ Hacl_Hash_SHA2_update_512(state, buf, (uint32_t) len);
}
@@ -298,7 +298,7 @@ SHA256Type_digest_impl(SHA256object *self)
ENTER_HASHLIB(self);
// HACL* performs copies under the hood so that self->state remains valid
// after this call.
- Hacl_Streaming_SHA2_finish_256(self->state, digest);
+ Hacl_Hash_SHA2_digest_256(self->state, digest);
LEAVE_HASHLIB(self);
return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
@@ -318,7 +318,7 @@ SHA512Type_digest_impl(SHA512object *self)
ENTER_HASHLIB(self);
// HACL* performs copies under the hood so that self->state remains valid
// after this call.
- Hacl_Streaming_SHA2_finish_512(self->state, digest);
+ Hacl_Hash_SHA2_digest_512(self->state, digest);
LEAVE_HASHLIB(self);
return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
@@ -336,7 +336,7 @@ SHA256Type_hexdigest_impl(SHA256object *self)
uint8_t digest[SHA256_DIGESTSIZE];
assert(self->digestsize <= SHA256_DIGESTSIZE);
ENTER_HASHLIB(self);
- Hacl_Streaming_SHA2_finish_256(self->state, digest);
+ Hacl_Hash_SHA2_digest_256(self->state, digest);
LEAVE_HASHLIB(self);
return _Py_strhex((const char *)digest, self->digestsize);
}
@@ -354,7 +354,7 @@ SHA512Type_hexdigest_impl(SHA512object *self)
uint8_t digest[SHA512_DIGESTSIZE];
assert(self->digestsize <= SHA512_DIGESTSIZE);
ENTER_HASHLIB(self);
- Hacl_Streaming_SHA2_finish_512(self->state, digest);
+ Hacl_Hash_SHA2_digest_512(self->state, digest);
LEAVE_HASHLIB(self);
return _Py_strhex((const char *)digest, self->digestsize);
}
@@ -599,7 +599,7 @@ _sha2_sha256_impl(PyObject *module, PyObject *string, int usedforsecurity)
return NULL;
}
- new->state = Hacl_Streaming_SHA2_create_in_256();
+ new->state = Hacl_Hash_SHA2_malloc_256();
new->digestsize = 32;
if (PyErr_Occurred()) {
@@ -653,7 +653,7 @@ _sha2_sha224_impl(PyObject *module, PyObject *string, int usedforsecurity)
return NULL;
}
- new->state = Hacl_Streaming_SHA2_create_in_224();
+ new->state = Hacl_Hash_SHA2_malloc_224();
new->digestsize = 28;
if (PyErr_Occurred()) {
@@ -707,7 +707,7 @@ _sha2_sha512_impl(PyObject *module, PyObject *string, int usedforsecurity)
return NULL;
}
- new->state = Hacl_Streaming_SHA2_create_in_512();
+ new->state = Hacl_Hash_SHA2_malloc_512();
new->digestsize = 64;
if (PyErr_Occurred()) {
@@ -760,7 +760,7 @@ _sha2_sha384_impl(PyObject *module, PyObject *string, int usedforsecurity)
return NULL;
}
- new->state = Hacl_Streaming_SHA2_create_in_384();
+ new->state = Hacl_Hash_SHA2_malloc_384();
new->digestsize = 48;
if (PyErr_Occurred()) {
diff --git a/contrib/tools/python3/Modules/sha3module.c b/contrib/tools/python3/Modules/sha3module.c
index 558d2005cff..f3c6a7fced7 100644
--- a/contrib/tools/python3/Modules/sha3module.c
+++ b/contrib/tools/python3/Modules/sha3module.c
@@ -63,7 +63,7 @@ typedef struct {
// Prevents undefined behavior via multiple threads entering the C API.
// The lock will be NULL before threaded access has been enabled.
PyThread_type_lock lock;
- Hacl_Streaming_Keccak_state *hash_state;
+ Hacl_Hash_SHA3_state_t *hash_state;
} SHA3object;
#include "clinic/sha3module.c.h"
@@ -80,18 +80,18 @@ newSHA3object(PyTypeObject *type)
return newobj;
}
-static void sha3_update(Hacl_Streaming_Keccak_state *state, uint8_t *buf, Py_ssize_t len) {
+static void sha3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *buf, Py_ssize_t len) {
/* Note: we explicitly ignore the error code on the basis that it would take >
* 1 billion years to hash more than 2^64 bytes. */
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
- Hacl_Streaming_Keccak_update(state, buf, UINT32_MAX);
+ Hacl_Hash_SHA3_update(state, buf, UINT32_MAX);
len -= UINT32_MAX;
buf += UINT32_MAX;
}
#endif
/* Cast to uint32_t is safe: len <= UINT32_MAX at this point. */
- Hacl_Streaming_Keccak_update(state, buf, (uint32_t) len);
+ Hacl_Hash_SHA3_update(state, buf, (uint32_t) len);
}
/*[clinic input]
@@ -119,17 +119,17 @@ py_sha3_new_impl(PyTypeObject *type, PyObject *data, int usedforsecurity)
assert(state != NULL);
if (type == state->sha3_224_type) {
- self->hash_state = Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_SHA3_224);
+ self->hash_state = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_SHA3_224);
} else if (type == state->sha3_256_type) {
- self->hash_state = Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_SHA3_256);
+ self->hash_state = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_SHA3_256);
} else if (type == state->sha3_384_type) {
- self->hash_state = Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_SHA3_384);
+ self->hash_state = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_SHA3_384);
} else if (type == state->sha3_512_type) {
- self->hash_state = Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_SHA3_512);
+ self->hash_state = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_SHA3_512);
} else if (type == state->shake_128_type) {
- self->hash_state = Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_Shake128);
+ self->hash_state = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_Shake128);
} else if (type == state->shake_256_type) {
- self->hash_state = Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_Shake256);
+ self->hash_state = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_Shake256);
} else {
PyErr_BadInternalCall();
goto error;
@@ -168,7 +168,7 @@ py_sha3_new_impl(PyTypeObject *type, PyObject *data, int usedforsecurity)
static void
SHA3_dealloc(SHA3object *self)
{
- Hacl_Streaming_Keccak_free(self->hash_state);
+ Hacl_Hash_SHA3_free(self->hash_state);
if (self->lock != NULL) {
PyThread_free_lock(self->lock);
}
@@ -197,7 +197,7 @@ _sha3_sha3_224_copy_impl(SHA3object *self)
return NULL;
}
ENTER_HASHLIB(self);
- newobj->hash_state = Hacl_Streaming_Keccak_copy(self->hash_state);
+ newobj->hash_state = Hacl_Hash_SHA3_copy(self->hash_state);
LEAVE_HASHLIB(self);
return (PyObject *)newobj;
}
@@ -217,10 +217,10 @@ _sha3_sha3_224_digest_impl(SHA3object *self)
// This function errors out if the algorithm is Shake. Here, we know this
// not to be the case, and therefore do not perform error checking.
ENTER_HASHLIB(self);
- Hacl_Streaming_Keccak_finish(self->hash_state, digest);
+ Hacl_Hash_SHA3_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
return PyBytes_FromStringAndSize((const char *)digest,
- Hacl_Streaming_Keccak_hash_len(self->hash_state));
+ Hacl_Hash_SHA3_hash_len(self->hash_state));
}
@@ -236,10 +236,10 @@ _sha3_sha3_224_hexdigest_impl(SHA3object *self)
{
unsigned char digest[SHA3_MAX_DIGESTSIZE];
ENTER_HASHLIB(self);
- Hacl_Streaming_Keccak_finish(self->hash_state, digest);
+ Hacl_Hash_SHA3_digest(self->hash_state, digest);
LEAVE_HASHLIB(self);
return _Py_strhex((const char *)digest,
- Hacl_Streaming_Keccak_hash_len(self->hash_state));
+ Hacl_Hash_SHA3_hash_len(self->hash_state));
}
@@ -287,7 +287,7 @@ static PyMethodDef SHA3_methods[] = {
static PyObject *
SHA3_get_block_size(SHA3object *self, void *closure)
{
- uint32_t rate = Hacl_Streaming_Keccak_block_len(self->hash_state);
+ uint32_t rate = Hacl_Hash_SHA3_block_len(self->hash_state);
return PyLong_FromLong(rate);
}
@@ -323,17 +323,17 @@ static PyObject *
SHA3_get_digest_size(SHA3object *self, void *closure)
{
// Preserving previous behavior: variable-length algorithms return 0
- if (Hacl_Streaming_Keccak_is_shake(self->hash_state))
+ if (Hacl_Hash_SHA3_is_shake(self->hash_state))
return PyLong_FromLong(0);
else
- return PyLong_FromLong(Hacl_Streaming_Keccak_hash_len(self->hash_state));
+ return PyLong_FromLong(Hacl_Hash_SHA3_hash_len(self->hash_state));
}
static PyObject *
SHA3_get_capacity_bits(SHA3object *self, void *closure)
{
- uint32_t rate = Hacl_Streaming_Keccak_block_len(self->hash_state) * 8;
+ uint32_t rate = Hacl_Hash_SHA3_block_len(self->hash_state) * 8;
int capacity = 1600 - rate;
return PyLong_FromLong(capacity);
}
@@ -342,7 +342,7 @@ SHA3_get_capacity_bits(SHA3object *self, void *closure)
static PyObject *
SHA3_get_rate_bits(SHA3object *self, void *closure)
{
- uint32_t rate = Hacl_Streaming_Keccak_block_len(self->hash_state) * 8;
+ uint32_t rate = Hacl_Hash_SHA3_block_len(self->hash_state) * 8;
return PyLong_FromLong(rate);
}
@@ -435,7 +435,7 @@ _SHAKE_digest(SHA3object *self, unsigned long digestlen, int hex)
* - the output length is zero -- we follow the existing behavior and return
* an empty digest, without raising an error */
if (digestlen > 0) {
- Hacl_Streaming_Keccak_squeeze(self->hash_state, digest, digestlen);
+ Hacl_Hash_SHA3_squeeze(self->hash_state, digest, digestlen);
}
if (hex) {
result = _Py_strhex((const char *)digest, digestlen);