path: root/contrib/libs/openssl/crypto/poly1305
author    deshevoy <deshevoy@yandex-team.ru>  2022-02-10 16:46:56 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:46:56 +0300
commit    e988f30484abe5fdeedcc7a5d3c226c01a21800c (patch)
tree      0a217b173aabb57b7e51f8a169989b1a3e0309fe /contrib/libs/openssl/crypto/poly1305
parent    33ee501c05d3f24036ae89766a858930ae66c548 (diff)
download  ydb-e988f30484abe5fdeedcc7a5d3c226c01a21800c.tar.gz
Restoring authorship annotation for <deshevoy@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/openssl/crypto/poly1305')
-rw-r--r--  contrib/libs/openssl/crypto/poly1305/poly1305.c           | 1058
-rw-r--r--  contrib/libs/openssl/crypto/poly1305/poly1305_ameth.c     |  234
-rw-r--r--  contrib/libs/openssl/crypto/poly1305/poly1305_base2_44.c  |  342
-rw-r--r--  contrib/libs/openssl/crypto/poly1305/poly1305_ieee754.c   |  976
-rw-r--r--  contrib/libs/openssl/crypto/poly1305/poly1305_local.h     |   54
-rw-r--r--  contrib/libs/openssl/crypto/poly1305/poly1305_pmeth.c     |  384
6 files changed, 1524 insertions, 1524 deletions
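
Editorial note, not part of the diff: poly1305.c below exposes a small one-shot MAC interface (Poly1305_Init, Poly1305_Update, Poly1305_Final, plus Poly1305_ctx_size for callers that treat the context as opaque). A minimal usage sketch, assuming only the declarations from crypto/poly1305.h that the diff itself shows; the helper name and message buffer are hypothetical, and the caller is responsible for using each 32-byte key for one message only:

    #include <stdlib.h>
    #include "crypto/poly1305.h"

    /* Hypothetical helper: MAC one message with a one-time 32-byte key. */
    static int mac_message(unsigned char mac[16], const unsigned char key[32],
                           const unsigned char *msg, size_t len)
    {
        POLY1305 *ctx = malloc(Poly1305_ctx_size()); /* type is opaque here */

        if (ctx == NULL)
            return 0;
        Poly1305_Init(ctx, key);        /* clamps r, stores the nonce */
        Poly1305_Update(ctx, msg, len); /* may be called repeatedly */
        Poly1305_Final(ctx, mac);       /* writes the 16-byte tag, wipes state */
        free(ctx);
        return 1;
    }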
diff --git a/contrib/libs/openssl/crypto/poly1305/poly1305.c b/contrib/libs/openssl/crypto/poly1305/poly1305.c
index e7f5b92c8f..903f7528df 100644
--- a/contrib/libs/openssl/crypto/poly1305/poly1305.c
+++ b/contrib/libs/openssl/crypto/poly1305/poly1305.c
@@ -1,531 +1,531 @@
-/*
- * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
- */
-
-#include <stdlib.h>
-#include <string.h>
-#include <openssl/crypto.h>
-
+/*
+ * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <openssl/crypto.h>
+
#include "crypto/poly1305.h"
-#include "poly1305_local.h"
-
-size_t Poly1305_ctx_size(void)
-{
- return sizeof(struct poly1305_context);
-}
-
-/* pick 32-bit unsigned integer in little endian order */
-static unsigned int U8TOU32(const unsigned char *p)
-{
- return (((unsigned int)(p[0] & 0xff)) |
- ((unsigned int)(p[1] & 0xff) << 8) |
- ((unsigned int)(p[2] & 0xff) << 16) |
- ((unsigned int)(p[3] & 0xff) << 24));
-}
-
-/*
- * Implementations can be classified by the number of significant bits
- * in the words making up the multi-precision value, in other words by
- * the radix or base of the numerical representation, e.g. base 2^64,
- * base 2^32, base 2^26. A complementary characteristic is the width of
- * the result of multiplying a pair of digits, e.g. it takes 128 bits
- * to accommodate a multiplication result in the base 2^64 case. The
- * two terms are used interchangeably to describe an implementation.
- * The interface, however, is designed to isolate this, so that
- * low-level primitives implemented in assembly can be
- * self-contained/self-coherent.
- */
-#ifndef POLY1305_ASM
-/*
- * Even though an __int128 reference implementation targeting 64-bit
- * platforms is provided below, it's not obvious that it's the optimal
- * choice for every one of them. Depending on the instruction set, the
- * overall instruction count can be comparable to that of the __int64
- * implementation. The number of multiplication instructions would be
- * lower, but not necessarily the overall count. And in an out-of-order
- * execution context, it is the latter that can be crucial...
- *
- * On a related note: the Poly1305 author, D. J. Bernstein, discusses
- * and provides floating-point implementations of the algorithm in
- * question. That made a lot of sense at the time of introduction,
- * because most then-modern processors didn't have a pipelined integer
- * multiplier. [Not to mention that some had non-constant timing for
- * integer multiplications.] Floating-point instructions, on the other
- * hand, could be issued every cycle, which made it possible to achieve
- * better performance. Nowadays, with SIMD and/or out-of-order
- * execution, shared or even emulated FPUs, it's more complicated, and
- * a floating-point implementation is not necessarily the optimal
- * choice in every situation, rather the contrary...
- *
- * <appro@openssl.org>
- */
-
-typedef unsigned int u32;
-
-/*
- * poly1305_blocks processes a multiple of POLY1305_BLOCK_SIZE blocks
- * of |inp|, no longer than |len|. Behaviour for |len| not divisible by
- * the block size is unspecified in the general case, even though in
- * the reference implementation the trailing chunk is simply ignored.
- * Per the algorithm specification, every input block, complete or the
- * last partial one, is to be padded with a bit past the most
- * significant byte. The latter kind is then padded with zeros to the
- * block size. This last-partial-block padding is the caller(*)'s
- * responsibility, and because of this the last partial block is
- * always processed in a separate call with |len| set to
- * POLY1305_BLOCK_SIZE and |padbit| to 0. In all other cases |padbit|
- * should be set to 1 to perform implicit padding with the 128th bit.
- * poly1305_blocks does not actually check this constraint though;
- * it's the caller(*)'s responsibility to comply.
- *
- * (*) In this context the "caller" is not application code, but the
- * higher-level Poly1305_* functions from this very module, so that
- * quirks are handled locally.
- */
-static void
-poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, u32 padbit);
-
-/*
+#include "poly1305_local.h"
+
+size_t Poly1305_ctx_size(void)
+{
+ return sizeof(struct poly1305_context);
+}
+
+/* pick 32-bit unsigned integer in little endian order */
+static unsigned int U8TOU32(const unsigned char *p)
+{
+ return (((unsigned int)(p[0] & 0xff)) |
+ ((unsigned int)(p[1] & 0xff) << 8) |
+ ((unsigned int)(p[2] & 0xff) << 16) |
+ ((unsigned int)(p[3] & 0xff) << 24));
+}
+
+/*
+ * Implementations can be classified by the number of significant bits
+ * in the words making up the multi-precision value, in other words by
+ * the radix or base of the numerical representation, e.g. base 2^64,
+ * base 2^32, base 2^26. A complementary characteristic is the width of
+ * the result of multiplying a pair of digits, e.g. it takes 128 bits
+ * to accommodate a multiplication result in the base 2^64 case. The
+ * two terms are used interchangeably to describe an implementation.
+ * The interface, however, is designed to isolate this, so that
+ * low-level primitives implemented in assembly can be
+ * self-contained/self-coherent.
+ */
+#ifndef POLY1305_ASM
+/*
+ * Even though an __int128 reference implementation targeting 64-bit
+ * platforms is provided below, it's not obvious that it's the optimal
+ * choice for every one of them. Depending on the instruction set, the
+ * overall instruction count can be comparable to that of the __int64
+ * implementation. The number of multiplication instructions would be
+ * lower, but not necessarily the overall count. And in an out-of-order
+ * execution context, it is the latter that can be crucial...
+ *
+ * On a related note: the Poly1305 author, D. J. Bernstein, discusses
+ * and provides floating-point implementations of the algorithm in
+ * question. That made a lot of sense at the time of introduction,
+ * because most then-modern processors didn't have a pipelined integer
+ * multiplier. [Not to mention that some had non-constant timing for
+ * integer multiplications.] Floating-point instructions, on the other
+ * hand, could be issued every cycle, which made it possible to achieve
+ * better performance. Nowadays, with SIMD and/or out-of-order
+ * execution, shared or even emulated FPUs, it's more complicated, and
+ * a floating-point implementation is not necessarily the optimal
+ * choice in every situation, rather the contrary...
+ *
+ * <appro@openssl.org>
+ */
+
+typedef unsigned int u32;
+
+/*
+ * poly1305_blocks processes a multiple of POLY1305_BLOCK_SIZE blocks
+ * of |inp|, no longer than |len|. Behaviour for |len| not divisible by
+ * the block size is unspecified in the general case, even though in
+ * the reference implementation the trailing chunk is simply ignored.
+ * Per the algorithm specification, every input block, complete or the
+ * last partial one, is to be padded with a bit past the most
+ * significant byte. The latter kind is then padded with zeros to the
+ * block size. This last-partial-block padding is the caller(*)'s
+ * responsibility, and because of this the last partial block is
+ * always processed in a separate call with |len| set to
+ * POLY1305_BLOCK_SIZE and |padbit| to 0. In all other cases |padbit|
+ * should be set to 1 to perform implicit padding with the 128th bit.
+ * poly1305_blocks does not actually check this constraint though;
+ * it's the caller(*)'s responsibility to comply.
+ *
+ * (*) In this context the "caller" is not application code, but the
+ * higher-level Poly1305_* functions from this very module, so that
+ * quirks are handled locally.
+ */
+static void
+poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, u32 padbit);
+
+/*
* Type-agnostic "rip-off" from constant_time.h
- */
-# define CONSTANT_TIME_CARRY(a,b) ( \
- (a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1) \
- )
-
-# if (defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16) && \
- (defined(__SIZEOF_LONG__) && __SIZEOF_LONG__==8)
-
-typedef unsigned long u64;
-typedef __uint128_t u128;
-
-typedef struct {
- u64 h[3];
- u64 r[2];
-} poly1305_internal;
-
-/* pick a 64-bit unsigned integer in little endian order */
-static u64 U8TOU64(const unsigned char *p)
-{
- return (((u64)(p[0] & 0xff)) |
- ((u64)(p[1] & 0xff) << 8) |
- ((u64)(p[2] & 0xff) << 16) |
- ((u64)(p[3] & 0xff) << 24) |
- ((u64)(p[4] & 0xff) << 32) |
- ((u64)(p[5] & 0xff) << 40) |
- ((u64)(p[6] & 0xff) << 48) |
- ((u64)(p[7] & 0xff) << 56));
-}
-
-/* store a 64-bit unsigned integer in little endian */
-static void U64TO8(unsigned char *p, u64 v)
-{
- p[0] = (unsigned char)((v) & 0xff);
- p[1] = (unsigned char)((v >> 8) & 0xff);
- p[2] = (unsigned char)((v >> 16) & 0xff);
- p[3] = (unsigned char)((v >> 24) & 0xff);
- p[4] = (unsigned char)((v >> 32) & 0xff);
- p[5] = (unsigned char)((v >> 40) & 0xff);
- p[6] = (unsigned char)((v >> 48) & 0xff);
- p[7] = (unsigned char)((v >> 56) & 0xff);
-}
-
-static void poly1305_init(void *ctx, const unsigned char key[16])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
-
- /* h = 0 */
- st->h[0] = 0;
- st->h[1] = 0;
- st->h[2] = 0;
-
- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- st->r[0] = U8TOU64(&key[0]) & 0x0ffffffc0fffffff;
- st->r[1] = U8TOU64(&key[8]) & 0x0ffffffc0ffffffc;
-}
-
-static void
-poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, u32 padbit)
-{
- poly1305_internal *st = (poly1305_internal *)ctx;
- u64 r0, r1;
- u64 s1;
- u64 h0, h1, h2, c;
- u128 d0, d1;
-
- r0 = st->r[0];
- r1 = st->r[1];
-
- s1 = r1 + (r1 >> 2);
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
-
- while (len >= POLY1305_BLOCK_SIZE) {
- /* h += m[i] */
- h0 = (u64)(d0 = (u128)h0 + U8TOU64(inp + 0));
- h1 = (u64)(d1 = (u128)h1 + (d0 >> 64) + U8TOU64(inp + 8));
-        /*
-         * padbit can be zero only when the original len was
-         * POLY1305_BLOCK_SIZE, but we don't check
-         */
- h2 += (u64)(d1 >> 64) + padbit;
-
- /* h *= r "%" p, where "%" stands for "partial remainder" */
- d0 = ((u128)h0 * r0) +
- ((u128)h1 * s1);
- d1 = ((u128)h0 * r1) +
- ((u128)h1 * r0) +
- (h2 * s1);
- h2 = (h2 * r0);
-
- /* last reduction step: */
- /* a) h2:h0 = h2<<128 + d1<<64 + d0 */
- h0 = (u64)d0;
- h1 = (u64)(d1 += d0 >> 64);
- h2 += (u64)(d1 >> 64);
- /* b) (h2:h0 += (h2:h0>>130) * 5) %= 2^130 */
- c = (h2 >> 2) + (h2 & ~3UL);
- h2 &= 3;
- h0 += c;
- h1 += (c = CONSTANT_TIME_CARRY(h0,c));
- h2 += CONSTANT_TIME_CARRY(h1,c);
- /*
- * Occasional overflows to 3rd bit of h2 are taken care of
- * "naturally". If after this point we end up at the top of
- * this loop, then the overflow bit will be accounted for
- * in next iteration. If we end up in poly1305_emit, then
- * comparison to modulus below will still count as "carry
- * into 131st bit", so that properly reduced value will be
- * picked in conditional move.
- */
-
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- }
-
- st->h[0] = h0;
- st->h[1] = h1;
- st->h[2] = h2;
-}
-
-static void poly1305_emit(void *ctx, unsigned char mac[16],
- const u32 nonce[4])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
- u64 h0, h1, h2;
- u64 g0, g1, g2;
- u128 t;
- u64 mask;
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
-
- /* compare to modulus by computing h + -p */
- g0 = (u64)(t = (u128)h0 + 5);
- g1 = (u64)(t = (u128)h1 + (t >> 64));
- g2 = h2 + (u64)(t >> 64);
-
- /* if there was carry into 131st bit, h1:h0 = g1:g0 */
- mask = 0 - (g2 >> 2);
- g0 &= mask;
- g1 &= mask;
- mask = ~mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
-
- /* mac = (h + nonce) % (2^128) */
- h0 = (u64)(t = (u128)h0 + nonce[0] + ((u64)nonce[1]<<32));
- h1 = (u64)(t = (u128)h1 + nonce[2] + ((u64)nonce[3]<<32) + (t >> 64));
-
- U64TO8(mac + 0, h0);
- U64TO8(mac + 8, h1);
-}
-
-# else
-
-# if defined(_WIN32) && !defined(__MINGW32__)
-typedef unsigned __int64 u64;
-# elif defined(__arch64__)
-typedef unsigned long u64;
-# else
-typedef unsigned long long u64;
-# endif
-
-typedef struct {
- u32 h[5];
- u32 r[4];
-} poly1305_internal;
-
-/* store a 32-bit unsigned integer in little endian */
-static void U32TO8(unsigned char *p, unsigned int v)
-{
- p[0] = (unsigned char)((v) & 0xff);
- p[1] = (unsigned char)((v >> 8) & 0xff);
- p[2] = (unsigned char)((v >> 16) & 0xff);
- p[3] = (unsigned char)((v >> 24) & 0xff);
-}
-
-static void poly1305_init(void *ctx, const unsigned char key[16])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
-
- /* h = 0 */
- st->h[0] = 0;
- st->h[1] = 0;
- st->h[2] = 0;
- st->h[3] = 0;
- st->h[4] = 0;
-
- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- st->r[0] = U8TOU32(&key[0]) & 0x0fffffff;
- st->r[1] = U8TOU32(&key[4]) & 0x0ffffffc;
- st->r[2] = U8TOU32(&key[8]) & 0x0ffffffc;
- st->r[3] = U8TOU32(&key[12]) & 0x0ffffffc;
-}
-
-static void
-poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, u32 padbit)
-{
- poly1305_internal *st = (poly1305_internal *)ctx;
- u32 r0, r1, r2, r3;
- u32 s1, s2, s3;
- u32 h0, h1, h2, h3, h4, c;
- u64 d0, d1, d2, d3;
-
- r0 = st->r[0];
- r1 = st->r[1];
- r2 = st->r[2];
- r3 = st->r[3];
-
- s1 = r1 + (r1 >> 2);
- s2 = r2 + (r2 >> 2);
- s3 = r3 + (r3 >> 2);
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
- h3 = st->h[3];
- h4 = st->h[4];
-
- while (len >= POLY1305_BLOCK_SIZE) {
- /* h += m[i] */
- h0 = (u32)(d0 = (u64)h0 + U8TOU32(inp + 0));
- h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + U8TOU32(inp + 4));
- h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + U8TOU32(inp + 8));
- h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + U8TOU32(inp + 12));
- h4 += (u32)(d3 >> 32) + padbit;
-
- /* h *= r "%" p, where "%" stands for "partial remainder" */
- d0 = ((u64)h0 * r0) +
- ((u64)h1 * s3) +
- ((u64)h2 * s2) +
- ((u64)h3 * s1);
- d1 = ((u64)h0 * r1) +
- ((u64)h1 * r0) +
- ((u64)h2 * s3) +
- ((u64)h3 * s2) +
- (h4 * s1);
- d2 = ((u64)h0 * r2) +
- ((u64)h1 * r1) +
- ((u64)h2 * r0) +
- ((u64)h3 * s3) +
- (h4 * s2);
- d3 = ((u64)h0 * r3) +
- ((u64)h1 * r2) +
- ((u64)h2 * r1) +
- ((u64)h3 * r0) +
- (h4 * s3);
- h4 = (h4 * r0);
-
- /* last reduction step: */
- /* a) h4:h0 = h4<<128 + d3<<96 + d2<<64 + d1<<32 + d0 */
- h0 = (u32)d0;
- h1 = (u32)(d1 += d0 >> 32);
- h2 = (u32)(d2 += d1 >> 32);
- h3 = (u32)(d3 += d2 >> 32);
- h4 += (u32)(d3 >> 32);
- /* b) (h4:h0 += (h4:h0>>130) * 5) %= 2^130 */
- c = (h4 >> 2) + (h4 & ~3U);
- h4 &= 3;
- h0 += c;
- h1 += (c = CONSTANT_TIME_CARRY(h0,c));
- h2 += (c = CONSTANT_TIME_CARRY(h1,c));
- h3 += (c = CONSTANT_TIME_CARRY(h2,c));
- h4 += CONSTANT_TIME_CARRY(h3,c);
- /*
- * Occasional overflows to 3rd bit of h4 are taken care of
- * "naturally". If after this point we end up at the top of
- * this loop, then the overflow bit will be accounted for
- * in next iteration. If we end up in poly1305_emit, then
- * comparison to modulus below will still count as "carry
- * into 131st bit", so that properly reduced value will be
- * picked in conditional move.
- */
-
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- }
-
- st->h[0] = h0;
- st->h[1] = h1;
- st->h[2] = h2;
- st->h[3] = h3;
- st->h[4] = h4;
-}
-
-static void poly1305_emit(void *ctx, unsigned char mac[16],
- const u32 nonce[4])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
- u32 h0, h1, h2, h3, h4;
- u32 g0, g1, g2, g3, g4;
- u64 t;
- u32 mask;
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
- h3 = st->h[3];
- h4 = st->h[4];
-
- /* compare to modulus by computing h + -p */
- g0 = (u32)(t = (u64)h0 + 5);
- g1 = (u32)(t = (u64)h1 + (t >> 32));
- g2 = (u32)(t = (u64)h2 + (t >> 32));
- g3 = (u32)(t = (u64)h3 + (t >> 32));
- g4 = h4 + (u32)(t >> 32);
-
- /* if there was carry into 131st bit, h3:h0 = g3:g0 */
- mask = 0 - (g4 >> 2);
- g0 &= mask;
- g1 &= mask;
- g2 &= mask;
- g3 &= mask;
- mask = ~mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
- h2 = (h2 & mask) | g2;
- h3 = (h3 & mask) | g3;
-
- /* mac = (h + nonce) % (2^128) */
- h0 = (u32)(t = (u64)h0 + nonce[0]);
- h1 = (u32)(t = (u64)h1 + (t >> 32) + nonce[1]);
- h2 = (u32)(t = (u64)h2 + (t >> 32) + nonce[2]);
- h3 = (u32)(t = (u64)h3 + (t >> 32) + nonce[3]);
-
- U32TO8(mac + 0, h0);
- U32TO8(mac + 4, h1);
- U32TO8(mac + 8, h2);
- U32TO8(mac + 12, h3);
-}
-# endif
-#else
-int poly1305_init(void *ctx, const unsigned char key[16], void *func);
-void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
- unsigned int padbit);
-void poly1305_emit(void *ctx, unsigned char mac[16],
- const unsigned int nonce[4]);
-#endif
-
-void Poly1305_Init(POLY1305 *ctx, const unsigned char key[32])
-{
- ctx->nonce[0] = U8TOU32(&key[16]);
- ctx->nonce[1] = U8TOU32(&key[20]);
- ctx->nonce[2] = U8TOU32(&key[24]);
- ctx->nonce[3] = U8TOU32(&key[28]);
-
-#ifndef POLY1305_ASM
- poly1305_init(ctx->opaque, key);
-#else
-    /*
-     * Unlike the reference poly1305_init, the assembly counterpart is
-     * expected to return a value: non-zero if it initializes ctx->func,
-     * and zero otherwise. The latter is to simplify the assembly in
-     * cases where there are no multiple code paths to switch between.
-     */
- if (!poly1305_init(ctx->opaque, key, &ctx->func)) {
- ctx->func.blocks = poly1305_blocks;
- ctx->func.emit = poly1305_emit;
- }
-#endif
-
- ctx->num = 0;
-
-}
-
-#ifdef POLY1305_ASM
-/*
- * This "eclipses" poly1305_blocks and poly1305_emit, but it's a
- * conscious choice imposed by -Wshadow compiler warnings.
- */
-# define poly1305_blocks (*poly1305_blocks_p)
-# define poly1305_emit (*poly1305_emit_p)
-#endif
-
-void Poly1305_Update(POLY1305 *ctx, const unsigned char *inp, size_t len)
-{
-#ifdef POLY1305_ASM
-    /*
-     * As documented, poly1305_blocks is never called with input
-     * longer than a single block and the padbit argument set to 0.
-     * This property is deliberately exploited in assembly modules to
-     * optimize padbit handling on the loop boundary.
-     */
- poly1305_blocks_f poly1305_blocks_p = ctx->func.blocks;
-#endif
- size_t rem, num;
-
- if ((num = ctx->num)) {
- rem = POLY1305_BLOCK_SIZE - num;
- if (len >= rem) {
- memcpy(ctx->data + num, inp, rem);
- poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1);
- inp += rem;
- len -= rem;
- } else {
- /* Still not enough data to process a block. */
- memcpy(ctx->data + num, inp, len);
- ctx->num = num + len;
- return;
- }
- }
-
- rem = len % POLY1305_BLOCK_SIZE;
- len -= rem;
-
- if (len >= POLY1305_BLOCK_SIZE) {
- poly1305_blocks(ctx->opaque, inp, len, 1);
- inp += len;
- }
-
- if (rem)
- memcpy(ctx->data, inp, rem);
-
- ctx->num = rem;
-}
-
-void Poly1305_Final(POLY1305 *ctx, unsigned char mac[16])
-{
-#ifdef POLY1305_ASM
- poly1305_blocks_f poly1305_blocks_p = ctx->func.blocks;
- poly1305_emit_f poly1305_emit_p = ctx->func.emit;
-#endif
- size_t num;
-
- if ((num = ctx->num)) {
- ctx->data[num++] = 1; /* pad bit */
- while (num < POLY1305_BLOCK_SIZE)
- ctx->data[num++] = 0;
- poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_emit(ctx->opaque, mac, ctx->nonce);
-
- /* zero out the state */
- OPENSSL_cleanse(ctx, sizeof(*ctx));
-}
+ */
+# define CONSTANT_TIME_CARRY(a,b) ( \
+ (a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1) \
+ )
+
+# if (defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16) && \
+ (defined(__SIZEOF_LONG__) && __SIZEOF_LONG__==8)
+
+typedef unsigned long u64;
+typedef __uint128_t u128;
+
+typedef struct {
+ u64 h[3];
+ u64 r[2];
+} poly1305_internal;
+
+/* pick a 64-bit unsigned integer in little endian order */
+static u64 U8TOU64(const unsigned char *p)
+{
+ return (((u64)(p[0] & 0xff)) |
+ ((u64)(p[1] & 0xff) << 8) |
+ ((u64)(p[2] & 0xff) << 16) |
+ ((u64)(p[3] & 0xff) << 24) |
+ ((u64)(p[4] & 0xff) << 32) |
+ ((u64)(p[5] & 0xff) << 40) |
+ ((u64)(p[6] & 0xff) << 48) |
+ ((u64)(p[7] & 0xff) << 56));
+}
+
+/* store a 64-bit unsigned integer in little endian */
+static void U64TO8(unsigned char *p, u64 v)
+{
+ p[0] = (unsigned char)((v) & 0xff);
+ p[1] = (unsigned char)((v >> 8) & 0xff);
+ p[2] = (unsigned char)((v >> 16) & 0xff);
+ p[3] = (unsigned char)((v >> 24) & 0xff);
+ p[4] = (unsigned char)((v >> 32) & 0xff);
+ p[5] = (unsigned char)((v >> 40) & 0xff);
+ p[6] = (unsigned char)((v >> 48) & 0xff);
+ p[7] = (unsigned char)((v >> 56) & 0xff);
+}
+
+static void poly1305_init(void *ctx, const unsigned char key[16])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ st->r[0] = U8TOU64(&key[0]) & 0x0ffffffc0fffffff;
+ st->r[1] = U8TOU64(&key[8]) & 0x0ffffffc0ffffffc;
+}
+
+static void
+poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, u32 padbit)
+{
+ poly1305_internal *st = (poly1305_internal *)ctx;
+ u64 r0, r1;
+ u64 s1;
+ u64 h0, h1, h2, c;
+ u128 d0, d1;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+
+ s1 = r1 + (r1 >> 2);
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ while (len >= POLY1305_BLOCK_SIZE) {
+ /* h += m[i] */
+ h0 = (u64)(d0 = (u128)h0 + U8TOU64(inp + 0));
+ h1 = (u64)(d1 = (u128)h1 + (d0 >> 64) + U8TOU64(inp + 8));
+        /*
+         * padbit can be zero only when the original len was
+         * POLY1305_BLOCK_SIZE, but we don't check
+         */
+ h2 += (u64)(d1 >> 64) + padbit;
+
+ /* h *= r "%" p, where "%" stands for "partial remainder" */
+ d0 = ((u128)h0 * r0) +
+ ((u128)h1 * s1);
+ d1 = ((u128)h0 * r1) +
+ ((u128)h1 * r0) +
+ (h2 * s1);
+ h2 = (h2 * r0);
+
+ /* last reduction step: */
+ /* a) h2:h0 = h2<<128 + d1<<64 + d0 */
+ h0 = (u64)d0;
+ h1 = (u64)(d1 += d0 >> 64);
+ h2 += (u64)(d1 >> 64);
+ /* b) (h2:h0 += (h2:h0>>130) * 5) %= 2^130 */
+ c = (h2 >> 2) + (h2 & ~3UL);
+ h2 &= 3;
+ h0 += c;
+ h1 += (c = CONSTANT_TIME_CARRY(h0,c));
+ h2 += CONSTANT_TIME_CARRY(h1,c);
+ /*
+ * Occasional overflows to 3rd bit of h2 are taken care of
+ * "naturally". If after this point we end up at the top of
+ * this loop, then the overflow bit will be accounted for
+ * in next iteration. If we end up in poly1305_emit, then
+ * comparison to modulus below will still count as "carry
+ * into 131st bit", so that properly reduced value will be
+ * picked in conditional move.
+ */
+
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+}
+
+static void poly1305_emit(void *ctx, unsigned char mac[16],
+ const u32 nonce[4])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+ u64 h0, h1, h2;
+ u64 g0, g1, g2;
+ u128 t;
+ u64 mask;
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ /* compare to modulus by computing h + -p */
+ g0 = (u64)(t = (u128)h0 + 5);
+ g1 = (u64)(t = (u128)h1 + (t >> 64));
+ g2 = h2 + (u64)(t >> 64);
+
+ /* if there was carry into 131st bit, h1:h0 = g1:g0 */
+ mask = 0 - (g2 >> 2);
+ g0 &= mask;
+ g1 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+
+ /* mac = (h + nonce) % (2^128) */
+ h0 = (u64)(t = (u128)h0 + nonce[0] + ((u64)nonce[1]<<32));
+ h1 = (u64)(t = (u128)h1 + nonce[2] + ((u64)nonce[3]<<32) + (t >> 64));
+
+ U64TO8(mac + 0, h0);
+ U64TO8(mac + 8, h1);
+}
+
+# else
+
+# if defined(_WIN32) && !defined(__MINGW32__)
+typedef unsigned __int64 u64;
+# elif defined(__arch64__)
+typedef unsigned long u64;
+# else
+typedef unsigned long long u64;
+# endif
+
+typedef struct {
+ u32 h[5];
+ u32 r[4];
+} poly1305_internal;
+
+/* store a 32-bit unsigned integer in little endian */
+static void U32TO8(unsigned char *p, unsigned int v)
+{
+ p[0] = (unsigned char)((v) & 0xff);
+ p[1] = (unsigned char)((v >> 8) & 0xff);
+ p[2] = (unsigned char)((v >> 16) & 0xff);
+ p[3] = (unsigned char)((v >> 24) & 0xff);
+}
+
+static void poly1305_init(void *ctx, const unsigned char key[16])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+ st->h[3] = 0;
+ st->h[4] = 0;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ st->r[0] = U8TOU32(&key[0]) & 0x0fffffff;
+ st->r[1] = U8TOU32(&key[4]) & 0x0ffffffc;
+ st->r[2] = U8TOU32(&key[8]) & 0x0ffffffc;
+ st->r[3] = U8TOU32(&key[12]) & 0x0ffffffc;
+}
+
+static void
+poly1305_blocks(void *ctx, const unsigned char *inp, size_t len, u32 padbit)
+{
+ poly1305_internal *st = (poly1305_internal *)ctx;
+ u32 r0, r1, r2, r3;
+ u32 s1, s2, s3;
+ u32 h0, h1, h2, h3, h4, c;
+ u64 d0, d1, d2, d3;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+ r3 = st->r[3];
+
+ s1 = r1 + (r1 >> 2);
+ s2 = r2 + (r2 >> 2);
+ s3 = r3 + (r3 >> 2);
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ while (len >= POLY1305_BLOCK_SIZE) {
+ /* h += m[i] */
+ h0 = (u32)(d0 = (u64)h0 + U8TOU32(inp + 0));
+ h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + U8TOU32(inp + 4));
+ h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + U8TOU32(inp + 8));
+ h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + U8TOU32(inp + 12));
+ h4 += (u32)(d3 >> 32) + padbit;
+
+ /* h *= r "%" p, where "%" stands for "partial remainder" */
+ d0 = ((u64)h0 * r0) +
+ ((u64)h1 * s3) +
+ ((u64)h2 * s2) +
+ ((u64)h3 * s1);
+ d1 = ((u64)h0 * r1) +
+ ((u64)h1 * r0) +
+ ((u64)h2 * s3) +
+ ((u64)h3 * s2) +
+ (h4 * s1);
+ d2 = ((u64)h0 * r2) +
+ ((u64)h1 * r1) +
+ ((u64)h2 * r0) +
+ ((u64)h3 * s3) +
+ (h4 * s2);
+ d3 = ((u64)h0 * r3) +
+ ((u64)h1 * r2) +
+ ((u64)h2 * r1) +
+ ((u64)h3 * r0) +
+ (h4 * s3);
+ h4 = (h4 * r0);
+
+ /* last reduction step: */
+ /* a) h4:h0 = h4<<128 + d3<<96 + d2<<64 + d1<<32 + d0 */
+ h0 = (u32)d0;
+ h1 = (u32)(d1 += d0 >> 32);
+ h2 = (u32)(d2 += d1 >> 32);
+ h3 = (u32)(d3 += d2 >> 32);
+ h4 += (u32)(d3 >> 32);
+ /* b) (h4:h0 += (h4:h0>>130) * 5) %= 2^130 */
+ c = (h4 >> 2) + (h4 & ~3U);
+ h4 &= 3;
+ h0 += c;
+ h1 += (c = CONSTANT_TIME_CARRY(h0,c));
+ h2 += (c = CONSTANT_TIME_CARRY(h1,c));
+ h3 += (c = CONSTANT_TIME_CARRY(h2,c));
+ h4 += CONSTANT_TIME_CARRY(h3,c);
+ /*
+ * Occasional overflows to 3rd bit of h4 are taken care of
+ * "naturally". If after this point we end up at the top of
+ * this loop, then the overflow bit will be accounted for
+ * in next iteration. If we end up in poly1305_emit, then
+ * comparison to modulus below will still count as "carry
+ * into 131st bit", so that properly reduced value will be
+ * picked in conditional move.
+ */
+
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+ st->h[3] = h3;
+ st->h[4] = h4;
+}
+
+static void poly1305_emit(void *ctx, unsigned char mac[16],
+ const u32 nonce[4])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+ u32 h0, h1, h2, h3, h4;
+ u32 g0, g1, g2, g3, g4;
+ u64 t;
+ u32 mask;
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ /* compare to modulus by computing h + -p */
+ g0 = (u32)(t = (u64)h0 + 5);
+ g1 = (u32)(t = (u64)h1 + (t >> 32));
+ g2 = (u32)(t = (u64)h2 + (t >> 32));
+ g3 = (u32)(t = (u64)h3 + (t >> 32));
+ g4 = h4 + (u32)(t >> 32);
+
+ /* if there was carry into 131st bit, h3:h0 = g3:g0 */
+ mask = 0 - (g4 >> 2);
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+
+ /* mac = (h + nonce) % (2^128) */
+ h0 = (u32)(t = (u64)h0 + nonce[0]);
+ h1 = (u32)(t = (u64)h1 + (t >> 32) + nonce[1]);
+ h2 = (u32)(t = (u64)h2 + (t >> 32) + nonce[2]);
+ h3 = (u32)(t = (u64)h3 + (t >> 32) + nonce[3]);
+
+ U32TO8(mac + 0, h0);
+ U32TO8(mac + 4, h1);
+ U32TO8(mac + 8, h2);
+ U32TO8(mac + 12, h3);
+}
+# endif
+#else
+int poly1305_init(void *ctx, const unsigned char key[16], void *func);
+void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
+ unsigned int padbit);
+void poly1305_emit(void *ctx, unsigned char mac[16],
+ const unsigned int nonce[4]);
+#endif
+
+void Poly1305_Init(POLY1305 *ctx, const unsigned char key[32])
+{
+ ctx->nonce[0] = U8TOU32(&key[16]);
+ ctx->nonce[1] = U8TOU32(&key[20]);
+ ctx->nonce[2] = U8TOU32(&key[24]);
+ ctx->nonce[3] = U8TOU32(&key[28]);
+
+#ifndef POLY1305_ASM
+ poly1305_init(ctx->opaque, key);
+#else
+    /*
+     * Unlike the reference poly1305_init, the assembly counterpart is
+     * expected to return a value: non-zero if it initializes ctx->func,
+     * and zero otherwise. The latter is to simplify the assembly in
+     * cases where there are no multiple code paths to switch between.
+     */
+ if (!poly1305_init(ctx->opaque, key, &ctx->func)) {
+ ctx->func.blocks = poly1305_blocks;
+ ctx->func.emit = poly1305_emit;
+ }
+#endif
+
+ ctx->num = 0;
+
+}
+
+#ifdef POLY1305_ASM
+/*
+ * This "eclipses" poly1305_blocks and poly1305_emit, but it's a
+ * conscious choice imposed by -Wshadow compiler warnings.
+ */
+# define poly1305_blocks (*poly1305_blocks_p)
+# define poly1305_emit (*poly1305_emit_p)
+#endif
+
+void Poly1305_Update(POLY1305 *ctx, const unsigned char *inp, size_t len)
+{
+#ifdef POLY1305_ASM
+    /*
+     * As documented, poly1305_blocks is never called with input
+     * longer than a single block and the padbit argument set to 0.
+     * This property is deliberately exploited in assembly modules to
+     * optimize padbit handling on the loop boundary.
+     */
+ poly1305_blocks_f poly1305_blocks_p = ctx->func.blocks;
+#endif
+ size_t rem, num;
+
+ if ((num = ctx->num)) {
+ rem = POLY1305_BLOCK_SIZE - num;
+ if (len >= rem) {
+ memcpy(ctx->data + num, inp, rem);
+ poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1);
+ inp += rem;
+ len -= rem;
+ } else {
+ /* Still not enough data to process a block. */
+ memcpy(ctx->data + num, inp, len);
+ ctx->num = num + len;
+ return;
+ }
+ }
+
+ rem = len % POLY1305_BLOCK_SIZE;
+ len -= rem;
+
+ if (len >= POLY1305_BLOCK_SIZE) {
+ poly1305_blocks(ctx->opaque, inp, len, 1);
+ inp += len;
+ }
+
+ if (rem)
+ memcpy(ctx->data, inp, rem);
+
+ ctx->num = rem;
+}
+
+void Poly1305_Final(POLY1305 *ctx, unsigned char mac[16])
+{
+#ifdef POLY1305_ASM
+ poly1305_blocks_f poly1305_blocks_p = ctx->func.blocks;
+ poly1305_emit_f poly1305_emit_p = ctx->func.emit;
+#endif
+ size_t num;
+
+ if ((num = ctx->num)) {
+ ctx->data[num++] = 1; /* pad bit */
+ while (num < POLY1305_BLOCK_SIZE)
+ ctx->data[num++] = 0;
+ poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit(ctx->opaque, mac, ctx->nonce);
+
+ /* zero out the state */
+ OPENSSL_cleanse(ctx, sizeof(*ctx));
+}
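
Editorial aside, not part of the diff: CONSTANT_TIME_CARRY above is OpenSSL's branchless unsigned "a < b" predicate, used to propagate carries without data-dependent branches. In the reduction, a holds the wrapped sum old + b, so the carry out of that addition equals a < b. A standalone sanity sketch checking the macro (copied verbatim from the file above) against the plain comparison:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define CONSTANT_TIME_CARRY(a,b) ( \
             (a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1) \
             )

    int main(void)
    {
        int i;

        for (i = 0; i < 1000000; i++) {
            /* random-ish 64-bit pair; rand() width doesn't matter here */
            uint64_t a = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
            uint64_t b = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

            /* the macro must agree with the plain (branchy) comparison */
            assert(CONSTANT_TIME_CARRY(a, b) == (a < b ? 1u : 0u));
        }
        return 0;
    }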
diff --git a/contrib/libs/openssl/crypto/poly1305/poly1305_ameth.c b/contrib/libs/openssl/crypto/poly1305/poly1305_ameth.c
index 0dddf79626..4e8ad89497 100644
--- a/contrib/libs/openssl/crypto/poly1305/poly1305_ameth.c
+++ b/contrib/libs/openssl/crypto/poly1305/poly1305_ameth.c
@@ -1,122 +1,122 @@
-/*
+/*
* Copyright 2007-2021 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
- */
-
-#include <stdio.h>
-#include "internal/cryptlib.h"
-#include <openssl/evp.h>
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <stdio.h>
+#include "internal/cryptlib.h"
+#include <openssl/evp.h>
#include "crypto/asn1.h"
#include "crypto/poly1305.h"
-#include "poly1305_local.h"
+#include "poly1305_local.h"
#include "crypto/evp.h"
-
-/*
- * POLY1305 "ASN1" method. This is just here to indicate the maximum
- * POLY1305 output length and to free up a POLY1305 key.
- */
-
-static int poly1305_size(const EVP_PKEY *pkey)
-{
- return POLY1305_DIGEST_SIZE;
-}
-
-static void poly1305_key_free(EVP_PKEY *pkey)
-{
- ASN1_OCTET_STRING *os = EVP_PKEY_get0(pkey);
- if (os != NULL) {
- if (os->data != NULL)
- OPENSSL_cleanse(os->data, os->length);
- ASN1_OCTET_STRING_free(os);
- }
-}
-
-static int poly1305_pkey_ctrl(EVP_PKEY *pkey, int op, long arg1, void *arg2)
-{
-    /* nothing (including ASN1_PKEY_CTRL_DEFAULT_MD_NID) is supported */
- return -2;
-}
-
-static int poly1305_pkey_public_cmp(const EVP_PKEY *a, const EVP_PKEY *b)
-{
+
+/*
+ * POLY1305 "ASN1" method. This is just here to indicate the maximum
+ * POLY1305 output length and to free up a POLY1305 key.
+ */
+
+static int poly1305_size(const EVP_PKEY *pkey)
+{
+ return POLY1305_DIGEST_SIZE;
+}
+
+static void poly1305_key_free(EVP_PKEY *pkey)
+{
+ ASN1_OCTET_STRING *os = EVP_PKEY_get0(pkey);
+ if (os != NULL) {
+ if (os->data != NULL)
+ OPENSSL_cleanse(os->data, os->length);
+ ASN1_OCTET_STRING_free(os);
+ }
+}
+
+static int poly1305_pkey_ctrl(EVP_PKEY *pkey, int op, long arg1, void *arg2)
+{
+    /* nothing (including ASN1_PKEY_CTRL_DEFAULT_MD_NID) is supported */
+ return -2;
+}
+
+static int poly1305_pkey_public_cmp(const EVP_PKEY *a, const EVP_PKEY *b)
+{
return ASN1_OCTET_STRING_cmp(EVP_PKEY_get0(a), EVP_PKEY_get0(b)) == 0;
-}
-
-static int poly1305_set_priv_key(EVP_PKEY *pkey, const unsigned char *priv,
- size_t len)
-{
- ASN1_OCTET_STRING *os;
-
- if (pkey->pkey.ptr != NULL || len != POLY1305_KEY_SIZE)
- return 0;
-
- os = ASN1_OCTET_STRING_new();
- if (os == NULL)
- return 0;
-
- if (!ASN1_OCTET_STRING_set(os, priv, len)) {
- ASN1_OCTET_STRING_free(os);
- return 0;
- }
-
- pkey->pkey.ptr = os;
- return 1;
-}
-
-static int poly1305_get_priv_key(const EVP_PKEY *pkey, unsigned char *priv,
- size_t *len)
-{
- ASN1_OCTET_STRING *os = (ASN1_OCTET_STRING *)pkey->pkey.ptr;
-
- if (priv == NULL) {
- *len = POLY1305_KEY_SIZE;
- return 1;
- }
-
- if (os == NULL || *len < POLY1305_KEY_SIZE)
- return 0;
-
- memcpy(priv, ASN1_STRING_get0_data(os), ASN1_STRING_length(os));
- *len = POLY1305_KEY_SIZE;
-
- return 1;
-}
-
-const EVP_PKEY_ASN1_METHOD poly1305_asn1_meth = {
- EVP_PKEY_POLY1305,
- EVP_PKEY_POLY1305,
- 0,
-
- "POLY1305",
- "OpenSSL POLY1305 method",
-
- 0, 0, poly1305_pkey_public_cmp, 0,
-
- 0, 0, 0,
-
- poly1305_size,
- 0, 0,
- 0, 0, 0, 0, 0, 0, 0,
-
- poly1305_key_free,
- poly1305_pkey_ctrl,
- NULL,
- NULL,
-
- NULL,
- NULL,
- NULL,
-
- NULL,
- NULL,
- NULL,
-
- poly1305_set_priv_key,
- NULL,
- poly1305_get_priv_key,
- NULL,
-};
+}
+
+static int poly1305_set_priv_key(EVP_PKEY *pkey, const unsigned char *priv,
+ size_t len)
+{
+ ASN1_OCTET_STRING *os;
+
+ if (pkey->pkey.ptr != NULL || len != POLY1305_KEY_SIZE)
+ return 0;
+
+ os = ASN1_OCTET_STRING_new();
+ if (os == NULL)
+ return 0;
+
+ if (!ASN1_OCTET_STRING_set(os, priv, len)) {
+ ASN1_OCTET_STRING_free(os);
+ return 0;
+ }
+
+ pkey->pkey.ptr = os;
+ return 1;
+}
+
+static int poly1305_get_priv_key(const EVP_PKEY *pkey, unsigned char *priv,
+ size_t *len)
+{
+ ASN1_OCTET_STRING *os = (ASN1_OCTET_STRING *)pkey->pkey.ptr;
+
+ if (priv == NULL) {
+ *len = POLY1305_KEY_SIZE;
+ return 1;
+ }
+
+ if (os == NULL || *len < POLY1305_KEY_SIZE)
+ return 0;
+
+ memcpy(priv, ASN1_STRING_get0_data(os), ASN1_STRING_length(os));
+ *len = POLY1305_KEY_SIZE;
+
+ return 1;
+}
+
+const EVP_PKEY_ASN1_METHOD poly1305_asn1_meth = {
+ EVP_PKEY_POLY1305,
+ EVP_PKEY_POLY1305,
+ 0,
+
+ "POLY1305",
+ "OpenSSL POLY1305 method",
+
+ 0, 0, poly1305_pkey_public_cmp, 0,
+
+ 0, 0, 0,
+
+ poly1305_size,
+ 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+
+ poly1305_key_free,
+ poly1305_pkey_ctrl,
+ NULL,
+ NULL,
+
+ NULL,
+ NULL,
+ NULL,
+
+ NULL,
+ NULL,
+ NULL,
+
+ poly1305_set_priv_key,
+ NULL,
+ poly1305_get_priv_key,
+ NULL,
+};
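
Editorial aside, not part of the diff: the EVP_PKEY_ASN1_METHOD above is what lets applications reach Poly1305 through the generic EVP layer. A sketch of that path, assuming the usual OpenSSL 1.1.1 one-shot EVP_DigestSign pattern for MAC keys; the function name is hypothetical and error handling is reduced to a single flag:

    #include <openssl/evp.h>

    int poly1305_tag(unsigned char tag[16], const unsigned char key[32],
                     const unsigned char *msg, size_t msglen)
    {
        EVP_PKEY *pkey = EVP_PKEY_new_raw_private_key(EVP_PKEY_POLY1305,
                                                      NULL, key, 32);
        EVP_MD_CTX *mctx = EVP_MD_CTX_new();
        size_t taglen = 16;
        int ok = pkey != NULL && mctx != NULL
              && EVP_DigestSignInit(mctx, NULL, NULL, NULL, pkey) > 0
              && EVP_DigestSign(mctx, tag, &taglen, msg, msglen) > 0;

        EVP_MD_CTX_free(mctx);
        EVP_PKEY_free(pkey);
        return ok;
    }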
diff --git a/contrib/libs/openssl/crypto/poly1305/poly1305_base2_44.c b/contrib/libs/openssl/crypto/poly1305/poly1305_base2_44.c
index b6313d01ba..eda7ca0279 100644
--- a/contrib/libs/openssl/crypto/poly1305/poly1305_base2_44.c
+++ b/contrib/libs/openssl/crypto/poly1305/poly1305_base2_44.c
@@ -1,171 +1,171 @@
-/*
- * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
- */
-
-/*
- * This module is meant to be used as a template for base 2^44 assembly
- * implementation[s]. As a side note, compiler-generated code is not
- * slower than compiler-generated base 2^64 code on [high-end] x86_64,
- * even though the number of multiplications is 50% higher. Go figure...
- */
-#include <stdlib.h>
-
-typedef unsigned char u8;
-typedef unsigned int u32;
-typedef unsigned long u64;
-typedef unsigned __int128 u128;
-
-typedef struct {
- u64 h[3];
- u64 s[2];
- u64 r[3];
-} poly1305_internal;
-
-#define POLY1305_BLOCK_SIZE 16
-
-/* pick 64-bit unsigned integer in little endian order */
-static u64 U8TOU64(const unsigned char *p)
-{
- return (((u64)(p[0] & 0xff)) |
- ((u64)(p[1] & 0xff) << 8) |
- ((u64)(p[2] & 0xff) << 16) |
- ((u64)(p[3] & 0xff) << 24) |
- ((u64)(p[4] & 0xff) << 32) |
- ((u64)(p[5] & 0xff) << 40) |
- ((u64)(p[6] & 0xff) << 48) |
- ((u64)(p[7] & 0xff) << 56));
-}
-
-/* store a 64-bit unsigned integer in little endian */
-static void U64TO8(unsigned char *p, u64 v)
-{
- p[0] = (unsigned char)((v) & 0xff);
- p[1] = (unsigned char)((v >> 8) & 0xff);
- p[2] = (unsigned char)((v >> 16) & 0xff);
- p[3] = (unsigned char)((v >> 24) & 0xff);
- p[4] = (unsigned char)((v >> 32) & 0xff);
- p[5] = (unsigned char)((v >> 40) & 0xff);
- p[6] = (unsigned char)((v >> 48) & 0xff);
- p[7] = (unsigned char)((v >> 56) & 0xff);
-}
-
-int poly1305_init(void *ctx, const unsigned char key[16])
-{
- poly1305_internal *st = (poly1305_internal *)ctx;
- u64 r0, r1;
-
- /* h = 0 */
- st->h[0] = 0;
- st->h[1] = 0;
- st->h[2] = 0;
-
- r0 = U8TOU64(&key[0]) & 0x0ffffffc0fffffff;
- r1 = U8TOU64(&key[8]) & 0x0ffffffc0ffffffc;
-
-    /* break r1:r0 into three 44-bit digits; the mask is (1<<44)-1 */
- st->r[0] = r0 & 0x0fffffffffff;
- st->r[1] = ((r0 >> 44) | (r1 << 20)) & 0x0fffffffffff;
- st->r[2] = (r1 >> 24);
-
- st->s[0] = (st->r[1] + (st->r[1] << 2)) << 2;
- st->s[1] = (st->r[2] + (st->r[2] << 2)) << 2;
-
- return 0;
-}
-
-void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
- u32 padbit)
-{
- poly1305_internal *st = (poly1305_internal *)ctx;
- u64 r0, r1, r2;
- u64 s1, s2;
- u64 h0, h1, h2, c;
- u128 d0, d1, d2;
- u64 pad = (u64)padbit << 40;
-
- r0 = st->r[0];
- r1 = st->r[1];
- r2 = st->r[2];
-
- s1 = st->s[0];
- s2 = st->s[1];
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
-
- while (len >= POLY1305_BLOCK_SIZE) {
- u64 m0, m1;
-
- m0 = U8TOU64(inp + 0);
- m1 = U8TOU64(inp + 8);
-
- /* h += m[i], m[i] is broken to 44-bit digits */
- h0 += m0 & 0x0fffffffffff;
- h1 += ((m0 >> 44) | (m1 << 20)) & 0x0fffffffffff;
- h2 += (m1 >> 24) + pad;
-
- /* h *= r "%" p, where "%" stands for "partial remainder" */
- d0 = ((u128)h0 * r0) + ((u128)h1 * s2) + ((u128)h2 * s1);
- d1 = ((u128)h0 * r1) + ((u128)h1 * r0) + ((u128)h2 * s2);
- d2 = ((u128)h0 * r2) + ((u128)h1 * r1) + ((u128)h2 * r0);
-
- /* "lazy" reduction step */
- h0 = (u64)d0 & 0x0fffffffffff;
- h1 = (u64)(d1 += (u64)(d0 >> 44)) & 0x0fffffffffff;
- h2 = (u64)(d2 += (u64)(d1 >> 44)) & 0x03ffffffffff; /* last 42 bits */
-
- c = (d2 >> 42);
- h0 += c + (c << 2);
-
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- }
-
- st->h[0] = h0;
- st->h[1] = h1;
- st->h[2] = h2;
-}
-
-void poly1305_emit(void *ctx, unsigned char mac[16], const u32 nonce[4])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
- u64 h0, h1, h2;
- u64 g0, g1, g2;
- u128 t;
- u64 mask;
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
-
- /* after "lazy" reduction, convert 44+bit digits to 64-bit ones */
- h0 = (u64)(t = (u128)h0 + (h1 << 44)); h1 >>= 20;
- h1 = (u64)(t = (u128)h1 + (h2 << 24) + (t >> 64)); h2 >>= 40;
- h2 += (u64)(t >> 64);
-
- /* compare to modulus by computing h + -p */
- g0 = (u64)(t = (u128)h0 + 5);
- g1 = (u64)(t = (u128)h1 + (t >> 64));
- g2 = h2 + (u64)(t >> 64);
-
- /* if there was carry into 131st bit, h1:h0 = g1:g0 */
- mask = 0 - (g2 >> 2);
- g0 &= mask;
- g1 &= mask;
- mask = ~mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
-
- /* mac = (h + nonce) % (2^128) */
- h0 = (u64)(t = (u128)h0 + nonce[0] + ((u64)nonce[1]<<32));
- h1 = (u64)(t = (u128)h1 + nonce[2] + ((u64)nonce[3]<<32) + (t >> 64));
-
- U64TO8(mac + 0, h0);
- U64TO8(mac + 8, h1);
-}
+/*
+ * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*
+ * This module is meant to be used as a template for base 2^44 assembly
+ * implementation[s]. As a side note, compiler-generated code is not
+ * slower than compiler-generated base 2^64 code on [high-end] x86_64,
+ * even though the number of multiplications is 50% higher. Go figure...
+ */
+#include <stdlib.h>
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+typedef unsigned long u64;
+typedef unsigned __int128 u128;
+
+typedef struct {
+ u64 h[3];
+ u64 s[2];
+ u64 r[3];
+} poly1305_internal;
+
+#define POLY1305_BLOCK_SIZE 16
+
+/* pick 64-bit unsigned integer in little endian order */
+static u64 U8TOU64(const unsigned char *p)
+{
+ return (((u64)(p[0] & 0xff)) |
+ ((u64)(p[1] & 0xff) << 8) |
+ ((u64)(p[2] & 0xff) << 16) |
+ ((u64)(p[3] & 0xff) << 24) |
+ ((u64)(p[4] & 0xff) << 32) |
+ ((u64)(p[5] & 0xff) << 40) |
+ ((u64)(p[6] & 0xff) << 48) |
+ ((u64)(p[7] & 0xff) << 56));
+}
+
+/* store a 64-bit unsigned integer in little endian */
+static void U64TO8(unsigned char *p, u64 v)
+{
+ p[0] = (unsigned char)((v) & 0xff);
+ p[1] = (unsigned char)((v >> 8) & 0xff);
+ p[2] = (unsigned char)((v >> 16) & 0xff);
+ p[3] = (unsigned char)((v >> 24) & 0xff);
+ p[4] = (unsigned char)((v >> 32) & 0xff);
+ p[5] = (unsigned char)((v >> 40) & 0xff);
+ p[6] = (unsigned char)((v >> 48) & 0xff);
+ p[7] = (unsigned char)((v >> 56) & 0xff);
+}
+
+int poly1305_init(void *ctx, const unsigned char key[16])
+{
+ poly1305_internal *st = (poly1305_internal *)ctx;
+ u64 r0, r1;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+
+ r0 = U8TOU64(&key[0]) & 0x0ffffffc0fffffff;
+ r1 = U8TOU64(&key[8]) & 0x0ffffffc0ffffffc;
+
+    /* break r1:r0 into three 44-bit digits; the mask is (1<<44)-1 */
+ st->r[0] = r0 & 0x0fffffffffff;
+ st->r[1] = ((r0 >> 44) | (r1 << 20)) & 0x0fffffffffff;
+ st->r[2] = (r1 >> 24);
+
+ st->s[0] = (st->r[1] + (st->r[1] << 2)) << 2;
+ st->s[1] = (st->r[2] + (st->r[2] << 2)) << 2;
+
+ return 0;
+}
+
+void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
+ u32 padbit)
+{
+ poly1305_internal *st = (poly1305_internal *)ctx;
+ u64 r0, r1, r2;
+ u64 s1, s2;
+ u64 h0, h1, h2, c;
+ u128 d0, d1, d2;
+ u64 pad = (u64)padbit << 40;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+
+ s1 = st->s[0];
+ s2 = st->s[1];
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ while (len >= POLY1305_BLOCK_SIZE) {
+ u64 m0, m1;
+
+ m0 = U8TOU64(inp + 0);
+ m1 = U8TOU64(inp + 8);
+
+ /* h += m[i], m[i] is broken to 44-bit digits */
+ h0 += m0 & 0x0fffffffffff;
+ h1 += ((m0 >> 44) | (m1 << 20)) & 0x0fffffffffff;
+ h2 += (m1 >> 24) + pad;
+
+ /* h *= r "%" p, where "%" stands for "partial remainder" */
+ d0 = ((u128)h0 * r0) + ((u128)h1 * s2) + ((u128)h2 * s1);
+ d1 = ((u128)h0 * r1) + ((u128)h1 * r0) + ((u128)h2 * s2);
+ d2 = ((u128)h0 * r2) + ((u128)h1 * r1) + ((u128)h2 * r0);
+
+ /* "lazy" reduction step */
+ h0 = (u64)d0 & 0x0fffffffffff;
+ h1 = (u64)(d1 += (u64)(d0 >> 44)) & 0x0fffffffffff;
+ h2 = (u64)(d2 += (u64)(d1 >> 44)) & 0x03ffffffffff; /* last 42 bits */
+
+ c = (d2 >> 42);
+ h0 += c + (c << 2);
+
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+}
+
+void poly1305_emit(void *ctx, unsigned char mac[16], const u32 nonce[4])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+ u64 h0, h1, h2;
+ u64 g0, g1, g2;
+ u128 t;
+ u64 mask;
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ /* after "lazy" reduction, convert 44+bit digits to 64-bit ones */
+ h0 = (u64)(t = (u128)h0 + (h1 << 44)); h1 >>= 20;
+ h1 = (u64)(t = (u128)h1 + (h2 << 24) + (t >> 64)); h2 >>= 40;
+ h2 += (u64)(t >> 64);
+
+ /* compare to modulus by computing h + -p */
+ g0 = (u64)(t = (u128)h0 + 5);
+ g1 = (u64)(t = (u128)h1 + (t >> 64));
+ g2 = h2 + (u64)(t >> 64);
+
+ /* if there was carry into 131st bit, h1:h0 = g1:g0 */
+ mask = 0 - (g2 >> 2);
+ g0 &= mask;
+ g1 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+
+ /* mac = (h + nonce) % (2^128) */
+ h0 = (u64)(t = (u128)h0 + nonce[0] + ((u64)nonce[1]<<32));
+ h1 = (u64)(t = (u128)h1 + nonce[2] + ((u64)nonce[3]<<32) + (t >> 64));
+
+ U64TO8(mac + 0, h0);
+ U64TO8(mac + 8, h1);
+}
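
Editorial aside, not part of the diff: the digit split performed by poly1305_init above is easy to get wrong by one bit, so here is a self-contained round-trip check using the same masks and shifts. The two 64-bit inputs are arbitrary example values, clamped the way the key would be:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* arbitrary stand-ins for a clamped key, not real key material */
        uint64_t r0 = 0x0123456789abcdefULL & 0x0ffffffc0fffffffULL;
        uint64_t r1 = 0xfedcba9876543210ULL & 0x0ffffffc0ffffffcULL;

        /* break r1:r0 into three 44-bit digits, mask is (1<<44)-1 */
        uint64_t d0 = r0 & 0x0fffffffffffULL;                        /* bits  0..43  */
        uint64_t d1 = ((r0 >> 44) | (r1 << 20)) & 0x0fffffffffffULL; /* bits 44..87  */
        uint64_t d2 = r1 >> 24;                                      /* bits 88..129 */

        /* both 64-bit halves must be recoverable from the digits */
        assert(r0 == (d0 | (d1 << 44)));
        assert(r1 == ((d1 >> 20) | (d2 << 24)));
        return 0;
    }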
diff --git a/contrib/libs/openssl/crypto/poly1305/poly1305_ieee754.c b/contrib/libs/openssl/crypto/poly1305/poly1305_ieee754.c
index 7cfd968645..87923cfe33 100644
--- a/contrib/libs/openssl/crypto/poly1305/poly1305_ieee754.c
+++ b/contrib/libs/openssl/crypto/poly1305/poly1305_ieee754.c
@@ -1,488 +1,488 @@
-/*
- * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
- */
-
-/*
- * This module is meant to be used as a template for non-x87 floating-
- * point assembly modules. The template itself is x86_64-specific
- * though, as it was debugged on x86_64. So the implementor would have
- * to recognize the platform-specific parts, UxTOy and inline asm, and
- * act accordingly.
- *
- * Huh? x86_64-specific code as a template for non-x87? Note the seven,
- * which is not a typo, but a reference to 80-bit precision. This
- * module, on the other hand, relies on 64-bit precision operations,
- * which are the default for x86_64 code. And since we are at it, just
- * for a sense of it, large-block performance in cycles per processed
- * byte for *this* code is:
- * gcc-4.8 icc-15.0 clang-3.4(*)
- *
- * Westmere 4.96 5.09 4.37
- * Sandy Bridge 4.95 4.90 4.17
- * Haswell 4.92 4.87 3.78
- * Bulldozer 4.67 4.49 4.68
- * VIA Nano 7.07 7.05 5.98
- * Silvermont 10.6 9.61 12.6
- *
- * (*) clang managed to discover parallelism and deployed SIMD;
- *
- * And for a range of other platforms with unspecified gcc versions:
- *
- * Freescale e300 12.5
- * PPC74x0 10.8
- * POWER6 4.92
- * POWER7 4.50
- * POWER8 4.10
- *
- * z10 11.2
- * z196+ 7.30
- *
- * UltraSPARC III 16.0
- * SPARC T4 16.1
- */
-
-#if !(defined(__GNUC__) && __GNUC__>=2)
-# error "this is gcc-specific template"
-#endif
-
-#include <stdlib.h>
-
-typedef unsigned char u8;
-typedef unsigned int u32;
-typedef unsigned long long u64;
-typedef union { double d; u64 u; } elem64;
-
-#define TWO(p) ((double)(1ULL<<(p)))
-#define TWO0 TWO(0)
-#define TWO32 TWO(32)
-#define TWO64 (TWO32*TWO(32))
-#define TWO96 (TWO64*TWO(32))
-#define TWO130 (TWO96*TWO(34))
-
-#define EXP(p) ((1023ULL+(p))<<52)
-
-#if defined(__x86_64__) || (defined(__PPC__) && defined(__LITTLE_ENDIAN__))
-# define U8TOU32(p) (*(const u32 *)(p))
-# define U32TO8(p,v) (*(u32 *)(p) = (v))
-#elif defined(__PPC__)
-# define U8TOU32(p) ({u32 ret; asm ("lwbrx %0,0,%1":"=r"(ret):"b"(p)); ret; })
-# define U32TO8(p,v) asm ("stwbrx %0,0,%1"::"r"(v),"b"(p):"memory")
-#elif defined(__s390x__)
-# define U8TOU32(p) ({u32 ret; asm ("lrv %0,%1":"=d"(ret):"m"(*(u32 *)(p))); ret; })
-# define U32TO8(p,v) asm ("strv %1,%0":"=m"(*(u32 *)(p)):"d"(v))
-#endif
-
-#ifndef U8TOU32
-# define U8TOU32(p) ((u32)(p)[0] | (u32)(p)[1]<<8 | \
- (u32)(p)[2]<<16 | (u32)(p)[3]<<24 )
-#endif
-#ifndef U32TO8
-# define U32TO8(p,v) ((p)[0] = (u8)(v), (p)[1] = (u8)((v)>>8), \
- (p)[2] = (u8)((v)>>16), (p)[3] = (u8)((v)>>24) )
-#endif
-
-typedef struct {
- elem64 h[4];
- double r[8];
- double s[6];
-} poly1305_internal;
-
-/* "round toward zero (truncate), mask all exceptions" */
-#if defined(__x86_64__)
-static const u32 mxcsr = 0x7f80;
-#elif defined(__PPC__)
-static const u64 one = 1;
-#elif defined(__s390x__)
-static const u32 fpc = 1;
-#elif defined(__sparc__)
-static const u64 fsr = 1ULL<<30;
-#elif defined(__mips__)
-static const u32 fcsr = 1;
-#else
-#error "unrecognized platform"
-#endif
-
-int poly1305_init(void *ctx, const unsigned char key[16])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
- elem64 r0, r1, r2, r3;
-
- /* h = 0, biased */
-#if 0
- st->h[0].d = TWO(52)*TWO0;
- st->h[1].d = TWO(52)*TWO32;
- st->h[2].d = TWO(52)*TWO64;
- st->h[3].d = TWO(52)*TWO96;
-#else
- st->h[0].u = EXP(52+0);
- st->h[1].u = EXP(52+32);
- st->h[2].u = EXP(52+64);
- st->h[3].u = EXP(52+96);
-#endif
-
- if (key) {
- /*
- * set "truncate" rounding mode
- */
-#if defined(__x86_64__)
- u32 mxcsr_orig;
-
- asm volatile ("stmxcsr %0":"=m"(mxcsr_orig));
- asm volatile ("ldmxcsr %0"::"m"(mxcsr));
-#elif defined(__PPC__)
- double fpscr_orig, fpscr = *(double *)&one;
-
- asm volatile ("mffs %0":"=f"(fpscr_orig));
- asm volatile ("mtfsf 255,%0"::"f"(fpscr));
-#elif defined(__s390x__)
- u32 fpc_orig;
-
- asm volatile ("stfpc %0":"=m"(fpc_orig));
- asm volatile ("lfpc %0"::"m"(fpc));
-#elif defined(__sparc__)
- u64 fsr_orig;
-
- asm volatile ("stx %%fsr,%0":"=m"(fsr_orig));
- asm volatile ("ldx %0,%%fsr"::"m"(fsr));
-#elif defined(__mips__)
- u32 fcsr_orig;
-
- asm volatile ("cfc1 %0,$31":"=r"(fcsr_orig));
- asm volatile ("ctc1 %0,$31"::"r"(fcsr));
-#endif
-
- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- r0.u = EXP(52+0) | (U8TOU32(&key[0]) & 0x0fffffff);
- r1.u = EXP(52+32) | (U8TOU32(&key[4]) & 0x0ffffffc);
- r2.u = EXP(52+64) | (U8TOU32(&key[8]) & 0x0ffffffc);
- r3.u = EXP(52+96) | (U8TOU32(&key[12]) & 0x0ffffffc);
-
- st->r[0] = r0.d - TWO(52)*TWO0;
- st->r[2] = r1.d - TWO(52)*TWO32;
- st->r[4] = r2.d - TWO(52)*TWO64;
- st->r[6] = r3.d - TWO(52)*TWO96;
-
- st->s[0] = st->r[2] * (5.0/TWO130);
- st->s[2] = st->r[4] * (5.0/TWO130);
- st->s[4] = st->r[6] * (5.0/TWO130);
-
- /*
- * base 2^32 -> base 2^16
- */
- st->r[1] = (st->r[0] + TWO(52)*TWO(16)*TWO0) -
- TWO(52)*TWO(16)*TWO0;
- st->r[0] -= st->r[1];
-
- st->r[3] = (st->r[2] + TWO(52)*TWO(16)*TWO32) -
- TWO(52)*TWO(16)*TWO32;
- st->r[2] -= st->r[3];
-
- st->r[5] = (st->r[4] + TWO(52)*TWO(16)*TWO64) -
- TWO(52)*TWO(16)*TWO64;
- st->r[4] -= st->r[5];
-
- st->r[7] = (st->r[6] + TWO(52)*TWO(16)*TWO96) -
- TWO(52)*TWO(16)*TWO96;
- st->r[6] -= st->r[7];
-
- st->s[1] = (st->s[0] + TWO(52)*TWO(16)*TWO0/TWO96) -
- TWO(52)*TWO(16)*TWO0/TWO96;
- st->s[0] -= st->s[1];
-
- st->s[3] = (st->s[2] + TWO(52)*TWO(16)*TWO32/TWO96) -
- TWO(52)*TWO(16)*TWO32/TWO96;
- st->s[2] -= st->s[3];
-
- st->s[5] = (st->s[4] + TWO(52)*TWO(16)*TWO64/TWO96) -
- TWO(52)*TWO(16)*TWO64/TWO96;
- st->s[4] -= st->s[5];
-
- /*
- * restore original FPU control register
- */
-#if defined(__x86_64__)
- asm volatile ("ldmxcsr %0"::"m"(mxcsr_orig));
-#elif defined(__PPC__)
- asm volatile ("mtfsf 255,%0"::"f"(fpscr_orig));
-#elif defined(__s390x__)
- asm volatile ("lfpc %0"::"m"(fpc_orig));
-#elif defined(__sparc__)
- asm volatile ("ldx %0,%%fsr"::"m"(fsr_orig));
-#elif defined(__mips__)
- asm volatile ("ctc1 %0,$31"::"r"(fcsr_orig));
-#endif
- }
-
- return 0;
-}
-
-void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
- int padbit)
-{
- poly1305_internal *st = (poly1305_internal *)ctx;
- elem64 in0, in1, in2, in3;
- u64 pad = (u64)padbit<<32;
-
- double x0, x1, x2, x3;
- double h0lo, h0hi, h1lo, h1hi, h2lo, h2hi, h3lo, h3hi;
- double c0lo, c0hi, c1lo, c1hi, c2lo, c2hi, c3lo, c3hi;
-
- const double r0lo = st->r[0];
- const double r0hi = st->r[1];
- const double r1lo = st->r[2];
- const double r1hi = st->r[3];
- const double r2lo = st->r[4];
- const double r2hi = st->r[5];
- const double r3lo = st->r[6];
- const double r3hi = st->r[7];
-
- const double s1lo = st->s[0];
- const double s1hi = st->s[1];
- const double s2lo = st->s[2];
- const double s2hi = st->s[3];
- const double s3lo = st->s[4];
- const double s3hi = st->s[5];
-
- /*
- * set "truncate" rounding mode
- */
-#if defined(__x86_64__)
- u32 mxcsr_orig;
-
- asm volatile ("stmxcsr %0":"=m"(mxcsr_orig));
- asm volatile ("ldmxcsr %0"::"m"(mxcsr));
-#elif defined(__PPC__)
- double fpscr_orig, fpscr = *(double *)&one;
-
- asm volatile ("mffs %0":"=f"(fpscr_orig));
- asm volatile ("mtfsf 255,%0"::"f"(fpscr));
-#elif defined(__s390x__)
- u32 fpc_orig;
-
- asm volatile ("stfpc %0":"=m"(fpc_orig));
- asm volatile ("lfpc %0"::"m"(fpc));
-#elif defined(__sparc__)
- u64 fsr_orig;
-
- asm volatile ("stx %%fsr,%0":"=m"(fsr_orig));
- asm volatile ("ldx %0,%%fsr"::"m"(fsr));
-#elif defined(__mips__)
- u32 fcsr_orig;
-
- asm volatile ("cfc1 %0,$31":"=r"(fcsr_orig));
- asm volatile ("ctc1 %0,$31"::"r"(fcsr));
-#endif
-
- /*
- * load base 2^32 and de-bias
- */
- h0lo = st->h[0].d - TWO(52)*TWO0;
- h1lo = st->h[1].d - TWO(52)*TWO32;
- h2lo = st->h[2].d - TWO(52)*TWO64;
- h3lo = st->h[3].d - TWO(52)*TWO96;
-
-#ifdef __clang__
- h0hi = 0;
- h1hi = 0;
- h2hi = 0;
- h3hi = 0;
-#else
- in0.u = EXP(52+0) | U8TOU32(&inp[0]);
- in1.u = EXP(52+32) | U8TOU32(&inp[4]);
- in2.u = EXP(52+64) | U8TOU32(&inp[8]);
- in3.u = EXP(52+96) | U8TOU32(&inp[12]) | pad;
-
- x0 = in0.d - TWO(52)*TWO0;
- x1 = in1.d - TWO(52)*TWO32;
- x2 = in2.d - TWO(52)*TWO64;
- x3 = in3.d - TWO(52)*TWO96;
-
- x0 += h0lo;
- x1 += h1lo;
- x2 += h2lo;
- x3 += h3lo;
-
- goto fast_entry;
-#endif
-
- do {
- in0.u = EXP(52+0) | U8TOU32(&inp[0]);
- in1.u = EXP(52+32) | U8TOU32(&inp[4]);
- in2.u = EXP(52+64) | U8TOU32(&inp[8]);
- in3.u = EXP(52+96) | U8TOU32(&inp[12]) | pad;
-
- x0 = in0.d - TWO(52)*TWO0;
- x1 = in1.d - TWO(52)*TWO32;
- x2 = in2.d - TWO(52)*TWO64;
- x3 = in3.d - TWO(52)*TWO96;
-
- /*
- * note that there are multiple ways to accumulate input, e.g.
- * one can as well accumulate to h0lo-h1lo-h1hi-h2hi...
- */
- h0lo += x0;
- h0hi += x1;
- h2lo += x2;
- h2hi += x3;
-
- /*
- * carries that cross 32n-bit (and 130-bit) boundaries
- */
- c0lo = (h0lo + TWO(52)*TWO32) - TWO(52)*TWO32;
- c1lo = (h1lo + TWO(52)*TWO64) - TWO(52)*TWO64;
- c2lo = (h2lo + TWO(52)*TWO96) - TWO(52)*TWO96;
- c3lo = (h3lo + TWO(52)*TWO130) - TWO(52)*TWO130;
-
- c0hi = (h0hi + TWO(52)*TWO32) - TWO(52)*TWO32;
- c1hi = (h1hi + TWO(52)*TWO64) - TWO(52)*TWO64;
- c2hi = (h2hi + TWO(52)*TWO96) - TWO(52)*TWO96;
- c3hi = (h3hi + TWO(52)*TWO130) - TWO(52)*TWO130;
-
- /*
- * base 2^48 -> base 2^32 with last reduction step
- */
- x1 = (h1lo - c1lo) + c0lo;
- x2 = (h2lo - c2lo) + c1lo;
- x3 = (h3lo - c3lo) + c2lo;
- x0 = (h0lo - c0lo) + c3lo * (5.0/TWO130);
-
- x1 += (h1hi - c1hi) + c0hi;
- x2 += (h2hi - c2hi) + c1hi;
- x3 += (h3hi - c3hi) + c2hi;
- x0 += (h0hi - c0hi) + c3hi * (5.0/TWO130);
-
-#ifndef __clang__
- fast_entry:
-#endif
- /*
- * base 2^32 * base 2^16 = base 2^48
- */
- h0lo = s3lo * x1 + s2lo * x2 + s1lo * x3 + r0lo * x0;
- h1lo = r0lo * x1 + s3lo * x2 + s2lo * x3 + r1lo * x0;
- h2lo = r1lo * x1 + r0lo * x2 + s3lo * x3 + r2lo * x0;
- h3lo = r2lo * x1 + r1lo * x2 + r0lo * x3 + r3lo * x0;
-
- h0hi = s3hi * x1 + s2hi * x2 + s1hi * x3 + r0hi * x0;
- h1hi = r0hi * x1 + s3hi * x2 + s2hi * x3 + r1hi * x0;
- h2hi = r1hi * x1 + r0hi * x2 + s3hi * x3 + r2hi * x0;
- h3hi = r2hi * x1 + r1hi * x2 + r0hi * x3 + r3hi * x0;
-
- inp += 16;
- len -= 16;
-
- } while (len >= 16);
-
- /*
- * carries that cross 32n-bit (and 130-bit) boundaries
- */
- c0lo = (h0lo + TWO(52)*TWO32) - TWO(52)*TWO32;
- c1lo = (h1lo + TWO(52)*TWO64) - TWO(52)*TWO64;
- c2lo = (h2lo + TWO(52)*TWO96) - TWO(52)*TWO96;
- c3lo = (h3lo + TWO(52)*TWO130) - TWO(52)*TWO130;
-
- c0hi = (h0hi + TWO(52)*TWO32) - TWO(52)*TWO32;
- c1hi = (h1hi + TWO(52)*TWO64) - TWO(52)*TWO64;
- c2hi = (h2hi + TWO(52)*TWO96) - TWO(52)*TWO96;
- c3hi = (h3hi + TWO(52)*TWO130) - TWO(52)*TWO130;
-
- /*
- * base 2^48 -> base 2^32 with last reduction step
- */
- x1 = (h1lo - c1lo) + c0lo;
- x2 = (h2lo - c2lo) + c1lo;
- x3 = (h3lo - c3lo) + c2lo;
- x0 = (h0lo - c0lo) + c3lo * (5.0/TWO130);
-
- x1 += (h1hi - c1hi) + c0hi;
- x2 += (h2hi - c2hi) + c1hi;
- x3 += (h3hi - c3hi) + c2hi;
- x0 += (h0hi - c0hi) + c3hi * (5.0/TWO130);
-
- /*
- * store base 2^32, with bias
- */
- st->h[1].d = x1 + TWO(52)*TWO32;
- st->h[2].d = x2 + TWO(52)*TWO64;
- st->h[3].d = x3 + TWO(52)*TWO96;
- st->h[0].d = x0 + TWO(52)*TWO0;
-
- /*
- * restore original FPU control register
- */
-#if defined(__x86_64__)
- asm volatile ("ldmxcsr %0"::"m"(mxcsr_orig));
-#elif defined(__PPC__)
- asm volatile ("mtfsf 255,%0"::"f"(fpscr_orig));
-#elif defined(__s390x__)
- asm volatile ("lfpc %0"::"m"(fpc_orig));
-#elif defined(__sparc__)
- asm volatile ("ldx %0,%%fsr"::"m"(fsr_orig));
-#elif defined(__mips__)
- asm volatile ("ctc1 %0,$31"::"r"(fcsr_orig));
-#endif
-}
-
-void poly1305_emit(void *ctx, unsigned char mac[16], const u32 nonce[4])
-{
- poly1305_internal *st = (poly1305_internal *) ctx;
- u64 h0, h1, h2, h3, h4;
- u32 g0, g1, g2, g3, g4;
- u64 t;
- u32 mask;
-
- /*
- * thanks to the bias, masking off the exponent bits yields the integer result
- */
- h0 = st->h[0].u & 0x000fffffffffffffULL;
- h1 = st->h[1].u & 0x000fffffffffffffULL;
- h2 = st->h[2].u & 0x000fffffffffffffULL;
- h3 = st->h[3].u & 0x000fffffffffffffULL;
-
- /*
- * can be partially reduced, so reduce...
- */
- h4 = h3>>32; h3 &= 0xffffffffU;
- g4 = h4&-4;
- h4 &= 3;
- g4 += g4>>2;
-
- h0 += g4;
- h1 += h0>>32; h0 &= 0xffffffffU;
- h2 += h1>>32; h1 &= 0xffffffffU;
- h3 += h2>>32; h2 &= 0xffffffffU;
-
- /* compute h + -p */
- g0 = (u32)(t = h0 + 5);
- g1 = (u32)(t = h1 + (t >> 32));
- g2 = (u32)(t = h2 + (t >> 32));
- g3 = (u32)(t = h3 + (t >> 32));
- g4 = h4 + (u32)(t >> 32);
-
- /* if there was carry, select g0-g3 */
- mask = 0 - (g4 >> 2);
- g0 &= mask;
- g1 &= mask;
- g2 &= mask;
- g3 &= mask;
- mask = ~mask;
- g0 |= (h0 & mask);
- g1 |= (h1 & mask);
- g2 |= (h2 & mask);
- g3 |= (h3 & mask);
-
- /* mac = (h + nonce) % (2^128) */
- g0 = (u32)(t = (u64)g0 + nonce[0]);
- g1 = (u32)(t = (u64)g1 + (t >> 32) + nonce[1]);
- g2 = (u32)(t = (u64)g2 + (t >> 32) + nonce[2]);
- g3 = (u32)(t = (u64)g3 + (t >> 32) + nonce[3]);
-
- U32TO8(mac + 0, g0);
- U32TO8(mac + 4, g1);
- U32TO8(mac + 8, g2);
- U32TO8(mac + 12, g3);
-}
+/*
+ * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*
+ * This module is meant to be used as a template for non-x87 floating-
+ * point assembly modules. The template itself is x86_64-specific
+ * though, as it was debugged on x86_64. An implementor would therefore
+ * have to recognize the platform-specific parts, UxTOy and inline asm,
+ * and act accordingly.
+ *
+ * Huh? x86_64-specific code as a template for non-x87? The "7" is not
+ * a typo, but a reference to the x87 unit's 80-bit precision. This
+ * module, on the other hand, relies on 64-bit precision operations,
+ * which are the default for x86_64 code. And while we are at it, just
+ * for a sense of it, large-block performance in cycles per processed
+ * byte for *this* code is:
+ * gcc-4.8 icc-15.0 clang-3.4(*)
+ *
+ * Westmere 4.96 5.09 4.37
+ * Sandy Bridge 4.95 4.90 4.17
+ * Haswell 4.92 4.87 3.78
+ * Bulldozer 4.67 4.49 4.68
+ * VIA Nano 7.07 7.05 5.98
+ * Silvermont 10.6 9.61 12.6
+ *
+ * (*) clang managed to discover the parallelism and deployed SIMD.
+ *
+ * And for a range of other platforms with unspecified gcc versions:
+ *
+ * Freescale e300 12.5
+ * PPC74x0 10.8
+ * POWER6 4.92
+ * POWER7 4.50
+ * POWER8 4.10
+ *
+ * z10 11.2
+ * z196+ 7.30
+ *
+ * UltraSPARC III 16.0
+ * SPARC T4 16.1
+ */
+
+#if !(defined(__GNUC__) && __GNUC__>=2)
+# error "this is a gcc-specific template"
+#endif
+
+#include <stdlib.h>
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef union { double d; u64 u; } elem64;
+
+#define TWO(p) ((double)(1ULL<<(p)))
+#define TWO0 TWO(0)
+#define TWO32 TWO(32)
+#define TWO64 (TWO32*TWO(32))
+#define TWO96 (TWO64*TWO(32))
+#define TWO130 (TWO96*TWO(34))
+
+#define EXP(p) ((1023ULL+(p))<<52)
+
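/*
 * A minimal, standalone sketch (separate from the module above, and
 * assuming IEEE-754 doubles) of why the EXP()/TWO() machinery works:
 * EXP(p) is the bit pattern of the double 2^p, so OR-ing a 32-bit word
 * w into the mantissa of EXP(52+k) yields 2^(52+k) + w*2^k exactly,
 * and subtracting the 2^(52+k) bias recovers w*2^k.
 */
#include <stdio.h>

typedef unsigned long long u64;
typedef union { double d; u64 u; } elem64;

#define TWO(p) ((double)(1ULL<<(p)))
#define EXP(p) ((1023ULL+(p))<<52)

int main(void)
{
    elem64 e;
    unsigned int w = 0xdeadbeef;

    e.u = EXP(52) | w;                                   /* 2^52 + w */
    printf("%.0f\n", e.d - TWO(52));                     /* 3735928559 == w */

    e.u = EXP(52 + 32) | w;                              /* 2^84 + w*2^32 */
    printf("%.0f\n", (e.d - TWO(52)*TWO(32)) / TWO(32)); /* w again */
    return 0;
}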
+#if defined(__x86_64__) || (defined(__PPC__) && defined(__LITTLE_ENDIAN__))
+# define U8TOU32(p) (*(const u32 *)(p))
+# define U32TO8(p,v) (*(u32 *)(p) = (v))
+#elif defined(__PPC__)
+# define U8TOU32(p) ({u32 ret; asm ("lwbrx %0,0,%1":"=r"(ret):"b"(p)); ret; })
+# define U32TO8(p,v) asm ("stwbrx %0,0,%1"::"r"(v),"b"(p):"memory")
+#elif defined(__s390x__)
+# define U8TOU32(p) ({u32 ret; asm ("lrv %0,%1":"=d"(ret):"m"(*(u32 *)(p))); ret; })
+# define U32TO8(p,v) asm ("strv %1,%0":"=m"(*(u32 *)(p)):"d"(v))
+#endif
+
+#ifndef U8TOU32
+# define U8TOU32(p) ((u32)(p)[0] | (u32)(p)[1]<<8 | \
+ (u32)(p)[2]<<16 | (u32)(p)[3]<<24 )
+#endif
+#ifndef U32TO8
+# define U32TO8(p,v) ((p)[0] = (u8)(v), (p)[1] = (u8)((v)>>8), \
+ (p)[2] = (u8)((v)>>16), (p)[3] = (u8)((v)>>24) )
+#endif
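/*
 * A standalone check of the generic byte-wise fallback above: it
 * assembles bytes in little-endian order regardless of host
 * endianness, which is what the specialised loads replicate.
 */
#include <assert.h>

typedef unsigned char u8;
typedef unsigned int u32;

static u32 u8tou32(const u8 *p)
{
    return (u32)p[0] | (u32)p[1]<<8 | (u32)p[2]<<16 | (u32)p[3]<<24;
}

int main(void)
{
    static const u8 b[4] = { 0x78, 0x56, 0x34, 0x12 };

    assert(u8tou32(b) == 0x12345678);   /* least significant byte first */
    return 0;
}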
+
+typedef struct {
+ elem64 h[4];
+ double r[8];
+ double s[6];
+} poly1305_internal;
+
+/* "round toward zero (truncate), mask all exceptions" */
+#if defined(__x86_64__)
+static const u32 mxcsr = 0x7f80;
+#elif defined(__PPC__)
+static const u64 one = 1;
+#elif defined(__s390x__)
+static const u32 fpc = 1;
+#elif defined(__sparc__)
+static const u64 fsr = 1ULL<<30;
+#elif defined(__mips__)
+static const u32 fcsr = 1;
+#else
+#error "unrecognized platform"
+#endif
+
+int poly1305_init(void *ctx, const unsigned char key[16])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+ elem64 r0, r1, r2, r3;
+
+ /* h = 0, biased */
+#if 0
+ st->h[0].d = TWO(52)*TWO0;
+ st->h[1].d = TWO(52)*TWO32;
+ st->h[2].d = TWO(52)*TWO64;
+ st->h[3].d = TWO(52)*TWO96;
+#else
+ st->h[0].u = EXP(52+0);
+ st->h[1].u = EXP(52+32);
+ st->h[2].u = EXP(52+64);
+ st->h[3].u = EXP(52+96);
+#endif
+
+ if (key) {
+ /*
+ * set "truncate" rounding mode
+ */
+#if defined(__x86_64__)
+ u32 mxcsr_orig;
+
+ asm volatile ("stmxcsr %0":"=m"(mxcsr_orig));
+ asm volatile ("ldmxcsr %0"::"m"(mxcsr));
+#elif defined(__PPC__)
+ double fpscr_orig, fpscr = *(double *)&one;
+
+ asm volatile ("mffs %0":"=f"(fpscr_orig));
+ asm volatile ("mtfsf 255,%0"::"f"(fpscr));
+#elif defined(__s390x__)
+ u32 fpc_orig;
+
+ asm volatile ("stfpc %0":"=m"(fpc_orig));
+ asm volatile ("lfpc %0"::"m"(fpc));
+#elif defined(__sparc__)
+ u64 fsr_orig;
+
+ asm volatile ("stx %%fsr,%0":"=m"(fsr_orig));
+ asm volatile ("ldx %0,%%fsr"::"m"(fsr));
+#elif defined(__mips__)
+ u32 fcsr_orig;
+
+ asm volatile ("cfc1 %0,$31":"=r"(fcsr_orig));
+ asm volatile ("ctc1 %0,$31"::"r"(fcsr));
+#endif
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ r0.u = EXP(52+0) | (U8TOU32(&key[0]) & 0x0fffffff);
+ r1.u = EXP(52+32) | (U8TOU32(&key[4]) & 0x0ffffffc);
+ r2.u = EXP(52+64) | (U8TOU32(&key[8]) & 0x0ffffffc);
+ r3.u = EXP(52+96) | (U8TOU32(&key[12]) & 0x0ffffffc);
+
+ st->r[0] = r0.d - TWO(52)*TWO0;
+ st->r[2] = r1.d - TWO(52)*TWO32;
+ st->r[4] = r2.d - TWO(52)*TWO64;
+ st->r[6] = r3.d - TWO(52)*TWO96;
+
+ st->s[0] = st->r[2] * (5.0/TWO130);
+ st->s[2] = st->r[4] * (5.0/TWO130);
+ st->s[4] = st->r[6] * (5.0/TWO130);
+
+ /*
+ * base 2^32 -> base 2^16
+ */
+ st->r[1] = (st->r[0] + TWO(52)*TWO(16)*TWO0) -
+ TWO(52)*TWO(16)*TWO0;
+ st->r[0] -= st->r[1];
+
+ st->r[3] = (st->r[2] + TWO(52)*TWO(16)*TWO32) -
+ TWO(52)*TWO(16)*TWO32;
+ st->r[2] -= st->r[3];
+
+ st->r[5] = (st->r[4] + TWO(52)*TWO(16)*TWO64) -
+ TWO(52)*TWO(16)*TWO64;
+ st->r[4] -= st->r[5];
+
+ st->r[7] = (st->r[6] + TWO(52)*TWO(16)*TWO96) -
+ TWO(52)*TWO(16)*TWO96;
+ st->r[6] -= st->r[7];
+
+ st->s[1] = (st->s[0] + TWO(52)*TWO(16)*TWO0/TWO96) -
+ TWO(52)*TWO(16)*TWO0/TWO96;
+ st->s[0] -= st->s[1];
+
+ st->s[3] = (st->s[2] + TWO(52)*TWO(16)*TWO32/TWO96) -
+ TWO(52)*TWO(16)*TWO32/TWO96;
+ st->s[2] -= st->s[3];
+
+ st->s[5] = (st->s[4] + TWO(52)*TWO(16)*TWO64/TWO96) -
+ TWO(52)*TWO(16)*TWO64/TWO96;
+ st->s[4] -= st->s[5];
+
+ /*
+ * restore original FPU control register
+ */
+#if defined(__x86_64__)
+ asm volatile ("ldmxcsr %0"::"m"(mxcsr_orig));
+#elif defined(__PPC__)
+ asm volatile ("mtfsf 255,%0"::"f"(fpscr_orig));
+#elif defined(__s390x__)
+ asm volatile ("lfpc %0"::"m"(fpc_orig));
+#elif defined(__sparc__)
+ asm volatile ("ldx %0,%%fsr"::"m"(fsr_orig));
+#elif defined(__mips__)
+ asm volatile ("ctc1 %0,$31"::"r"(fcsr_orig));
+#endif
+ }
+
+ return 0;
+}
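/*
 * A portable-C sketch of the radix-splitting trick used in
 * poly1305_init above: with the FPU rounding toward zero,
 * (x + 2^52*2^16) - 2^52*2^16 truncates x to a multiple of 2^16,
 * splitting a base-2^32 digit into base-2^16 halves. fesetround()
 * stands in here for the control-register asm, and 'volatile' keeps
 * the compiler from folding the trick away at compile time.
 */
#include <fenv.h>
#include <stdio.h>

#define TWO(p) ((double)(1ULL<<(p)))

int main(void)
{
    volatile double x = (double)0x12345678;     /* a 32-bit "digit" */
    volatile double t;
    double hi, lo;

    fesetround(FE_TOWARDZERO);                  /* "truncate" mode */
    t = x + TWO(52)*TWO(16);
    hi = t - TWO(52)*TWO(16);
    lo = x - hi;
    fesetround(FE_TONEAREST);

    printf("hi=%#x lo=%#x\n", (unsigned)hi, (unsigned)lo);
    /* prints hi=0x12340000 lo=0x5678 */
    return 0;
}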
+
+void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
+ int padbit)
+{
+ poly1305_internal *st = (poly1305_internal *)ctx;
+ elem64 in0, in1, in2, in3;
+ u64 pad = (u64)padbit<<32;
+
+ double x0, x1, x2, x3;
+ double h0lo, h0hi, h1lo, h1hi, h2lo, h2hi, h3lo, h3hi;
+ double c0lo, c0hi, c1lo, c1hi, c2lo, c2hi, c3lo, c3hi;
+
+ const double r0lo = st->r[0];
+ const double r0hi = st->r[1];
+ const double r1lo = st->r[2];
+ const double r1hi = st->r[3];
+ const double r2lo = st->r[4];
+ const double r2hi = st->r[5];
+ const double r3lo = st->r[6];
+ const double r3hi = st->r[7];
+
+ const double s1lo = st->s[0];
+ const double s1hi = st->s[1];
+ const double s2lo = st->s[2];
+ const double s2hi = st->s[3];
+ const double s3lo = st->s[4];
+ const double s3hi = st->s[5];
+
+ /*
+ * set "truncate" rounding mode
+ */
+#if defined(__x86_64__)
+ u32 mxcsr_orig;
+
+ asm volatile ("stmxcsr %0":"=m"(mxcsr_orig));
+ asm volatile ("ldmxcsr %0"::"m"(mxcsr));
+#elif defined(__PPC__)
+ double fpscr_orig, fpscr = *(double *)&one;
+
+ asm volatile ("mffs %0":"=f"(fpscr_orig));
+ asm volatile ("mtfsf 255,%0"::"f"(fpscr));
+#elif defined(__s390x__)
+ u32 fpc_orig;
+
+ asm volatile ("stfpc %0":"=m"(fpc_orig));
+ asm volatile ("lfpc %0"::"m"(fpc));
+#elif defined(__sparc__)
+ u64 fsr_orig;
+
+ asm volatile ("stx %%fsr,%0":"=m"(fsr_orig));
+ asm volatile ("ldx %0,%%fsr"::"m"(fsr));
+#elif defined(__mips__)
+ u32 fcsr_orig;
+
+ asm volatile ("cfc1 %0,$31":"=r"(fcsr_orig));
+ asm volatile ("ctc1 %0,$31"::"r"(fcsr));
+#endif
+
+ /*
+ * load base 2^32 and de-bias
+ */
+ h0lo = st->h[0].d - TWO(52)*TWO0;
+ h1lo = st->h[1].d - TWO(52)*TWO32;
+ h2lo = st->h[2].d - TWO(52)*TWO64;
+ h3lo = st->h[3].d - TWO(52)*TWO96;
+
+#ifdef __clang__
+ h0hi = 0;
+ h1hi = 0;
+ h2hi = 0;
+ h3hi = 0;
+#else
+ in0.u = EXP(52+0) | U8TOU32(&inp[0]);
+ in1.u = EXP(52+32) | U8TOU32(&inp[4]);
+ in2.u = EXP(52+64) | U8TOU32(&inp[8]);
+ in3.u = EXP(52+96) | U8TOU32(&inp[12]) | pad;
+
+ x0 = in0.d - TWO(52)*TWO0;
+ x1 = in1.d - TWO(52)*TWO32;
+ x2 = in2.d - TWO(52)*TWO64;
+ x3 = in3.d - TWO(52)*TWO96;
+
+ x0 += h0lo;
+ x1 += h1lo;
+ x2 += h2lo;
+ x3 += h3lo;
+
+ goto fast_entry;
+#endif
+
+ do {
+ in0.u = EXP(52+0) | U8TOU32(&inp[0]);
+ in1.u = EXP(52+32) | U8TOU32(&inp[4]);
+ in2.u = EXP(52+64) | U8TOU32(&inp[8]);
+ in3.u = EXP(52+96) | U8TOU32(&inp[12]) | pad;
+
+ x0 = in0.d - TWO(52)*TWO0;
+ x1 = in1.d - TWO(52)*TWO32;
+ x2 = in2.d - TWO(52)*TWO64;
+ x3 = in3.d - TWO(52)*TWO96;
+
+ /*
+ * note that there are multiple ways to accumulate the input, e.g.
+ * one could just as well accumulate into h0lo-h1lo-h1hi-h2hi...
+ */
+ h0lo += x0;
+ h0hi += x1;
+ h2lo += x2;
+ h2hi += x3;
+
+ /*
+ * carries that cross 32n-bit (and 130-bit) boundaries
+ */
+ c0lo = (h0lo + TWO(52)*TWO32) - TWO(52)*TWO32;
+ c1lo = (h1lo + TWO(52)*TWO64) - TWO(52)*TWO64;
+ c2lo = (h2lo + TWO(52)*TWO96) - TWO(52)*TWO96;
+ c3lo = (h3lo + TWO(52)*TWO130) - TWO(52)*TWO130;
+
+ c0hi = (h0hi + TWO(52)*TWO32) - TWO(52)*TWO32;
+ c1hi = (h1hi + TWO(52)*TWO64) - TWO(52)*TWO64;
+ c2hi = (h2hi + TWO(52)*TWO96) - TWO(52)*TWO96;
+ c3hi = (h3hi + TWO(52)*TWO130) - TWO(52)*TWO130;
+
+ /*
+ * base 2^48 -> base 2^32 with last reduction step
+ */
+ x1 = (h1lo - c1lo) + c0lo;
+ x2 = (h2lo - c2lo) + c1lo;
+ x3 = (h3lo - c3lo) + c2lo;
+ x0 = (h0lo - c0lo) + c3lo * (5.0/TWO130);
+
+ x1 += (h1hi - c1hi) + c0hi;
+ x2 += (h2hi - c2hi) + c1hi;
+ x3 += (h3hi - c3hi) + c2hi;
+ x0 += (h0hi - c0hi) + c3hi * (5.0/TWO130);
+
+#ifndef __clang__
+ fast_entry:
+#endif
+ /*
+ * base 2^32 * base 2^16 = base 2^48
+ */
+ h0lo = s3lo * x1 + s2lo * x2 + s1lo * x3 + r0lo * x0;
+ h1lo = r0lo * x1 + s3lo * x2 + s2lo * x3 + r1lo * x0;
+ h2lo = r1lo * x1 + r0lo * x2 + s3lo * x3 + r2lo * x0;
+ h3lo = r2lo * x1 + r1lo * x2 + r0lo * x3 + r3lo * x0;
+
+ h0hi = s3hi * x1 + s2hi * x2 + s1hi * x3 + r0hi * x0;
+ h1hi = r0hi * x1 + s3hi * x2 + s2hi * x3 + r1hi * x0;
+ h2hi = r1hi * x1 + r0hi * x2 + s3hi * x3 + r2hi * x0;
+ h3hi = r2hi * x1 + r1hi * x2 + r0hi * x3 + r3hi * x0;
+
+ inp += 16;
+ len -= 16;
+
+ } while (len >= 16);
+
+ /*
+ * carries that cross 32n-bit (and 130-bit) boundaries
+ */
+ c0lo = (h0lo + TWO(52)*TWO32) - TWO(52)*TWO32;
+ c1lo = (h1lo + TWO(52)*TWO64) - TWO(52)*TWO64;
+ c2lo = (h2lo + TWO(52)*TWO96) - TWO(52)*TWO96;
+ c3lo = (h3lo + TWO(52)*TWO130) - TWO(52)*TWO130;
+
+ c0hi = (h0hi + TWO(52)*TWO32) - TWO(52)*TWO32;
+ c1hi = (h1hi + TWO(52)*TWO64) - TWO(52)*TWO64;
+ c2hi = (h2hi + TWO(52)*TWO96) - TWO(52)*TWO96;
+ c3hi = (h3hi + TWO(52)*TWO130) - TWO(52)*TWO130;
+
+ /*
+ * base 2^48 -> base 2^32 with last reduction step
+ */
+ x1 = (h1lo - c1lo) + c0lo;
+ x2 = (h2lo - c2lo) + c1lo;
+ x3 = (h3lo - c3lo) + c2lo;
+ x0 = (h0lo - c0lo) + c3lo * (5.0/TWO130);
+
+ x1 += (h1hi - c1hi) + c0hi;
+ x2 += (h2hi - c2hi) + c1hi;
+ x3 += (h3hi - c3hi) + c2hi;
+ x0 += (h0hi - c0hi) + c3hi * (5.0/TWO130);
+
+ /*
+ * store base 2^32, with bias
+ */
+ st->h[1].d = x1 + TWO(52)*TWO32;
+ st->h[2].d = x2 + TWO(52)*TWO64;
+ st->h[3].d = x3 + TWO(52)*TWO96;
+ st->h[0].d = x0 + TWO(52)*TWO0;
+
+ /*
+ * restore original FPU control register
+ */
+#if defined(__x86_64__)
+ asm volatile ("ldmxcsr %0"::"m"(mxcsr_orig));
+#elif defined(__PPC__)
+ asm volatile ("mtfsf 255,%0"::"f"(fpscr_orig));
+#elif defined(__s390x__)
+ asm volatile ("lfpc %0"::"m"(fpc_orig));
+#elif defined(__sparc__)
+ asm volatile ("ldx %0,%%fsr"::"m"(fsr_orig));
+#elif defined(__mips__)
+ asm volatile ("ctc1 %0,$31"::"r"(fcsr_orig));
+#endif
+}
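/*
 * An integer-only sketch of the reduction behind the c3 * (5.0/TWO130)
 * terms above: modulo p = 2^130 - 5 we have 2^130 == 5 (mod p), so a
 * carry q crossing the 2^130 boundary folds back in as 5*q at the
 * bottom. Shown here with the toy modulus 2^6 - 5 = 59 so the whole
 * thing fits in a u64.
 */
#include <assert.h>

typedef unsigned long long u64;

int main(void)
{
    const u64 p = (1ULL << 6) - 5;      /* toy stand-in for 2^130 - 5 */
    u64 h = 1000;                       /* accumulator value */
    u64 q = h >> 6;                     /* carry across the boundary */
    u64 r = h & 0x3f;                   /* what stays in the low limbs */

    /* h = q*2^6 + r, hence h == 5*q + r (mod p) */
    assert((5*q + r) % p == h % p);
    return 0;
}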
+
+void poly1305_emit(void *ctx, unsigned char mac[16], const u32 nonce[4])
+{
+ poly1305_internal *st = (poly1305_internal *) ctx;
+ u64 h0, h1, h2, h3, h4;
+ u32 g0, g1, g2, g3, g4;
+ u64 t;
+ u32 mask;
+
+ /*
+ * thanks to the bias, masking off the exponent bits yields the integer result
+ */
+ h0 = st->h[0].u & 0x000fffffffffffffULL;
+ h1 = st->h[1].u & 0x000fffffffffffffULL;
+ h2 = st->h[2].u & 0x000fffffffffffffULL;
+ h3 = st->h[3].u & 0x000fffffffffffffULL;
+
+ /*
+ * can be partially reduced, so reduce...
+ */
+ h4 = h3>>32; h3 &= 0xffffffffU;
+ g4 = h4&-4;
+ h4 &= 3;
+ g4 += g4>>2;
+
+ h0 += g4;
+ h1 += h0>>32; h0 &= 0xffffffffU;
+ h2 += h1>>32; h1 &= 0xffffffffU;
+ h3 += h2>>32; h2 &= 0xffffffffU;
+
+ /* compute h + -p */
+ g0 = (u32)(t = h0 + 5);
+ g1 = (u32)(t = h1 + (t >> 32));
+ g2 = (u32)(t = h2 + (t >> 32));
+ g3 = (u32)(t = h3 + (t >> 32));
+ g4 = h4 + (u32)(t >> 32);
+
+ /* if there was carry, select g0-g3 */
+ mask = 0 - (g4 >> 2);
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ mask = ~mask;
+ g0 |= (h0 & mask);
+ g1 |= (h1 & mask);
+ g2 |= (h2 & mask);
+ g3 |= (h3 & mask);
+
+ /* mac = (h + nonce) % (2^128) */
+ g0 = (u32)(t = (u64)g0 + nonce[0]);
+ g1 = (u32)(t = (u64)g1 + (t >> 32) + nonce[1]);
+ g2 = (u32)(t = (u64)g2 + (t >> 32) + nonce[2]);
+ g3 = (u32)(t = (u64)g3 + (t >> 32) + nonce[3]);
+
+ U32TO8(mac + 0, g0);
+ U32TO8(mac + 4, g1);
+ U32TO8(mac + 8, g2);
+ U32TO8(mac + 12, g3);
+}
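/*
 * An integer-only sketch of the constant-time selection used in
 * poly1305_emit above: g = h + 5 is computed unconditionally, and the
 * carry out of bit 130 (g4 >> 2) is stretched into an all-ones or
 * all-zero mask that picks g (i.e. h - p) or h without branching.
 */
#include <assert.h>

typedef unsigned int u32;

static u32 ct_select(u32 carry, u32 g, u32 h)
{
    u32 mask = 0 - carry;               /* 0xffffffff if carry, else 0 */

    return (g & mask) | (h & ~mask);
}

int main(void)
{
    assert(ct_select(1, 7, 9) == 7);    /* carried: take the reduced g */
    assert(ct_select(0, 7, 9) == 9);    /* no carry: keep h */
    return 0;
}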
diff --git a/contrib/libs/openssl/crypto/poly1305/poly1305_local.h b/contrib/libs/openssl/crypto/poly1305/poly1305_local.h
index 6d4d9dc5b6..3a5cd149c1 100644
--- a/contrib/libs/openssl/crypto/poly1305/poly1305_local.h
+++ b/contrib/libs/openssl/crypto/poly1305/poly1305_local.h
@@ -1,27 +1,27 @@
-/*
- * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
- */
-
-typedef void (*poly1305_blocks_f) (void *ctx, const unsigned char *inp,
- size_t len, unsigned int padbit);
-typedef void (*poly1305_emit_f) (void *ctx, unsigned char mac[16],
- const unsigned int nonce[4]);
-
-struct poly1305_context {
- double opaque[24]; /* large enough to hold internal state, declared
- * 'double' to ensure at least 64-bit invariant
- * alignment across all platforms and
- * configurations */
- unsigned int nonce[4];
- unsigned char data[POLY1305_BLOCK_SIZE];
- size_t num;
- struct {
- poly1305_blocks_f blocks;
- poly1305_emit_f emit;
- } func;
-};
+/*
+ * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+typedef void (*poly1305_blocks_f) (void *ctx, const unsigned char *inp,
+ size_t len, unsigned int padbit);
+typedef void (*poly1305_emit_f) (void *ctx, unsigned char mac[16],
+ const unsigned int nonce[4]);
+
+struct poly1305_context {
+ double opaque[24]; /* large enough to hold internal state, declared
+ * 'double' to ensure at least 64-bit invariant
+ * alignment across all platforms and
+ * configurations */
+ unsigned int nonce[4];
+ unsigned char data[POLY1305_BLOCK_SIZE];
+ size_t num;
+ struct {
+ poly1305_blocks_f blocks;
+ poly1305_emit_f emit;
+ } func;
+};
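/*
 * A simplified, hypothetical sketch of how a driver loop feeds the
 * blocks function pointer declared above: whole 16-byte blocks go
 * straight to func.blocks, any tail is buffered in data[] until a
 * full block arrives. The real Poly1305_Update in poly1305.c follows
 * this pattern; the struct and names below are illustration only.
 */
#include <stddef.h>
#include <string.h>

#define BLOCK 16

struct mac_ctx {
    unsigned char data[BLOCK];
    size_t num;                         /* bytes currently buffered */
    void (*blocks)(void *ctx, const unsigned char *inp, size_t len,
                   unsigned int padbit);
};

static void mac_update(struct mac_ctx *c, const unsigned char *inp,
                       size_t len)
{
    if (c->num) {                       /* top up a partial block first */
        size_t rem = BLOCK - c->num;

        if (len < rem) {
            memcpy(c->data + c->num, inp, len);
            c->num += len;
            return;
        }
        memcpy(c->data + c->num, inp, rem);
        c->blocks(c, c->data, BLOCK, 1);
        inp += rem;
        len -= rem;
    }
    if (len >= BLOCK) {                 /* bulk-process whole blocks */
        size_t n = len & ~(size_t)(BLOCK - 1);

        c->blocks(c, inp, n, 1);
        inp += n;
        len -= n;
    }
    if (len)                            /* stash the tail for later */
        memcpy(c->data, inp, len);
    c->num = len;
}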
diff --git a/contrib/libs/openssl/crypto/poly1305/poly1305_pmeth.c b/contrib/libs/openssl/crypto/poly1305/poly1305_pmeth.c
index 49a799a12f..3d7eeb62a4 100644
--- a/contrib/libs/openssl/crypto/poly1305/poly1305_pmeth.c
+++ b/contrib/libs/openssl/crypto/poly1305/poly1305_pmeth.c
@@ -1,194 +1,194 @@
-/*
- * Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
- *
- * Licensed under the OpenSSL license (the "License"). You may not use
- * this file except in compliance with the License. You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
- */
-
-#include <stdio.h>
-#include "internal/cryptlib.h"
-#include <openssl/x509.h>
-#include <openssl/x509v3.h>
-#include <openssl/evp.h>
-#include <openssl/err.h>
+/*
+ * Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <stdio.h>
+#include "internal/cryptlib.h"
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/evp.h>
+#include <openssl/err.h>
#include "crypto/poly1305.h"
-#include "poly1305_local.h"
+#include "poly1305_local.h"
#include "crypto/evp.h"
-
-/* POLY1305 pkey context structure */
-
-typedef struct {
- ASN1_OCTET_STRING ktmp; /* Temp storage for key */
- POLY1305 ctx;
-} POLY1305_PKEY_CTX;
-
-static int pkey_poly1305_init(EVP_PKEY_CTX *ctx)
-{
- POLY1305_PKEY_CTX *pctx;
-
- if ((pctx = OPENSSL_zalloc(sizeof(*pctx))) == NULL) {
- CRYPTOerr(CRYPTO_F_PKEY_POLY1305_INIT, ERR_R_MALLOC_FAILURE);
- return 0;
- }
- pctx->ktmp.type = V_ASN1_OCTET_STRING;
-
- EVP_PKEY_CTX_set_data(ctx, pctx);
- EVP_PKEY_CTX_set0_keygen_info(ctx, NULL, 0);
- return 1;
-}
-
-static void pkey_poly1305_cleanup(EVP_PKEY_CTX *ctx)
-{
- POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(ctx);
-
- if (pctx != NULL) {
- OPENSSL_clear_free(pctx->ktmp.data, pctx->ktmp.length);
- OPENSSL_clear_free(pctx, sizeof(*pctx));
- EVP_PKEY_CTX_set_data(ctx, NULL);
- }
-}
-
-static int pkey_poly1305_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src)
-{
- POLY1305_PKEY_CTX *sctx, *dctx;
-
- /* allocate memory for dst->data and a new POLY1305_CTX in dst->data->ctx */
- if (!pkey_poly1305_init(dst))
- return 0;
- sctx = EVP_PKEY_CTX_get_data(src);
- dctx = EVP_PKEY_CTX_get_data(dst);
- if (ASN1_STRING_get0_data(&sctx->ktmp) != NULL &&
- !ASN1_STRING_copy(&dctx->ktmp, &sctx->ktmp)) {
- /* cleanup and free the POLY1305_PKEY_CTX in dst->data */
- pkey_poly1305_cleanup(dst);
- return 0;
- }
- memcpy(&dctx->ctx, &sctx->ctx, sizeof(POLY1305));
- return 1;
-}
-
-static int pkey_poly1305_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey)
-{
- ASN1_OCTET_STRING *key;
- POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(ctx);
-
- if (ASN1_STRING_get0_data(&pctx->ktmp) == NULL)
- return 0;
- key = ASN1_OCTET_STRING_dup(&pctx->ktmp);
- if (key == NULL)
- return 0;
- return EVP_PKEY_assign_POLY1305(pkey, key);
-}
-
-static int int_update(EVP_MD_CTX *ctx, const void *data, size_t count)
-{
- POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(EVP_MD_CTX_pkey_ctx(ctx));
-
- Poly1305_Update(&pctx->ctx, data, count);
- return 1;
-}
-
-static int poly1305_signctx_init(EVP_PKEY_CTX *ctx, EVP_MD_CTX *mctx)
-{
- POLY1305_PKEY_CTX *pctx = ctx->data;
- ASN1_OCTET_STRING *key = (ASN1_OCTET_STRING *)ctx->pkey->pkey.ptr;
-
- if (key->length != POLY1305_KEY_SIZE)
- return 0;
- EVP_MD_CTX_set_flags(mctx, EVP_MD_CTX_FLAG_NO_INIT);
- EVP_MD_CTX_set_update_fn(mctx, int_update);
- Poly1305_Init(&pctx->ctx, key->data);
- return 1;
-}
-
-static int poly1305_signctx(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen,
- EVP_MD_CTX *mctx)
-{
- POLY1305_PKEY_CTX *pctx = ctx->data;
-
- *siglen = POLY1305_DIGEST_SIZE;
- if (sig != NULL)
- Poly1305_Final(&pctx->ctx, sig);
- return 1;
-}
-
-static int pkey_poly1305_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2)
-{
- POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(ctx);
- const unsigned char *key;
- size_t len;
-
- switch (type) {
-
- case EVP_PKEY_CTRL_MD:
- /* ignore */
- break;
-
- case EVP_PKEY_CTRL_SET_MAC_KEY:
- case EVP_PKEY_CTRL_DIGESTINIT:
- if (type == EVP_PKEY_CTRL_SET_MAC_KEY) {
- /* user explicitly setting the key */
- key = p2;
- len = p1;
- } else {
- /* user indirectly setting the key via EVP_DigestSignInit */
- key = EVP_PKEY_get0_poly1305(EVP_PKEY_CTX_get0_pkey(ctx), &len);
- }
- if (key == NULL || len != POLY1305_KEY_SIZE ||
- !ASN1_OCTET_STRING_set(&pctx->ktmp, key, len))
- return 0;
- Poly1305_Init(&pctx->ctx, ASN1_STRING_get0_data(&pctx->ktmp));
- break;
-
- default:
- return -2;
-
- }
- return 1;
-}
-
-static int pkey_poly1305_ctrl_str(EVP_PKEY_CTX *ctx,
- const char *type, const char *value)
-{
- if (value == NULL)
- return 0;
- if (strcmp(type, "key") == 0)
- return EVP_PKEY_CTX_str2ctrl(ctx, EVP_PKEY_CTRL_SET_MAC_KEY, value);
- if (strcmp(type, "hexkey") == 0)
- return EVP_PKEY_CTX_hex2ctrl(ctx, EVP_PKEY_CTRL_SET_MAC_KEY, value);
- return -2;
-}
-
-const EVP_PKEY_METHOD poly1305_pkey_meth = {
- EVP_PKEY_POLY1305,
- EVP_PKEY_FLAG_SIGCTX_CUSTOM, /* we don't deal with a separate MD */
- pkey_poly1305_init,
- pkey_poly1305_copy,
- pkey_poly1305_cleanup,
-
- 0, 0,
-
- 0,
- pkey_poly1305_keygen,
-
- 0, 0,
-
- 0, 0,
-
- 0, 0,
-
- poly1305_signctx_init,
- poly1305_signctx,
-
- 0, 0,
-
- 0, 0,
-
- 0, 0,
-
- 0, 0,
-
- pkey_poly1305_ctrl,
- pkey_poly1305_ctrl_str
-};
+
+/* POLY1305 pkey context structure */
+
+typedef struct {
+ ASN1_OCTET_STRING ktmp; /* Temp storage for key */
+ POLY1305 ctx;
+} POLY1305_PKEY_CTX;
+
+static int pkey_poly1305_init(EVP_PKEY_CTX *ctx)
+{
+ POLY1305_PKEY_CTX *pctx;
+
+ if ((pctx = OPENSSL_zalloc(sizeof(*pctx))) == NULL) {
+ CRYPTOerr(CRYPTO_F_PKEY_POLY1305_INIT, ERR_R_MALLOC_FAILURE);
+ return 0;
+ }
+ pctx->ktmp.type = V_ASN1_OCTET_STRING;
+
+ EVP_PKEY_CTX_set_data(ctx, pctx);
+ EVP_PKEY_CTX_set0_keygen_info(ctx, NULL, 0);
+ return 1;
+}
+
+static void pkey_poly1305_cleanup(EVP_PKEY_CTX *ctx)
+{
+ POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(ctx);
+
+ if (pctx != NULL) {
+ OPENSSL_clear_free(pctx->ktmp.data, pctx->ktmp.length);
+ OPENSSL_clear_free(pctx, sizeof(*pctx));
+ EVP_PKEY_CTX_set_data(ctx, NULL);
+ }
+}
+
+static int pkey_poly1305_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src)
+{
+ POLY1305_PKEY_CTX *sctx, *dctx;
+
+ /* allocate memory for dst->data and a new POLY1305_CTX in dst->data->ctx */
+ if (!pkey_poly1305_init(dst))
+ return 0;
+ sctx = EVP_PKEY_CTX_get_data(src);
+ dctx = EVP_PKEY_CTX_get_data(dst);
+ if (ASN1_STRING_get0_data(&sctx->ktmp) != NULL &&
+ !ASN1_STRING_copy(&dctx->ktmp, &sctx->ktmp)) {
+ /* cleanup and free the POLY1305_PKEY_CTX in dst->data */
+ pkey_poly1305_cleanup(dst);
+ return 0;
+ }
+ memcpy(&dctx->ctx, &sctx->ctx, sizeof(POLY1305));
+ return 1;
+}
+
+static int pkey_poly1305_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey)
+{
+ ASN1_OCTET_STRING *key;
+ POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(ctx);
+
+ if (ASN1_STRING_get0_data(&pctx->ktmp) == NULL)
+ return 0;
+ key = ASN1_OCTET_STRING_dup(&pctx->ktmp);
+ if (key == NULL)
+ return 0;
+ return EVP_PKEY_assign_POLY1305(pkey, key);
+}
+
+static int int_update(EVP_MD_CTX *ctx, const void *data, size_t count)
+{
+ POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(EVP_MD_CTX_pkey_ctx(ctx));
+
+ Poly1305_Update(&pctx->ctx, data, count);
+ return 1;
+}
+
+static int poly1305_signctx_init(EVP_PKEY_CTX *ctx, EVP_MD_CTX *mctx)
+{
+ POLY1305_PKEY_CTX *pctx = ctx->data;
+ ASN1_OCTET_STRING *key = (ASN1_OCTET_STRING *)ctx->pkey->pkey.ptr;
+
+ if (key->length != POLY1305_KEY_SIZE)
+ return 0;
+ EVP_MD_CTX_set_flags(mctx, EVP_MD_CTX_FLAG_NO_INIT);
+ EVP_MD_CTX_set_update_fn(mctx, int_update);
+ Poly1305_Init(&pctx->ctx, key->data);
+ return 1;
+}
+
+static int poly1305_signctx(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen,
+ EVP_MD_CTX *mctx)
+{
+ POLY1305_PKEY_CTX *pctx = ctx->data;
+
+ *siglen = POLY1305_DIGEST_SIZE;
+ if (sig != NULL)
+ Poly1305_Final(&pctx->ctx, sig);
+ return 1;
+}
+
+static int pkey_poly1305_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2)
+{
+ POLY1305_PKEY_CTX *pctx = EVP_PKEY_CTX_get_data(ctx);
+ const unsigned char *key;
+ size_t len;
+
+ switch (type) {
+
+ case EVP_PKEY_CTRL_MD:
+ /* ignore */
+ break;
+
+ case EVP_PKEY_CTRL_SET_MAC_KEY:
+ case EVP_PKEY_CTRL_DIGESTINIT:
+ if (type == EVP_PKEY_CTRL_SET_MAC_KEY) {
+ /* user explicitly setting the key */
+ key = p2;
+ len = p1;
+ } else {
+ /* user indirectly setting the key via EVP_DigestSignInit */
+ key = EVP_PKEY_get0_poly1305(EVP_PKEY_CTX_get0_pkey(ctx), &len);
+ }
+ if (key == NULL || len != POLY1305_KEY_SIZE ||
+ !ASN1_OCTET_STRING_set(&pctx->ktmp, key, len))
+ return 0;
+ Poly1305_Init(&pctx->ctx, ASN1_STRING_get0_data(&pctx->ktmp));
+ break;
+
+ default:
+ return -2;
+
+ }
+ return 1;
+}
+
+static int pkey_poly1305_ctrl_str(EVP_PKEY_CTX *ctx,
+ const char *type, const char *value)
+{
+ if (value == NULL)
+ return 0;
+ if (strcmp(type, "key") == 0)
+ return EVP_PKEY_CTX_str2ctrl(ctx, EVP_PKEY_CTRL_SET_MAC_KEY, value);
+ if (strcmp(type, "hexkey") == 0)
+ return EVP_PKEY_CTX_hex2ctrl(ctx, EVP_PKEY_CTRL_SET_MAC_KEY, value);
+ return -2;
+}
+
+const EVP_PKEY_METHOD poly1305_pkey_meth = {
+ EVP_PKEY_POLY1305,
+ EVP_PKEY_FLAG_SIGCTX_CUSTOM, /* we don't deal with a separate MD */
+ pkey_poly1305_init,
+ pkey_poly1305_copy,
+ pkey_poly1305_cleanup,
+
+ 0, 0,
+
+ 0,
+ pkey_poly1305_keygen,
+
+ 0, 0,
+
+ 0, 0,
+
+ 0, 0,
+
+ poly1305_signctx_init,
+ poly1305_signctx,
+
+ 0, 0,
+
+ 0, 0,
+
+ 0, 0,
+
+ 0, 0,
+
+ pkey_poly1305_ctrl,
+ pkey_poly1305_ctrl_str
+};
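/*
 * For context, a sketch of how this pkey method is reached through the
 * public EVP API in OpenSSL 1.1.1 (error paths collapsed into a single
 * status; note that a Poly1305 key must never be reused across
 * messages):
 */
#include <openssl/evp.h>

int poly1305_mac(const unsigned char key[32],
                 const unsigned char *msg, size_t msglen,
                 unsigned char mac[16])
{
    EVP_PKEY *pkey = EVP_PKEY_new_raw_private_key(EVP_PKEY_POLY1305,
                                                  NULL, key, 32);
    EVP_MD_CTX *mctx = EVP_MD_CTX_new();
    size_t maclen = 16;
    int ok = pkey != NULL && mctx != NULL
          && EVP_DigestSignInit(mctx, NULL, NULL, NULL, pkey) > 0
          && EVP_DigestSign(mctx, mac, &maclen, msg, msglen) > 0;

    EVP_MD_CTX_free(mctx);
    EVP_PKEY_free(pkey);
    return ok;
}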