path: root/contrib/restricted/aws/s2n/pq-crypto/bike_r2/aes_ctr_prf.c
/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Written by Nir Drucker and Shay Gueron
 * AWS Cryptographic Algorithms Group.
 * (ndrucker@amazon.com, gueron@amazon.com)
 */

#include "aes_ctr_prf.h"
#include "utilities.h"
#include <string.h>

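// Seed the PRF: expand the seed into an AES256 key schedule, zero the
// counter and buffer, and arm the invocation budget.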
ret_t
init_aes_ctr_prf_state(OUT aes_ctr_prf_state_t *s,
                       IN const uint32_t        max_invokations,
                       IN const seed_t *seed)
{
  if(0 == max_invokations)
  {
    BIKE_ERROR(E_AES_CTR_PRF_INIT_FAIL);
  }

  // Set the key schedule (from seed).
  // Make sure the size matches the AES256 key size
  DEFER_CLEANUP(aes256_key_t key, aes256_key_cleanup);

  bike_static_assert(sizeof(*seed) == sizeof(key.raw), seed_size_equals_ky_size);
  memcpy(key.raw, seed->raw, sizeof(key.raw));

  GUARD(aes256_key_expansion(&s->ks_ptr, &key));

  // Initialize buffer and counter
  s->ctr.u.qw[0]    = 0;
  s->ctr.u.qw[1]    = 0;
  s->buffer.u.qw[0] = 0;
  s->buffer.u.qw[1] = 0;

  s->pos             = AES256_BLOCK_SIZE;
  s->rem_invokations = max_invokations;

  SEDMSG("    Init aes_prf_ctr state:\n");
  SEDMSG("      s.pos = %d\n", s->pos);
  SEDMSG("      s.rem_invokations = %u\n", s->rem_invokations);
  SEDMSG("      s.ctr = 0x\n");

  return SUCCESS;
}

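// Encrypt the current counter block into ct, then advance the counter and
// charge one invocation against the remaining budget.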
_INLINE_ ret_t
perform_aes(OUT uint8_t *ct, IN OUT aes_ctr_prf_state_t *s)
{
  // Ensure the counter is large enough: the invocation budget must fit
  // comfortably within the 64-bit counter word.
  bike_static_assert(
      ((sizeof(s->ctr.u.qw[0]) == 8) && (BIT(33) >= MAX_AES_INVOKATION)),
      ctr_size_is_too_small);

  if(0 == s->rem_invokations)
  {
    BIKE_ERROR(E_AES_OVER_USED);
  }

  GUARD(aes256_enc(ct, s->ctr.u.bytes, &s->ks_ptr));

  s->ctr.u.qw[0]++;
  s->rem_invokations--;

  return SUCCESS;
}

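// Fill a[0..len) with PRF output: drain the buffered block first, emit
// whole AES blocks directly into a, then refill the buffer for the tail.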
ret_t
aes_ctr_prf(OUT uint8_t *a, IN OUT aes_ctr_prf_state_t *s, IN const uint32_t len)
{
  // When len fits in what is left of the buffer,
  // no additional AES invocation is needed.
  if((len + s->pos) <= AES256_BLOCK_SIZE)
  {
    memcpy(a, &s->buffer.u.bytes[s->pos], len);
    s->pos += len;

    return SUCCESS;
  }

  // If s.pos != AES256_BLOCK_SIZE, copy what is left of the buffer;
  // otherwise copy zero bytes.
  uint32_t idx = AES256_BLOCK_SIZE - s->pos;
  memcpy(a, &s->buffer.u.bytes[s->pos], idx);

  // Reset s.pos
  s->pos = 0;

  // Generate full AES blocks directly into the output
  while((len - idx) >= AES256_BLOCK_SIZE)
  {
    GUARD(perform_aes(&a[idx], s));
    idx += AES256_BLOCK_SIZE;
  }

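  // Refill the internal buffer with one more block to serve the tail.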
  GUARD(perform_aes(s->buffer.u.bytes, s));

  // Copy the tail
  s->pos = len - idx;
  memcpy(&a[idx], s->buffer.u.bytes, s->pos);

  return SUCCESS;
}
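
/*
 * Usage sketch (illustrative only, not part of the upstream file): how a
 * caller might seed the PRF and then draw bytes from it. The names
 * "example_prf_usage", "out", and "my_seed" are hypothetical; ret_t, seed_t,
 * GUARD, SUCCESS, and the MAX_AES_INVOKATION bound come from the headers
 * included above. A real caller should also wipe the state when done, per
 * this library's cleanup conventions. Guarded out of the build with #if 0.
 */
#if 0
ret_t
example_prf_usage(OUT uint8_t *out, IN const uint32_t out_len, IN const seed_t *my_seed)
{
  aes_ctr_prf_state_t prf_state = {0};

  // One seed expansion; the budget caps the total number of AES calls.
  GUARD(init_aes_ctr_prf_state(&prf_state, MAX_AES_INVOKATION, my_seed));

  // Requests of any length are served from the one-block buffer plus as
  // many fresh CTR blocks as needed.
  GUARD(aes_ctr_prf(out, &prf_state, out_len));

  return SUCCESS;
}
#endif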