/*
 * Copyright (C) 2018 Denys Vlasenko
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */
#include "tls.h"

typedef uint8_t byte;
typedef uint32_t word32;
#define XMEMSET memset
#define XMEMCPY memcpy

/* from wolfssl-3.15.3/wolfcrypt/src/aes.c */

#ifdef UNUSED
static ALWAYS_INLINE void FlattenSzInBits(byte* buf, word32 sz)
{
	/* Multiply the sz by 8 */
//bbox: these sizes are never even close to 2^32/8
//	word32 szHi = (sz >> (8*sizeof(sz) - 3));
	sz <<= 3;

	/* copy over the words of the sz into the destination buffer */
//	buf[0] = (szHi >> 24) & 0xff;
//	buf[1] = (szHi >> 16) & 0xff;
//	buf[2] = (szHi >> 8) & 0xff;
//	buf[3] = szHi & 0xff;
	*(uint32_t*)(buf + 0) = 0;
//	buf[4] = (sz >> 24) & 0xff;
//	buf[5] = (sz >> 16) & 0xff;
//	buf[6] = (sz >> 8) & 0xff;
//	buf[7] = sz & 0xff;
	*(uint32_t*)(buf + 4) = SWAP_BE32(sz);
}
#endif

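/* Multiply the 128-bit value in x[] by x (alpha) in GCM's GF(2^128)
 * representation: shift the whole block right by one bit (toward x[15]);
 * if a 1 bit drops off the low end (x[15] & 0x01), reduce by XORing 0xE1
 * into x[0] - the GCM encoding of the reduction polynomial
 * x^128 + x^7 + x^2 + x + 1 (bits are stored reflected, so the low-order
 * terms land in the most significant byte).
 */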
static void RIGHTSHIFTX(byte* x)
{
#define l ((unsigned long*)x)
#if 0

	// Generic byte-at-a-time algorithm
	int i;
	byte carryIn = (x[15] & 0x01) ? 0xE1 : 0;
	for (i = 0; i < AES_BLOCK_SIZE; i++) {
		byte carryOut = (x[i] << 7); // zero, or 0x80
		x[i] = (x[i] >> 1) ^ carryIn;
		carryIn = carryOut;
	}

#elif BB_BIG_ENDIAN

	// Big-endian can shift-right in larger than byte chunks
	// (we use the fact that 'x' is long-aligned)
	unsigned long carryIn = (x[15] & 0x01)
		? ((unsigned long)0xE1 << (LONG_BIT-8))
		: 0;
# if ULONG_MAX <= 0xffffffff
	int i;
	for (i = 0; i < AES_BLOCK_SIZE/sizeof(long); i++) {
		unsigned long carryOut = l[i] << (LONG_BIT-1); // zero, or 0x800..00
		l[i] = (l[i] >> 1) ^ carryIn;
		carryIn = carryOut;
	}
# else
	// 64-bit code: need to process only 2 words
	unsigned long carryOut = l[0] << (LONG_BIT-1); // zero, or 0x800..00
	l[0] = (l[0] >> 1) ^ carryIn;
	l[1] = (l[1] >> 1) ^ carryOut;
# endif

#else /* LITTLE_ENDIAN */

	// In order to use word-sized ops, little-endian needs to byteswap.
	// On x86, code size increase is ~10 bytes compared to byte-by-byte.
	unsigned long carryIn = (x[15] & 0x01)
		? ((unsigned long)0xE1 << (LONG_BIT-8))
		: 0;
# if ULONG_MAX <= 0xffffffff
	int i;
	for (i = 0; i < AES_BLOCK_SIZE/sizeof(long); i++) {
		unsigned long ti = SWAP_BE32(l[i]);
		unsigned long carryOut = ti << (LONG_BIT-1); // zero, or 0x800..00
		ti = (ti >> 1) ^ carryIn;
		l[i] = SWAP_BE32(ti);
		carryIn = carryOut;
	}
# else
	// 64-bit code: need to process only 2 words
	unsigned long tt = SWAP_BE64(l[0]);
	unsigned long carryOut = tt << (LONG_BIT-1); // zero, or 0x800..00
	tt = (tt >> 1) ^ carryIn; l[0] = SWAP_BE64(tt);
	tt = SWAP_BE64(l[1]);
	tt = (tt >> 1) ^ carryOut; l[1] = SWAP_BE64(tt);
# endif

#endif /* LITTLE_ENDIAN */
#undef l
}

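/* GMULT() computes X = X * Y in GF(2^128) (GHASH's "multiply by H" step)
 * with the bit-by-bit shift-and-xor method: for each bit of Y, MSB first,
 * the current X is conditionally XORed into the accumulator Z, then X is
 * advanced one position via RIGHTSHIFTX().  The 0x800000 bit OR'ed into y
 * is a sentinel: after 8 left shifts it reaches the sign bit and ends the
 * inner loop without a separate counter.  X is overwritten with the result.
 */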
// Caller guarantees X is aligned
static void GMULT(byte* X, byte* Y)
{
	byte Z[AES_BLOCK_SIZE] ALIGNED_long;
	//byte V[AES_BLOCK_SIZE] ALIGNED_long;
	int i;

	XMEMSET(Z, 0, AES_BLOCK_SIZE);
	//XMEMCPY(V, X, AES_BLOCK_SIZE);
	for (i = 0; i < AES_BLOCK_SIZE; i++) {
		uint32_t y = 0x800000 | Y[i];
		for (;;) { // for every bit in Y[i], from msb to lsb
			if (y & 0x80) {
				xorbuf_aligned_AES_BLOCK_SIZE(Z, X); // was V, not X
			}
			RIGHTSHIFTX(X); // was V, not X
			y = y << 1;
			if ((int32_t)y < 0) // if bit 0x80000000 set = if 8 iterations done
				break;
		}
	}
	XMEMCPY(X, Z, AES_BLOCK_SIZE);
}

//bbox:
// for TLS AES-GCM, a (which is AAD) is always 13 bytes long, and bbox code provides
// 3 extra zeroed bytes, making it a[16], or a[AES_BLOCK_SIZE].
// Resulting auth tag in s[] is also always AES_BLOCK_SIZE bytes.
//
// This allows some simplifications.
#define aSz 13
#define sSz AES_BLOCK_SIZE
void FAST_FUNC aesgcm_GHASH(byte* h,
	const byte* a, //unsigned aSz,
	const byte* c, unsigned cSz,
	byte* s //, unsigned sSz
)
{
	byte x[AES_BLOCK_SIZE] ALIGNED_long;
//	byte scratch[AES_BLOCK_SIZE] ALIGNED_long;
	unsigned blocks, partial;
	//was: byte* h = aes->H;

	//XMEMSET(x, 0, AES_BLOCK_SIZE);

	/* Hash in A, the Additional Authentication Data */
//	if (aSz != 0 && a != NULL) {
//		blocks = aSz / AES_BLOCK_SIZE;
//		partial = aSz % AES_BLOCK_SIZE;
//		while (blocks--) {
			//xorbuf(x, a, AES_BLOCK_SIZE);
			XMEMCPY(x, a, AES_BLOCK_SIZE);// memcpy(x,a) = memset(x,0)+xorbuf(x,a)
			GMULT(x, h);
//			a += AES_BLOCK_SIZE;
//		}
//		if (partial != 0) {
//			XMEMSET(scratch, 0, AES_BLOCK_SIZE);
//			XMEMCPY(scratch, a, partial);
//			xorbuf(x, scratch, AES_BLOCK_SIZE);
//			GMULT(x, h);
//		}
//	}

	/* Hash in C, the Ciphertext */
	if (cSz != 0 /*&& c != NULL*/) {
		blocks = cSz / AES_BLOCK_SIZE;
		partial = cSz % AES_BLOCK_SIZE;
		while (blocks--) {
			if (BB_UNALIGNED_MEMACCESS_OK) // c is not guaranteed to be aligned
				xorbuf_aligned_AES_BLOCK_SIZE(x, c);
			else
				xorbuf(x, c, AES_BLOCK_SIZE);
			GMULT(x, h);
			c += AES_BLOCK_SIZE;
		}
		if (partial != 0) {
			//XMEMSET(scratch, 0, AES_BLOCK_SIZE);
			//XMEMCPY(scratch, c, partial);
			//xorbuf(x, scratch, AES_BLOCK_SIZE);
			xorbuf(x, c, partial);//same result as above
			GMULT(x, h);
		}
	}

	/* Hash in the lengths of A and C in bits */
	//FlattenSzInBits(&scratch[0], aSz);
	//FlattenSzInBits(&scratch[8], cSz);
	//xorbuf_aligned_AES_BLOCK_SIZE(x, scratch);
	// simpler:
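	// The final GHASH block is len(A) || len(C), each a 64-bit big-endian
	// bit count.  Here aSz*8 and cSz*8 always fit in 32 bits, so the high
	// half of each length is zero (XOR with 0 can be skipped) and only the
	// low 32-bit words are XORed in, byte-swapped to big-endian.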
#define P32(v) ((uint32_t*)v)
	//P32(x)[0] ^= 0;
	P32(x)[1] ^= SWAP_BE32(aSz * 8);
	//P32(x)[2] ^= 0;
	P32(x)[3] ^= SWAP_BE32(cSz * 8);
#undef P32

	GMULT(x, h);

	/* Copy the result into s. */
	XMEMCPY(s, x, sSz);
}
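
#ifdef UNUSED
/* Illustrative sketch, not compiled: how the GHASH above yields the GCM
 * authentication tag (NIST SP 800-38D).  The caller is assumed to have
 * already computed the hash subkey H = E_K(0^128) and the encrypted
 * pre-counter block E_K(J0) with its own AES routine; this helper's name
 * and parameters are hypothetical, not part of the real tls code.
 */
static void aesgcm_tag_sketch(byte* h,      /* hash subkey H = E_K(0^128) */
		const byte* ek_j0,          /* E_K(J0), encrypted pre-counter block */
		const byte* aad,            /* 13 AAD bytes, zero-padded to 16 by caller */
		const byte* ctext, unsigned clen,
		byte* tag)                  /* out: 16-byte authentication tag */
{
	byte S[AES_BLOCK_SIZE];
	unsigned i;

	/* S = GHASH_H(AAD || C || len64(AAD) || len64(C)) */
	aesgcm_GHASH(h, aad, ctext, clen, S);

	/* Tag = E_K(J0) XOR S */
	for (i = 0; i < AES_BLOCK_SIZE; i++)
		tag[i] = ek_j0[i] ^ S[i];
}
#endif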