/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_sha2_h
#define included_sha2_h

#include <vppinfra/clib.h>

#define SHA224_DIGEST_SIZE 28
#define SHA224_BLOCK_SIZE 64

#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
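/* SHA-224/256 logical functions and rotations (FIPS 180-4 section 4.1.2) */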
#define SHA256_ROTR(x, y) ((x >> y) | (x << (32 - y)))
#define SHA256_CH(a, b, c) ((a & b) ^ (~a & c))
#define SHA256_MAJ(a, b, c) ((a & b) ^ (a & c) ^ (b & c))
#define SHA256_CSIGMA0(x) \
  (SHA256_ROTR (x, 2) ^ SHA256_ROTR (x, 13) ^ SHA256_ROTR (x, 22))
#define SHA256_CSIGMA1(x) \
  (SHA256_ROTR (x, 6) ^ SHA256_ROTR (x, 11) ^ SHA256_ROTR (x, 25))
#define SHA256_SSIGMA0(x) (SHA256_ROTR (x, 7) ^ SHA256_ROTR (x, 18) ^ (x >> 3))
#define SHA256_SSIGMA1(x) \
  (SHA256_ROTR (x, 17) ^ SHA256_ROTR (x, 19) ^ (x >> 10))

#define SHA256_MSG_SCHED(w, j) \
  { \
    w[j] = w[j - 7] + w[j - 16]; \
    w[j] += SHA256_SSIGMA0 (w[j - 15]); \
    w[j] += SHA256_SSIGMA1 (w[j - 2]); \
  }

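/*
 * One round of the SHA-256 compression function (FIPS 180-4 section 6.2.2):
 * s[] holds the eight working variables a..h, w is the message schedule and
 * k is the round constant for round i.
 */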
#define SHA256_TRANSFORM(s, w, i, k) \
  { \
    __typeof__ (s[0]) t1, t2; \
    t1 = k + w[i] + s[7]; \
    t1 += SHA256_CSIGMA1 (s[4]); \
    t1 += SHA256_CH (s[4], s[5], s[6]); \
    t2 = SHA256_CSIGMA0 (s[0]); \
    t2 += SHA256_MAJ (s[0], s[1], s[2]); \
    s[7] = s[6]; \
    s[6] = s[5]; \
    s[5] = s[4]; \
    s[4] = s[3] + t1; \
    s[3] = s[2]; \
    s[2] = s[1]; \
    s[1] = s[0]; \
    s[0] = t1 + t2; \
  }

#define SHA512_224_DIGEST_SIZE 28
#define SHA512_224_BLOCK_SIZE 128

#define SHA512_256_DIGEST_SIZE 32
#define SHA512_256_BLOCK_SIZE 128

#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128

#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
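/* SHA-384/512 logical functions and rotations (FIPS 180-4 section 4.1.3) */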
#define SHA512_ROTR(x, y) ((x >> y) | (x << (64 - y)))
#define SHA512_CH(a, b, c) ((a & b) ^ (~a & c))
#define SHA512_MAJ(a, b, c) ((a & b) ^ (a & c) ^ (b & c))
#define SHA512_CSIGMA0(x) \
  (SHA512_ROTR (x, 28) ^ SHA512_ROTR (x, 34) ^ SHA512_ROTR (x, 39))
#define SHA512_CSIGMA1(x) \
  (SHA512_ROTR (x, 14) ^ SHA512_ROTR (x, 18) ^ SHA512_ROTR (x, 41))
#define SHA512_SSIGMA0(x) (SHA512_ROTR (x, 1) ^ SHA512_ROTR (x, 8) ^ (x >> 7))
#define SHA512_SSIGMA1(x) \
  (SHA512_ROTR (x, 19) ^ SHA512_ROTR (x, 61) ^ (x >> 6))

#define SHA512_MSG_SCHED(w, j) \
  { \
    w[j] = w[j - 7] + w[j - 16]; \
    w[j] += SHA512_SSIGMA0 (w[j - 15]); \
    w[j] += SHA512_SSIGMA1 (w[j - 2]); \
  }

#define SHA512_TRANSFORM(s, w, i, k) \
  { \
    __typeof__ (s[0]) t1, t2; \
    t1 = k + w[i] + s[7]; \
    t1 += SHA512_CSIGMA1 (s[4]); \
    t1 += SHA512_CH (s[4], s[5], s[6]); \
    t2 = SHA512_CSIGMA0 (s[0]); \
    t2 += SHA512_MAJ (s[0], s[1], s[2]); \
    s[7] = s[6]; \
    s[6] = s[5]; \
    s[5] = s[4]; \
    s[4] = s[3] + t1; \
    s[3] = s[2]; \
    s[2] = s[1]; \
    s[1] = s[0]; \
    s[0] = t1 + t2; \
  }

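/* initial hash values (FIPS 180-4 section 5.3) and round constants
   (section 4.2) for each supported variant */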
static const u32 sha224_h[8] = { 0xc1059ed8, 0x367cd507, 0x3070dd17,
                                 0xf70e5939, 0xffc00b31, 0x68581511,
                                 0x64f98fa7, 0xbefa4fa4 };

static const u32 sha256_h[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372,
                                 0xa54ff53a, 0x510e527f, 0x9b05688c,
                                 0x1f83d9ab, 0x5be0cd19 };

static const u32 sha256_k[64] = {
  0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
  0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
  0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
  0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
  0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
  0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
  0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
  0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
  0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
  0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
  0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

static const u64 sha384_h[8] = { 0xcbbb9d5dc1059ed8, 0x629a292a367cd507,
                                 0x9159015a3070dd17, 0x152fecd8f70e5939,
                                 0x67332667ffc00b31, 0x8eb44a8768581511,
                                 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4 };

static const u64 sha512_h[8] = { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
                                 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
                                 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
                                 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 };

static const u64 sha512_224_h[8] = { 0x8c3d37c819544da2, 0x73e1996689dcd4d6,
                                     0x1dfab7ae32ff9c82, 0x679dd514582f9fcf,
                                     0x0f6d2b697bd44da8, 0x77e36f7304c48942,
                                     0x3f9d85a86a1d36c8, 0x1112e6ad91d692a1 };

static const u64 sha512_256_h[8] = { 0x22312194fc2bf72c, 0x9f555fa3c84c64c2,
                                     0x2393b86b6f53b151, 0x963877195940eabd,
                                     0x96283ee2a88effe3, 0xbe5e1e2553863992,
                                     0x2b0199fc2c85b8aa, 0x0eb72ddc81c52ca2 };

static const u64 sha512_k[80] = {
  0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f,
  0xe9b5dba58189dbbc, 0x3956c25bf348b538, 0x59f111f1b605d019,
  0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242,
  0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
  0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
  0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
  0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275,
  0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
  0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f,
  0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
  0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc,
  0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
  0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6,
  0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001,
  0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
  0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
  0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99,
  0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
  0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc,
  0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
  0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915,
  0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207,
  0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba,
  0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
  0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
  0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
  0x5fcb6fab3ad6faec, 0x6c44198c4a475817
};

typedef enum
{
  CLIB_SHA2_224,
  CLIB_SHA2_256,
  CLIB_SHA2_384,
  CLIB_SHA2_512,
  CLIB_SHA2_512_224,
  CLIB_SHA2_512_256,
} clib_sha2_type_t;

#define SHA2_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
#define SHA2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE

typedef struct
{
  u64 total_bytes;
  u16 n_pending;
  u8 block_size;
  u8 digest_size;
  union
  {
    u32 h32[8];
    u64 h64[8];
#if defined(__SHA__) && defined(__x86_64__)
    u32x4 h32x4[2];
#endif
  };
  union
  {
    u8 as_u8[SHA2_MAX_BLOCK_SIZE];
    u64 as_u64[SHA2_MAX_BLOCK_SIZE / sizeof (u64)];
    uword as_uword[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
  } pending;
} clib_sha2_ctx_t;

static_always_inline void
clib_sha2_init (clib_sha2_ctx_t *ctx, clib_sha2_type_t type)
{
  const u32 *h32 = 0;
  const u64 *h64 = 0;

  ctx->total_bytes = 0;
  ctx->n_pending = 0;

  switch (type)
    {
    case CLIB_SHA2_224:
      h32 = sha224_h;
      ctx->block_size = SHA224_BLOCK_SIZE;
      ctx->digest_size = SHA224_DIGEST_SIZE;
      break;
    case CLIB_SHA2_256:
      h32 = sha256_h;
      ctx->block_size = SHA256_BLOCK_SIZE;
      ctx->digest_size = SHA256_DIGEST_SIZE;
      break;
    case CLIB_SHA2_384:
      h64 = sha384_h;
      ctx->block_size = SHA384_BLOCK_SIZE;
      ctx->digest_size = SHA384_DIGEST_SIZE;
      break;
    case CLIB_SHA2_512:
      h64 = sha512_h;
      ctx->block_size = SHA512_BLOCK_SIZE;
      ctx->digest_size = SHA512_DIGEST_SIZE;
      break;
    case CLIB_SHA2_512_224:
      h64 = sha512_224_h;
      ctx->block_size = SHA512_224_BLOCK_SIZE;
      ctx->digest_size = SHA512_224_DIGEST_SIZE;
      break;
    case CLIB_SHA2_512_256:
      h64 = sha512_256_h;
      ctx->block_size = SHA512_256_BLOCK_SIZE;
      ctx->digest_size = SHA512_256_DIGEST_SIZE;
      break;
    }
  if (h32)
    for (int i = 0; i < 8; i++)
      ctx->h32[i] = h32[i];

  if (h64)
    for (int i = 0; i < 8; i++)
      ctx->h64[i] = h64[i];
}

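/* SHA-256 round helpers built on the x86 SHA-NI instruction set extensions;
   compiled in only when the target supports them */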
#if defined(__SHA__) && defined(__x86_64__)
static inline void
shani_sha256_cycle_w (u32x4 cw[], u8 a, u8 b, u8 c, u8 d)
{
  cw[a] = (u32x4) _mm_sha256msg1_epu32 ((__m128i) cw[a], (__m128i) cw[b]);
  cw[a] += (u32x4) _mm_alignr_epi8 ((__m128i) cw[d], (__m128i) cw[c], 4);
  cw[a] = (u32x4) _mm_sha256msg2_epu32 ((__m128i) cw[a], (__m128i) cw[d]);
}

static inline void
shani_sha256_4_rounds (u32x4 cw, u8 n, u32x4 s[])
{
  u32x4 r = *(u32x4 *) (sha256_k + 4 * n) + cw;
  s[0] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[0], (__m128i) s[1],
                                        (__m128i) r);
  r = (u32x4) u64x2_interleave_hi ((u64x2) r, (u64x2) r);
  s[1] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[1], (__m128i) s[0],
                                        (__m128i) r);
}

static inline void
shani_sha256_shuffle (u32x4 d[2], u32x4 s[2])
{
  /* {0, 1, 2, 3}, {4, 5, 6, 7} -> {7, 6, 3, 2}, {5, 4, 1, 0} */
  d[0] = (u32x4) _mm_shuffle_ps ((__m128) s[1], (__m128) s[0], 0xbb);
  d[1] = (u32x4) _mm_shuffle_ps ((__m128) s[1], (__m128) s[0], 0x11);
}
#endif

static inline void
clib_sha256_block (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_blocks)
{
#if defined(__SHA__) && defined(__x86_64__)
  u32x4 h[2], s[2], w[4];

  shani_sha256_shuffle (h, ctx->h32x4);

  while (n_blocks)
    {
      w[0] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 0));
      w[1] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 16));
      w[2] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 32));
      w[3] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 48));

      s[0] = h[0];
      s[1] = h[1];

      shani_sha256_4_rounds (w[0], 0, s);
      shani_sha256_4_rounds (w[1], 1, s);
      shani_sha256_4_rounds (w[2], 2, s);
      shani_sha256_4_rounds (w[3], 3, s);

      shani_sha256_cycle_w (w, 0, 1, 2, 3);
      shani_sha256_4_rounds (w[0], 4, s);
      shani_sha256_cycle_w (w, 1, 2, 3, 0);
      shani_sha256_4_rounds (w[1], 5, s);
      shani_sha256_cycle_w (w, 2, 3, 0, 1);
      shani_sha256_4_rounds (w[2], 6, s);
      shani_sha256_cycle_w (w, 3, 0, 1, 2);
      shani_sha256_4_rounds (w[3], 7, s);

      shani_sha256_cycle_w (w, 0, 1, 2, 3);
      shani_sha256_4_rounds (w[0], 8, s);
      shani_sha256_cycle_w (w, 1, 2, 3, 0);
      shani_sha256_4_rounds (w[1], 9, s);
      shani_sha256_cycle_w (w, 2, 3, 0, 1);
      shani_sha256_4_rounds (w[2], 10, s);
      shani_sha256_cycle_w (w, 3, 0, 1, 2);
      shani_sha256_4_rounds (w[3], 11, s);

      shani_sha256_cycle_w (w, 0, 1, 2, 3);
      shani_sha256_4_rounds (w[0], 12, s);
      shani_sha256_cycle_w (w, 1, 2, 3, 0);
      shani_sha256_4_rounds (w[1], 13, s);
      shani_sha256_cycle_w (w, 2, 3, 0, 1);
      shani_sha256_4_rounds (w[2], 14, s);
      shani_sha256_cycle_w (w, 3, 0, 1, 2);
      shani_sha256_4_rounds (w[3], 15, s);

      h[0] += s[0];
      h[1] += s[1];

      /* next */
      msg += SHA256_BLOCK_SIZE;
      n_blocks--;
    }

  shani_sha256_shuffle (ctx->h32x4, h);
#else
  u32 w[64], s[8], i;

  while (n_blocks)
    {
      for (i = 0; i < 8; i++)
        s[i] = ctx->h32[i];

      for (i = 0; i < 16; i++)
        {
          w[i] = clib_net_to_host_u32 (*((u32 *) msg + i));
          SHA256_TRANSFORM (s, w, i, sha256_k[i]);
        }

      for (i = 16; i < 64; i++)
        {
          SHA256_MSG_SCHED (w, i);
          SHA256_TRANSFORM (s, w, i, sha256_k[i]);
        }

      for (i = 0; i < 8; i++)
        ctx->h32[i] += s[i];

      /* next */
      msg += SHA256_BLOCK_SIZE;
      n_blocks--;
    }
#endif
}

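/* process n_blocks of 128 bytes each; shared by SHA-384, SHA-512,
   SHA-512/224 and SHA-512/256, which differ only in initial hash values and
   digest size */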
static_always_inline void
clib_sha512_block (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_blocks)
{
  u64 w[80], s[8], i;

  while (n_blocks)
    {
      for (i = 0; i < 8; i++)
        s[i] = ctx->h64[i];

      for (i = 0; i < 16; i++)
        {
          w[i] = clib_net_to_host_u64 (*((u64 *) msg + i));
          SHA512_TRANSFORM (s, w, i, sha512_k[i]);
        }

      for (i = 16; i < 80; i++)
        {
          SHA512_MSG_SCHED (w, i);
          SHA512_TRANSFORM (s, w, i, sha512_k[i]);
        }

      for (i = 0; i < 8; i++)
        ctx->h64[i] += s[i];

      /* next */
      msg += SHA512_BLOCK_SIZE;
      n_blocks--;
    }
}

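/* incremental update: top up any partially filled block first, then hash
   full blocks directly from msg, buffering the remainder in ctx->pending */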
static_always_inline void
clib_sha2_update (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_bytes)
{
  uword n_blocks;
  if (ctx->n_pending)
    {
      uword n_left = ctx->block_size - ctx->n_pending;
      if (n_bytes < n_left)
        {
          clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg,
                            n_bytes);
          ctx->n_pending += n_bytes;
          return;
        }
      else
        {
          clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_left);
          if (ctx->block_size == SHA512_BLOCK_SIZE)
            clib_sha512_block (ctx, ctx->pending.as_u8, 1);
          else
            clib_sha256_block (ctx, ctx->pending.as_u8, 1);
          ctx->n_pending = 0;
          ctx->total_bytes += ctx->block_size;
          n_bytes -= n_left;
          msg += n_left;
        }
    }

  if ((n_blocks = n_bytes / ctx->block_size))
    {
      if (ctx->block_size == SHA512_BLOCK_SIZE)
        clib_sha512_block (ctx, msg, n_blocks);
      else
        clib_sha256_block (ctx, msg, n_blocks);
      n_bytes -= n_blocks * ctx->block_size;
      msg += n_blocks * ctx->block_size;
      ctx->total_bytes += n_blocks * ctx->block_size;
    }

  if (n_bytes)
    {
      clib_memset_u8 (ctx->pending.as_u8, 0, ctx->block_size);
      clib_memcpy_fast (ctx->pending.as_u8, msg, n_bytes);
      ctx->n_pending = n_bytes;
    }
  else
    ctx->n_pending = 0;
}

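/* finalize (FIPS 180-4 section 5.1): append the 0x80 terminator, zero-pad,
   store the message length in bits as a big-endian integer at the end of
   the last block, then write out the digest */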
static_always_inline void
clib_sha2_final (clib_sha2_ctx_t *ctx, u8 *digest)
{
  int i;

  ctx->total_bytes += ctx->n_pending;
  if (ctx->n_pending == 0)
    {
      clib_memset (ctx->pending.as_u8, 0, ctx->block_size);
      ctx->pending.as_u8[0] = 0x80;
    }
  else if (ctx->n_pending + sizeof (u64) + sizeof (u8) > ctx->block_size)
    {
      ctx->pending.as_u8[ctx->n_pending] = 0x80;
      if (ctx->block_size == SHA512_BLOCK_SIZE)
        clib_sha512_block (ctx, ctx->pending.as_u8, 1);
      else
        clib_sha256_block (ctx, ctx->pending.as_u8, 1);
      clib_memset (ctx->pending.as_u8, 0, ctx->block_size);
    }
  else
    ctx->pending.as_u8[ctx->n_pending] = 0x80;

  ctx->pending.as_u64[ctx->block_size / 8 - 1] =
    clib_net_to_host_u64 (ctx->total_bytes * 8);
  if (ctx->block_size == SHA512_BLOCK_SIZE)
    clib_sha512_block (ctx, ctx->pending.as_u8, 1);
  else
    clib_sha256_block (ctx, ctx->pending.as_u8, 1);

  if (ctx->block_size == SHA512_BLOCK_SIZE)
    {
      for (i = 0; i < ctx->digest_size / sizeof (u64); i++)
        *((u64 *) digest + i) = clib_net_to_host_u64 (ctx->h64[i]);

      /* sha512-224 case - write half of u64 */
      if (i * sizeof (u64) < ctx->digest_size)
        *((u32 *) digest + 2 * i) = clib_net_to_host_u32 (ctx->h64[i] >> 32);
    }
  else
    for (i = 0; i < ctx->digest_size / sizeof (u32); i++)
      *((u32 *) digest + i) = clib_net_to_host_u32 (ctx->h32[i]);
}

static_always_inline void
clib_sha2 (clib_sha2_type_t type, const u8 *msg, uword len, u8 *digest)
{
  clib_sha2_ctx_t ctx;
  clib_sha2_init (&ctx, type);
  clib_sha2_update (&ctx, msg, len);
  clib_sha2_final (&ctx, digest);
}

#define clib_sha224(...) clib_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_sha256(...) clib_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_sha384(...) clib_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_sha512(...) clib_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_sha512_224(...) clib_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_sha512_256(...) clib_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
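/*
 * Example usage (illustrative sketch; `data' and `data_len' are
 * placeholders):
 *
 *   u8 digest[SHA256_DIGEST_SIZE];
 *
 *   clib_sha256 (data, data_len, digest);      // one-shot
 *
 *   clib_sha2_ctx_t ctx;                       // incremental
 *   clib_sha2_init (&ctx, CLIB_SHA2_256);
 *   clib_sha2_update (&ctx, data, data_len);   // may be called repeatedly
 *   clib_sha2_final (&ctx, digest);
 */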
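/* HMAC (RFC 2104): H ((K ^ opad) || H ((K ^ ipad) || msg)), where K is the
   key zero-padded to one block (hashed first if longer than a block) */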
static_always_inline void
clib_hmac_sha2 (clib_sha2_type_t type, const u8 *key, uword key_len,
                const u8 *msg, uword len, u8 *digest)
{
  clib_sha2_ctx_t _ctx, *ctx = &_ctx;
  uword key_data[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
  u8 i_digest[SHA2_MAX_DIGEST_SIZE];
  int i, n_words;

  clib_sha2_init (ctx, type);
  n_words = ctx->block_size / sizeof (uword);

  /* key */
  if (key_len > ctx->block_size)
    {
      /* key is longer than block, calculate hash of key */
      clib_sha2_update (ctx, key, key_len);
      for (i = (ctx->digest_size / sizeof (uword)) / 2; i < n_words; i++)
        key_data[i] = 0;
      clib_sha2_final (ctx, (u8 *) key_data);
      clib_sha2_init (ctx, type);
    }
  else
    {
      for (i = 0; i < n_words; i++)
        key_data[i] = 0;
      clib_memcpy_fast (key_data, key, key_len);
    }

  /* ipad */
  for (i = 0; i < n_words; i++)
    ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x3636363636363636;
  if (ctx->block_size == SHA512_BLOCK_SIZE)
    clib_sha512_block (ctx, ctx->pending.as_u8, 1);
  else
    clib_sha256_block (ctx, ctx->pending.as_u8, 1);
  ctx->total_bytes += ctx->block_size;

  /* message */
  clib_sha2_update (ctx, msg, len);
  clib_sha2_final (ctx, i_digest);

  /* opad */
  clib_sha2_init (ctx, type);
  for (i = 0; i < n_words; i++)
    ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x5c5c5c5c5c5c5c5c;
  if (ctx->block_size == SHA512_BLOCK_SIZE)
    clib_sha512_block (ctx, ctx->pending.as_u8, 1);
  else
    clib_sha256_block (ctx, ctx->pending.as_u8, 1);
  ctx->total_bytes += ctx->block_size;

  /* digest */
  clib_sha2_update (ctx, i_digest, ctx->digest_size);
  clib_sha2_final (ctx, digest);
}

#define clib_hmac_sha224(...) clib_hmac_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_hmac_sha256(...) clib_hmac_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_hmac_sha384(...) clib_hmac_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_hmac_sha512(...) clib_hmac_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_hmac_sha512_224(...) \
  clib_hmac_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_hmac_sha512_256(...) \
  clib_hmac_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
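/*
 * Example (illustrative sketch; `key', `key_len', `msg' and `msg_len' are
 * placeholders):
 *
 *   u8 mac[SHA256_DIGEST_SIZE];
 *   clib_hmac_sha256 (key, key_len, msg, msg_len, mac);
 */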

#endif /* included_sha2_h */