1 /***********************************************************************
3 ** Implementation of the Skein block functions.
5 ** Source code author: Doug Whiting, 2008.
7 ** This algorithm and source code is released to the public domain.
9 ** Compile-time switches:
11 ** SKEIN_USE_ASM -- set bits (256/512/1024) to select which
12 ** versions use ASM code for block processing
13 ** [default: use C for all block sizes]
15 ************************************************************************/
17 #include <linux/string.h>
18 #include "skein_base.h"
19 #include "skein_block.h"
/*
 * Build configuration: all block sizes use the portable C implementation
 * (SKEIN_USE_ASM selects none of the 256/512/1024 ASM variants).
 * SKEIN_LOOP is read as three decimal digits selecting the per-size
 * unroll factor: hundreds digit -> 256, tens -> 512, ones -> 1024;
 * a digit of 0 means "fully unroll that size's round loop".
 */
22 #define SKEIN_USE_ASM (0) /* default is all C code (no ASM) */
26 #define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */
29 #define BLK_BITS (WCNT * 64) /* some useful definitions for code here */
/*
 * Layout of the on-stack key-schedule array kw[]: the three tweak
 * words live at the front (KW_TWK_BASE), the key words follow
 * (KW_KEY_BASE).  ks/ts are convenience views into kw[].
 */
30 #define KW_TWK_BASE (0)
31 #define KW_KEY_BASE (3)
32 #define ks (kw + KW_KEY_BASE)
33 #define ts (kw + KW_TWK_BASE)
/* debug builds snapshot the working tweak back into the context */
36 #define debug_save_tweak(ctx) \
38 ctx->h.tweak[0] = ts[0]; \
39 ctx->h.tweak[1] = ts[1]; \
/* non-debug builds: debug_save_tweak() compiles to nothing */
42 #define debug_save_tweak(ctx)
/*
 * Skein-256 round machinery (C implementation).  ROUND256 performs one
 * MIX-and-rotate layer over the four state words; I256 injects a
 * key-schedule subkey.  The fully-unrolled variant indexes ks[] mod 5
 * and ts[] mod 3 (the extended key/tweak word counts); the looping
 * variant keeps a sliding window of the schedule in kw[] and rotates
 * it in place each injection.
 */
45 #if !(SKEIN_USE_ASM & 256)
47 #define RCNT (SKEIN_256_ROUNDS_TOTAL / 8)
48 #ifdef SKEIN_LOOP /* configure how much to unroll the loop */
49 #define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10)
51 #define SKEIN_UNROLL_256 (0)
55 #if (RCNT % SKEIN_UNROLL_256)
56 #error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */
/*
 * One round: pairwise add + rotate + xor on (p0,p1) and (p2,p3);
 * ROT##_0 / ROT##_1 are the per-round rotation constants.
 */
59 #define ROUND256(p0, p1, p2, p3, ROT, r_num) \
62 X##p1 = rotl_64(X##p1, ROT##_0); \
65 X##p3 = rotl_64(X##p3, ROT##_1); \
/* unrolled variants of R256 (round) and I256 (subkey injection) */
69 #if SKEIN_UNROLL_256 == 0
70 #define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
72 ROUND256(p0, p1, p2, p3, ROT, r_num); \
77 /* inject the key schedule value */ \
78 X0 += ks[((R) + 1) % 5]; \
79 X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
80 X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
81 X3 += ks[((R) + 4) % 5] + (R) + 1; \
85 #define R256(p0, p1, p2, p3, ROT, r_num) ROUND256(p0, p1, p2, p3, ROT, r_num)
89 /* inject the key schedule value */ \
90 X0 += ks[r + (R) + 0]; \
91 X1 += ks[r + (R) + 1] + ts[r + (R) + 0]; \
92 X2 += ks[r + (R) + 2] + ts[r + (R) + 1]; \
93 X3 += ks[r + (R) + 3] + r + (R); \
94 /* rotate key schedule */ \
95 ks[r + (R) + 4] = ks[r + (R) - 1]; \
96 ts[r + (R) + 2] = ts[r + (R) - 1]; \
/*
 * Eight rounds = one schedule period: the word permutation alternates
 * (0,1,2,3)/(0,3,2,1) between rounds, with a subkey injection every
 * four rounds (elided I256 calls between the two groups).
 */
99 #define R256_8_ROUNDS(R) \
101 R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
102 R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
103 R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
104 R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
106 R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
107 R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
108 R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
109 R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
/* true if 8-round group NN must be emitted for the chosen unroll */
113 #define R256_UNROLL_R(NN) \
114 ((SKEIN_UNROLL_256 == 0 && \
115 SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \
116 (SKEIN_UNROLL_256 > (NN)))
/* the process-block body only emits groups 0..14; larger needs more code */
118 #if (SKEIN_UNROLL_256 > 14)
119 #error "need more unrolling in skein_256_process_block"
/*
 * Skein-512 round machinery: same structure as the 256-bit version but
 * over eight state words (four MIX pairs per round).  The unrolled
 * subkey injection indexes ks[] mod 9 and ts[] mod 3; the looping
 * variant slides the schedule window through kw[].
 */
123 #if !(SKEIN_USE_ASM & 512)
125 #define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
127 #ifdef SKEIN_LOOP /* configure how much to unroll the loop */
128 #define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
130 #define SKEIN_UNROLL_512 (0)
134 #if (RCNT % SKEIN_UNROLL_512)
135 #error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
/* one round: four parallel MIXes with rotation constants ROT##_0..3 */
138 #define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
141 X##p1 = rotl_64(X##p1, ROT##_0); \
144 X##p3 = rotl_64(X##p3, ROT##_1); \
147 X##p5 = rotl_64(X##p5, ROT##_2); \
149 X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3); \
/* unrolled variants of R512 (round) and I512 (subkey injection) */
153 #if SKEIN_UNROLL_512 == 0
154 #define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
156 ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
161 /* inject the key schedule value */ \
162 X0 += ks[((R) + 1) % 9]; \
163 X1 += ks[((R) + 2) % 9]; \
164 X2 += ks[((R) + 3) % 9]; \
165 X3 += ks[((R) + 4) % 9]; \
166 X4 += ks[((R) + 5) % 9]; \
167 X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
168 X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
169 X7 += ks[((R) + 8) % 9] + (R) + 1; \
172 #else /* looping version */
173 #define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
174 ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
178 /* inject the key schedule value */ \
179 X0 += ks[r + (R) + 0]; \
180 X1 += ks[r + (R) + 1]; \
181 X2 += ks[r + (R) + 2]; \
182 X3 += ks[r + (R) + 3]; \
183 X4 += ks[r + (R) + 4]; \
184 X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
185 X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
186 X7 += ks[r + (R) + 7] + r + (R); \
187 /* rotate key schedule */ \
188 ks[r + (R) + 8] = ks[r + (R) - 1]; \
189 ts[r + (R) + 2] = ts[r + (R) - 1]; \
191 #endif /* end of looped code definitions */
/*
 * Eight rounds with the Skein-512 word permutation applied between
 * rounds; subkey injection every four rounds.
 */
192 #define R512_8_ROUNDS(R) /* do 8 full rounds */ \
194 R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
195 R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
196 R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
197 R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
199 R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
200 R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
201 R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
202 R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
203 I512(2 * (R) + 1); /* and key injection */ \
/* true if 8-round group NN must be emitted for the chosen unroll */
205 #define R512_UNROLL_R(NN) \
206 ((SKEIN_UNROLL_512 == 0 && \
207 SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
208 (SKEIN_UNROLL_512 > (NN)))
210 #if (SKEIN_UNROLL_512 > 14)
211 #error "need more unrolling in skein_512_process_block"
/*
 * Skein-1024 round machinery: sixteen state words, eight MIX pairs per
 * round.  Unrolled subkey injection indexes ks[] mod 17 and ts[] mod 3;
 * the looping variant slides the schedule window through kw[].
 */
215 #if !(SKEIN_USE_ASM & 1024)
217 #define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
218 #ifdef SKEIN_LOOP /* configure how much to unroll the loop */
219 #define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10)
221 #define SKEIN_UNROLL_1024 (0)
224 #if (SKEIN_UNROLL_1024 != 0)
225 #if (RCNT % SKEIN_UNROLL_1024)
226 #error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
/* one round: eight parallel MIXes, rotation constants ROT##_0..7 */
229 #define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
233 X##p1 = rotl_64(X##p1, ROT##_0); \
236 X##p3 = rotl_64(X##p3, ROT##_1); \
239 X##p5 = rotl_64(X##p5, ROT##_2); \
242 X##p7 = rotl_64(X##p7, ROT##_3); \
245 X##p9 = rotl_64(X##p9, ROT##_4); \
248 X##pB = rotl_64(X##pB, ROT##_5); \
251 X##pD = rotl_64(X##pD, ROT##_6); \
254 X##pF = rotl_64(X##pF, ROT##_7); \
/* unrolled variants of R1024 (round) and I1024 (subkey injection) */
258 #if SKEIN_UNROLL_1024 == 0
259 #define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
261 ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
266 /* inject the key schedule value */ \
267 X00 += ks[((R) + 1) % 17]; \
268 X01 += ks[((R) + 2) % 17]; \
269 X02 += ks[((R) + 3) % 17]; \
270 X03 += ks[((R) + 4) % 17]; \
271 X04 += ks[((R) + 5) % 17]; \
272 X05 += ks[((R) + 6) % 17]; \
273 X06 += ks[((R) + 7) % 17]; \
274 X07 += ks[((R) + 8) % 17]; \
275 X08 += ks[((R) + 9) % 17]; \
276 X09 += ks[((R) + 10) % 17]; \
277 X10 += ks[((R) + 11) % 17]; \
278 X11 += ks[((R) + 12) % 17]; \
279 X12 += ks[((R) + 13) % 17]; \
280 X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
281 X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
282 X15 += ks[((R) + 16) % 17] + (R) + 1; \
284 #else /* looping version */
285 #define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
287 ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
292 /* inject the key schedule value */ \
293 X00 += ks[r + (R) + 0]; \
294 X01 += ks[r + (R) + 1]; \
295 X02 += ks[r + (R) + 2]; \
296 X03 += ks[r + (R) + 3]; \
297 X04 += ks[r + (R) + 4]; \
298 X05 += ks[r + (R) + 5]; \
299 X06 += ks[r + (R) + 6]; \
300 X07 += ks[r + (R) + 7]; \
301 X08 += ks[r + (R) + 8]; \
302 X09 += ks[r + (R) + 9]; \
303 X10 += ks[r + (R) + 10]; \
304 X11 += ks[r + (R) + 11]; \
305 X12 += ks[r + (R) + 12]; \
306 X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
307 X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
308 X15 += ks[r + (R) + 15] + r + (R); \
309 /* rotate key schedule */ \
310 ks[r + (R) + 16] = ks[r + (R) - 1]; \
311 ts[r + (R) + 2] = ts[r + (R) - 1]; \
/*
 * Eight rounds with the Skein-1024 word permutation between rounds;
 * NOTE the permutation arguments are written with leading zeros
 * (00, 01, ... 09) purely for visual alignment -- they are pasted as
 * variable-name suffixes (X00..X15), not parsed as octal.
 */
315 #define R1024_8_ROUNDS(R) \
317 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
318 R1024_0, 8*(R) + 1); \
319 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
320 R1024_1, 8*(R) + 2); \
321 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
322 R1024_2, 8*(R) + 3); \
323 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
324 R1024_3, 8*(R) + 4); \
326 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
327 R1024_4, 8*(R) + 5); \
328 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
329 R1024_5, 8*(R) + 6); \
330 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
331 R1024_6, 8*(R) + 7); \
332 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
333 R1024_7, 8*(R) + 8); \
/* true if 8-round group NN must be emitted for the chosen unroll */
337 #define R1024_UNROLL_R(NN) \
338 ((SKEIN_UNROLL_1024 == 0 && \
339 SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
340 (SKEIN_UNROLL_1024 > (NN)))
342 #if (SKEIN_UNROLL_1024 > 14)
343 #error "need more unrolling in Skein_1024_Process_Block"
347 /***************************** SKEIN_256 ******************************/
348 #if !(SKEIN_USE_ASM & 256)
/*
 * skein_256_process_block() - run blk_cnt input blocks through the
 * Skein-256 compression function, updating the chaining variables
 * (ctx->x[]) and the tweak (ctx->h.tweak[]) in place.  byte_cnt_add
 * is added to the tweak's processed-byte counter per block (the final
 * partial block passes a smaller value -- see callers; TODO confirm).
 */
349 void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
350 size_t blk_cnt, size_t byte_cnt_add)
353 WCNT = SKEIN_256_STATE_WORDS
357 /* key schedule: chaining vars + tweak + "rot"*/
358 u64 kw[WCNT+4+RCNT*2];
360 /* key schedule words : chaining vars + tweak */
363 u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
364 u64 w[WCNT]; /* local copy of input block */
366 const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */
373 skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
/* load the tweak into the working schedule */
374 ts[0] = ctx->h.tweak[0];
375 ts[1] = ctx->h.tweak[1];
378 * this implementation only supports 2**64 input bytes
379 * (no carry out here)
381 ts[0] += byte_cnt_add; /* update processed length */
383 /* precompute the key schedule for this block */
/* ks[4] is the Threefish extended-key word: xor of all key words + parity */
388 ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY;
/* ts[2] is the extended tweak word: xor of the two tweak words */
390 ts[2] = ts[0] ^ ts[1];
392 /* get input block in little-endian format */
393 skein_get64_lsb_first(w, blk_ptr, WCNT);
394 debug_save_tweak(ctx);
396 /* do the first full key injection */
398 X1 = w[1] + ks[1] + ts[0];
399 X2 = w[2] + ks[2] + ts[1];
/* advance to the next input block for the following loop iteration */
402 blk_ptr += SKEIN_256_BLOCK_BYTES;
/* looping build: step r through the sliding key schedule; unrolled: run once */
406 r < (SKEIN_UNROLL_256 ? 2 * RCNT : 2);
407 r += (SKEIN_UNROLL_256 ? 2 * SKEIN_UNROLL_256 : 1)) {
/* conditionally emit 8-round groups 10..14 per the unroll factor */
436 #if R256_UNROLL_R(10)
439 #if R256_UNROLL_R(11)
442 #if R256_UNROLL_R(12)
445 #if R256_UNROLL_R(13)
448 #if R256_UNROLL_R(14)
452 /* do the final "feedforward" xor, update context chaining */
453 ctx->x[0] = X0 ^ w[0];
454 ctx->x[1] = X1 ^ w[1];
455 ctx->x[2] = X2 ^ w[2];
456 ctx->x[3] = X3 ^ w[3];
/* only the first block of a message carries the FIRST flag */
458 ts[1] &= ~SKEIN_T1_FLAG_FIRST;
/* write the updated tweak back for the next call */
460 ctx->h.tweak[0] = ts[0];
461 ctx->h.tweak[1] = ts[1];
464 #if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/*
 * Code-size probe: assumes the linker places this function directly
 * after skein_256_process_block -- NOTE(review): pointer subtraction
 * across distinct functions is not guaranteed by the C standard.
 */
465 size_t skein_256_process_block_code_size(void)
467 return ((u8 *) skein_256_process_block_code_size) -
468 ((u8 *) skein_256_process_block);
/* report the compile-time unroll factor chosen for Skein-256 */
470 unsigned int skein_256_unroll_cnt(void)
472 return SKEIN_UNROLL_256;
477 /***************************** SKEIN_512 ******************************/
478 #if !(SKEIN_USE_ASM & 512)
/*
 * skein_512_process_block() - run blk_cnt input blocks through the
 * Skein-512 compression function, updating the chaining variables
 * (ctx->x[]) and tweak (ctx->h.tweak[]) in place.  Same structure as
 * the 256-bit version but with eight state words and a 9-word
 * extended key schedule.
 */
479 void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
480 size_t blk_cnt, size_t byte_cnt_add)
483 WCNT = SKEIN_512_STATE_WORDS
487 u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
489 u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
491 u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
492 u64 w[WCNT]; /* local copy of input block */
494 const u64 *X_ptr[8]; /* use for debugging (help cc put Xn in regs) */
506 skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
/* load the tweak into the working schedule */
507 ts[0] = ctx->h.tweak[0];
508 ts[1] = ctx->h.tweak[1];
511 * this implementation only supports 2**64 input bytes
512 * (no carry out here)
514 ts[0] += byte_cnt_add; /* update processed length */
516 /* precompute the key schedule for this block */
/* ks[8] is the Threefish extended-key word: xor of all key words + parity */
525 ks[8] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
526 ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ SKEIN_KS_PARITY;
/* ts[2] is the extended tweak word: xor of the two tweak words */
528 ts[2] = ts[0] ^ ts[1];
530 /* get input block in little-endian format */
531 skein_get64_lsb_first(w, blk_ptr, WCNT);
532 debug_save_tweak(ctx);
534 /* do the first full key injection */
540 X5 = w[5] + ks[5] + ts[0];
541 X6 = w[6] + ks[6] + ts[1];
/* advance to the next input block for the following loop iteration */
544 blk_ptr += SKEIN_512_BLOCK_BYTES;
/* looping build: step r through the sliding key schedule; unrolled: run once */
548 r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
549 r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
/* conditionally emit 8-round groups 10..14 per the unroll factor */
580 #if R512_UNROLL_R(10)
583 #if R512_UNROLL_R(11)
586 #if R512_UNROLL_R(12)
589 #if R512_UNROLL_R(13)
592 #if R512_UNROLL_R(14)
597 /* do the final "feedforward" xor, update context chaining */
598 ctx->x[0] = X0 ^ w[0];
599 ctx->x[1] = X1 ^ w[1];
600 ctx->x[2] = X2 ^ w[2];
601 ctx->x[3] = X3 ^ w[3];
602 ctx->x[4] = X4 ^ w[4];
603 ctx->x[5] = X5 ^ w[5];
604 ctx->x[6] = X6 ^ w[6];
605 ctx->x[7] = X7 ^ w[7];
/* only the first block of a message carries the FIRST flag */
607 ts[1] &= ~SKEIN_T1_FLAG_FIRST;
/* write the updated tweak back for the next call */
609 ctx->h.tweak[0] = ts[0];
610 ctx->h.tweak[1] = ts[1];
613 #if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/*
 * Code-size probe: assumes the linker places this function directly
 * after skein_512_process_block -- NOTE(review): pointer subtraction
 * across distinct functions is not guaranteed by the C standard.
 */
614 size_t skein_512_process_block_code_size(void)
616 return ((u8 *) skein_512_process_block_code_size) -
617 ((u8 *) skein_512_process_block);
/* report the compile-time unroll factor chosen for Skein-512 */
619 unsigned int skein_512_unroll_cnt(void)
621 return SKEIN_UNROLL_512;
626 /***************************** SKEIN_1024 ******************************/
627 #if !(SKEIN_USE_ASM & 1024)
/*
 * skein_1024_process_block() - run blk_cnt input blocks through the
 * Skein-1024 compression function, updating the chaining variables
 * (ctx->x[]) and tweak (ctx->h.tweak[]) in place.  Sixteen state
 * words and a 17-word extended key schedule; per SKEIN_LOOP's default
 * this size uses the looping (non-unrolled) build.
 */
628 void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
629 size_t blk_cnt, size_t byte_cnt_add)
630 { /* do it in C, always looping (unrolled is bigger AND slower!) */
632 WCNT = SKEIN_1024_STATE_WORDS
635 #if (SKEIN_UNROLL_1024 != 0)
636 u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
638 u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
641 /* local copy of vars, for speed */
642 u64 X00, X01, X02, X03, X04, X05, X06, X07,
643 X08, X09, X10, X11, X12, X13, X14, X15;
644 u64 w[WCNT]; /* local copy of input block */
646 skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
/* load the tweak into the working schedule */
647 ts[0] = ctx->h.tweak[0];
648 ts[1] = ctx->h.tweak[1];
651 * this implementation only supports 2**64 input bytes
652 * (no carry out here)
654 ts[0] += byte_cnt_add; /* update processed length */
656 /* precompute the key schedule for this block */
/* ks[16] is the Threefish extended-key word: xor of all key words + parity */
673 ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
674 ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
675 ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
676 ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY;
/* ts[2] is the extended tweak word: xor of the two tweak words */
678 ts[2] = ts[0] ^ ts[1];
680 /* get input block in little-endian format */
681 skein_get64_lsb_first(w, blk_ptr, WCNT);
682 debug_save_tweak(ctx);
684 /* do the first full key injection */
695 X10 = w[10] + ks[10];
696 X11 = w[11] + ks[11];
697 X12 = w[12] + ks[12];
698 X13 = w[13] + ks[13] + ts[0];
699 X14 = w[14] + ks[14] + ts[1];
700 X15 = w[15] + ks[15];
/* looping build: step r through the sliding key schedule; unrolled: run once */
703 r < (SKEIN_UNROLL_1024 ? 2 * RCNT : 2);
704 r += (SKEIN_UNROLL_1024 ? 2 * SKEIN_UNROLL_1024 : 1)) {
/* conditionally emit 8-round groups 1..14 per the unroll factor */
706 #if R1024_UNROLL_R(1)
709 #if R1024_UNROLL_R(2)
712 #if R1024_UNROLL_R(3)
715 #if R1024_UNROLL_R(4)
718 #if R1024_UNROLL_R(5)
721 #if R1024_UNROLL_R(6)
724 #if R1024_UNROLL_R(7)
727 #if R1024_UNROLL_R(8)
730 #if R1024_UNROLL_R(9)
733 #if R1024_UNROLL_R(10)
736 #if R1024_UNROLL_R(11)
739 #if R1024_UNROLL_R(12)
742 #if R1024_UNROLL_R(13)
745 #if R1024_UNROLL_R(14)
749 /* do the final "feedforward" xor, update context chaining */
751 ctx->x[0] = X00 ^ w[0];
752 ctx->x[1] = X01 ^ w[1];
753 ctx->x[2] = X02 ^ w[2];
754 ctx->x[3] = X03 ^ w[3];
755 ctx->x[4] = X04 ^ w[4];
756 ctx->x[5] = X05 ^ w[5];
757 ctx->x[6] = X06 ^ w[6];
758 ctx->x[7] = X07 ^ w[7];
759 ctx->x[8] = X08 ^ w[8];
760 ctx->x[9] = X09 ^ w[9];
761 ctx->x[10] = X10 ^ w[10];
762 ctx->x[11] = X11 ^ w[11];
763 ctx->x[12] = X12 ^ w[12];
764 ctx->x[13] = X13 ^ w[13];
765 ctx->x[14] = X14 ^ w[14];
766 ctx->x[15] = X15 ^ w[15];
/* only the first block of a message carries the FIRST flag */
768 ts[1] &= ~SKEIN_T1_FLAG_FIRST;
/* advance to the next input block */
769 blk_ptr += SKEIN_1024_BLOCK_BYTES;
/* write the updated tweak back for the next call */
771 ctx->h.tweak[0] = ts[0];
772 ctx->h.tweak[1] = ts[1];
775 #if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/*
 * Code-size probe: assumes the linker places this function directly
 * after skein_1024_process_block -- NOTE(review): pointer subtraction
 * across distinct functions is not guaranteed by the C standard.
 */
776 size_t skein_1024_process_block_code_size(void)
778 return ((u8 *) skein_1024_process_block_code_size) -
779 ((u8 *) skein_1024_process_block);
/* report the compile-time unroll factor chosen for Skein-1024 */
781 unsigned int skein_1024_unroll_cnt(void)
783 return SKEIN_UNROLL_1024;