1 /*
2 * Argon2 reference source code package - reference C implementations
3 *
4 * Copyright 2015
5 * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
6 *
7 * You may use this work under the terms of a Creative Commons CC0 1.0
8 * License/Waiver or the Apache Public License 2.0, at your option. The terms of
9 * these licenses can be found at:
10 *
11 * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
12 * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * You should have received a copy of both of these licenses along with this
15 * software. If not, they may be obtained at the above URLs.
16 */
17
18 /*For memory wiping*/
19 #ifdef _MSC_VER
20 #include <windows.h>
21 #include <winbase.h> /* For SecureZeroMemory */
22 #endif
23 #if defined __STDC_LIB_EXT1__
24 #define __STDC_WANT_LIB_EXT1__ 1
25 #endif
26 #define VC_GE_2005(version) (version >= 1400)
27
28 #include <inttypes.h>
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32
33 #include "core.h"
34 #include "thread.c"
35 #include "blake2.h"
36 #include "blake2-impl.h"
37
38 #ifdef GENKAT
39 #include "genkat.h"
40 #endif
41
42 #ifdef SUPPORT_SSE
43 #include "opt.c"
44 #else
45 #include "ref.c"
46 #endif
47
48 #if defined(__clang__)
49 #if __has_attribute(optnone)
50 #define NOT_OPTIMIZED __attribute__((optnone))
51 #endif
52 #elif defined(__GNUC__)
53 #define GCC_VERSION \
54 (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
55 #if GCC_VERSION >= 40400
56 #define NOT_OPTIMIZED __attribute__((optimize("O0")))
57 #endif
58 #endif
59 #ifndef NOT_OPTIMIZED
60 #define NOT_OPTIMIZED
61 #endif
62
63 /* Argon2 Team - Begin Code */
blake2b_long(void * pout,size_t outlen,const void * in,size_t inlen)64 static int blake2b_long(void *pout, size_t outlen, const void *in, size_t inlen) {
65 uint8_t *out = (uint8_t *)pout;
66 blake2b_state blake_state;
67 uint8_t outlen_bytes[sizeof(uint32_t)] = {0};
68 int ret = -1;
69
70 if (outlen > UINT32_MAX) {
71 goto fail;
72 }
73
74 /* Ensure little-endian byte order! */
75 store32(outlen_bytes, (uint32_t)outlen);
76
77 #define TRY(statement) \
78 do { \
79 ret = statement; \
80 if (ret < 0) { \
81 goto fail; \
82 } \
83 } while ((void)0, 0)
84
85 if (outlen <= BLAKE2B_OUTBYTES) {
86 TRY(blake2b_init(&blake_state, outlen));
87 TRY(blake2b_update(&blake_state, outlen_bytes, sizeof(outlen_bytes)));
88 TRY(blake2b_update(&blake_state, in, inlen));
89 TRY(blake2b_final(&blake_state, out, outlen));
90 } else {
91 uint32_t toproduce;
92 uint8_t out_buffer[BLAKE2B_OUTBYTES];
93 uint8_t in_buffer[BLAKE2B_OUTBYTES];
94 TRY(blake2b_init(&blake_state, BLAKE2B_OUTBYTES));
95 TRY(blake2b_update(&blake_state, outlen_bytes, sizeof(outlen_bytes)));
96 TRY(blake2b_update(&blake_state, in, inlen));
97 TRY(blake2b_final(&blake_state, out_buffer, BLAKE2B_OUTBYTES));
98 memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2);
99 out += BLAKE2B_OUTBYTES / 2;
100 toproduce = (uint32_t)outlen - BLAKE2B_OUTBYTES / 2;
101
102 while (toproduce > BLAKE2B_OUTBYTES) {
103 memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES);
104 TRY(blake2b(out_buffer, BLAKE2B_OUTBYTES, in_buffer,
105 BLAKE2B_OUTBYTES, NULL, 0));
106 memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2);
107 out += BLAKE2B_OUTBYTES / 2;
108 toproduce -= BLAKE2B_OUTBYTES / 2;
109 }
110
111 memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES);
112 TRY(blake2b(out_buffer, toproduce, in_buffer, BLAKE2B_OUTBYTES, NULL,
113 0));
114 memcpy(out, out_buffer, toproduce);
115 }
116 fail:
117 clear_internal_memory(&blake_state, sizeof(blake_state));
118 return ret;
119 #undef TRY
120 }
121 /* Argon2 Team - End Code */
122
123 /***************Instance and Position constructors**********/
init_block_value(block * b,uint8_t in)124 static void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
125
/* Copy all ARGON2_QWORDS_IN_BLOCK words of src into dst. */
static void copy_block(block *dst, const block *src) {
    memcpy(dst->v, src->v, ARGON2_QWORDS_IN_BLOCK * sizeof(uint64_t));
}
129
/* In-place XOR: dst->v[j] ^= src->v[j] for every word of the block. */
static void xor_block(block *dst, const block *src) {
    unsigned j;
    for (j = 0; j < ARGON2_QWORDS_IN_BLOCK; ++j) {
        dst->v[j] ^= src->v[j];
    }
}
136
/* Deserialize ARGON2_QWORDS_IN_BLOCK little-endian 64-bit words from
 * `input` into dst (load64 handles the byte-order conversion). */
static void load_block(block *dst, const void *input) {
    const uint8_t *bytes = (const uint8_t *)input;
    unsigned j;
    for (j = 0; j < ARGON2_QWORDS_IN_BLOCK; ++j) {
        dst->v[j] = load64(bytes + j * sizeof(dst->v[j]));
    }
}
143
store_block(void * output,const block * src)144 static void store_block(void *output, const block *src) {
145 unsigned i;
146 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
147 store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
148 }
149 }
150
151 /***************Memory functions*****************/
152 static
allocate_memory(const argon2_context * context,uint8_t ** memory,size_t num,size_t size)153 int allocate_memory(const argon2_context *context, uint8_t **memory,
154 size_t num, size_t size) {
155 size_t memory_size = num*size;
156 if (memory == NULL) {
157 return ARGON2_MEMORY_ALLOCATION_ERROR;
158 }
159
160 /* 1. Check for multiplication overflow */
161 if (size != 0 && memory_size / size != num) {
162 return ARGON2_MEMORY_ALLOCATION_ERROR;
163 }
164
165 /* 2. Try to allocate with appropriate allocator */
166 if (context->allocate_cbk) {
167 (context->allocate_cbk)(memory, memory_size);
168 } else {
169 *memory = malloc(memory_size);
170 }
171
172 if (*memory == NULL) {
173 return ARGON2_MEMORY_ALLOCATION_ERROR;
174 }
175
176 return ARGON2_OK;
177 }
178 static
free_memory(const argon2_context * context,uint8_t * memory,size_t num,size_t size)179 void free_memory(const argon2_context *context, uint8_t *memory,
180 size_t num, size_t size) {
181 size_t memory_size = num*size;
182 clear_internal_memory(memory, memory_size);
183 if (context->free_cbk) {
184 (context->free_cbk)(memory, memory_size);
185 } else {
186 free(memory);
187 }
188 }
189
/*
 * Zero n bytes at v in a way the compiler may not optimize away.
 * Picks a guaranteed-wipe platform facility when one exists; the
 * NOT_OPTIMIZED attribute on the function is a belt-and-braces measure
 * on top of that.
 */
void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
#if defined(_MSC_VER) && VC_GE_2005(_MSC_VER)
    SecureZeroMemory(v, n);
#elif defined memset_s
    memset_s(v, n, 0, n);
#elif defined(__OpenBSD__)
    explicit_bzero(v, n);
#else
    /* Calling memset through a volatile function pointer defeats
     * dead-store elimination of the final wipe. */
    static void *(*const volatile memset_sec)(void *, int, size_t) = &memset;
    memset_sec(v, 0, n);
#endif
}
202
/* Memory clear flag defaults to true. */
static int FLAG_clear_internal_memory = 1;

/* Wipe n bytes at v, unless v is NULL or clearing has been disabled
 * via FLAG_clear_internal_memory. */
static void clear_internal_memory(void *v, size_t n) {
    if (v != NULL && FLAG_clear_internal_memory != 0) {
        secure_wipe_memory(v, n);
    }
}
210
/*
 * Produce the final Argon2 tag: XOR the last block of every lane
 * together, hash the result with blake2b_long into context->out, scrub
 * the intermediates, and release the block memory.  No-op when either
 * argument is NULL.
 */
static void finalize(const argon2_context *context, argon2_instance_t *instance) {
    block last;
    uint8_t last_bytes[ARGON2_BLOCK_SIZE];
    uint32_t lane;

    if (context == NULL || instance == NULL) {
        return;
    }

    /* Start from lane 0's last block, then fold in every other lane's. */
    copy_block(&last, instance->memory + instance->lane_length - 1);
    for (lane = 1; lane < instance->lanes; ++lane) {
        uint32_t last_block_in_lane =
            lane * instance->lane_length + (instance->lane_length - 1);
        xor_block(&last, instance->memory + last_block_in_lane);
    }

    /* Hash the folded block into the output tag, then scrub copies. */
    store_block(last_bytes, &last);
    blake2b_long(context->out, context->outlen, last_bytes,
                 ARGON2_BLOCK_SIZE);
    clear_internal_memory(last.v, ARGON2_BLOCK_SIZE);
    clear_internal_memory(last_bytes, ARGON2_BLOCK_SIZE);

#ifdef GENKAT
    print_tag(context->out, context->outlen);
#endif

    free_memory(context, (uint8_t *)instance->memory,
                instance->memory_blocks, sizeof(block));
}
244
/*
 * Map a pseudo-random 32-bit value to the index (within the lane) of the
 * reference block for the block currently being computed.  Implements the
 * phi mapping of the Argon2 specification: the size of the referenceable
 * area depends on the pass, slice, and whether the reference lane is the
 * current lane; pseudo_rand is then squeezed non-uniformly (favoring
 * recent blocks) into that area.
 */
static uint32_t index_alpha(const argon2_instance_t *instance,
                            const argon2_position_t *position, uint32_t pseudo_rand,
                            int same_lane) {
    /*
     * Pass 0:
     * This lane : all already finished segments plus already constructed
     * blocks in this segment
     * Other lanes : all already finished segments
     * Pass 1+:
     * This lane : (SYNC_POINTS - 1) last segments plus already constructed
     * blocks in this segment
     * Other lanes : (SYNC_POINTS - 1) last segments
     */
    uint32_t reference_area_size;
    uint64_t relative_position;
    uint32_t start_position, absolute_position;

    if (0 == position->pass) {
        /* First pass */
        if (0 == position->slice) {
            /* First slice */
            reference_area_size =
                position->index - 1; /* all but the previous */
        } else {
            if (same_lane) {
                /* The same lane => add current segment */
                reference_area_size =
                    position->slice * instance->segment_length +
                    position->index - 1;
            } else {
                /* Other lane: finished segments only.  The (-1), with its
                 * intentional unsigned wrap into the subtraction, excludes
                 * one block when index == 0 — NOTE(review): matches the
                 * reference behavior; confirm against the Argon2 spec. */
                reference_area_size =
                    position->slice * instance->segment_length +
                    ((position->index == 0) ? (-1) : 0);
            }
        }
    } else {
        /* Second pass */
        if (same_lane) {
            reference_area_size = instance->lane_length -
                                  instance->segment_length + position->index -
                                  1;
        } else {
            reference_area_size = instance->lane_length -
                                  instance->segment_length +
                                  ((position->index == 0) ? (-1) : 0);
        }
    }

    /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
     * relative position.  Squaring in 64 bits and taking the high 32 bits
     * biases the choice toward higher (more recent) indices. */
    relative_position = pseudo_rand;
    relative_position = relative_position * relative_position >> 32;
    relative_position = reference_area_size - 1 -
                        (reference_area_size * relative_position >> 32);

    /* 1.2.5 Computing starting position */
    start_position = 0;

    if (0 != position->pass) {
        /* On later passes the window starts just after the current slice
         * (wrapping to 0 after the last slice). */
        start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
                             ? 0
                             : (position->slice + 1) * instance->segment_length;
    }

    /* 1.2.6. Computing absolute position */
    absolute_position = (start_position + relative_position) %
                        instance->lane_length; /* absolute position */
    return absolute_position;
}
314
315 /* Single-threaded version for p=1 case */
fill_memory_blocks_st(argon2_instance_t * instance)316 static int fill_memory_blocks_st(argon2_instance_t *instance) {
317 uint32_t r, s, l;
318
319 for (r = 0; r < instance->passes; ++r) {
320 for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
321 for (l = 0; l < instance->lanes; ++l) {
322 argon2_position_t position = {r, l, (uint8_t)s, 0};
323 fill_segment(instance, position);
324 }
325 }
326 #ifdef GENKAT
327 internal_kat(instance, r); /* Print all memory blocks */
328 #endif
329 }
330 return ARGON2_OK;
331 }
332
333 #if !defined(ARGON2_NO_THREADS)
334
/* Worker-thread entry point: runs fill_segment() for the position packed
 * into thread_data, then exits the thread.  The signature differs per
 * platform (Windows CRT thread vs. pthread start routine). */
#ifdef _WIN32
static unsigned __stdcall fill_segment_thr(void *thread_data)
#else
static void *fill_segment_thr(void *thread_data)
#endif
{
    argon2_thread_data *my_data = thread_data;
    fill_segment(my_data->instance_ptr, my_data->pos);
    argon2_thread_exit();
    return 0;
}
346
347 /* Multi-threaded version for p > 1 case */
fill_memory_blocks_mt(argon2_instance_t * instance)348 static int fill_memory_blocks_mt(argon2_instance_t *instance) {
349 uint32_t r, s;
350 argon2_thread_handle_t *thread = NULL;
351 argon2_thread_data *thr_data = NULL;
352 int rc = ARGON2_OK;
353
354 /* 1. Allocating space for threads */
355 thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
356 if (thread == NULL) {
357 rc = ARGON2_MEMORY_ALLOCATION_ERROR;
358 goto fail;
359 }
360
361 thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
362 if (thr_data == NULL) {
363 rc = ARGON2_MEMORY_ALLOCATION_ERROR;
364 goto fail;
365 }
366
367 for (r = 0; r < instance->passes; ++r) {
368 for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
369 uint32_t l;
370
371 /* 2. Calling threads */
372 for (l = 0; l < instance->lanes; ++l) {
373 argon2_position_t position;
374
375 /* 2.1 Join a thread if limit is exceeded */
376 if (l >= instance->threads) {
377 if (argon2_thread_join(thread[l - instance->threads])) {
378 rc = ARGON2_THREAD_FAIL;
379 goto fail;
380 }
381 }
382
383 /* 2.2 Create thread */
384 position.pass = r;
385 position.lane = l;
386 position.slice = (uint8_t)s;
387 position.index = 0;
388 thr_data[l].instance_ptr =
389 instance; /* preparing the thread input */
390 memcpy(&(thr_data[l].pos), &position,
391 sizeof(argon2_position_t));
392 if (argon2_thread_create(&thread[l], &fill_segment_thr,
393 (void *)&thr_data[l])) {
394 rc = ARGON2_THREAD_FAIL;
395 goto fail;
396 }
397
398 /* fill_segment(instance, position); */
399 /*Non-thread equivalent of the lines above */
400 }
401
402 /* 3. Joining remaining threads */
403 for (l = instance->lanes - instance->threads; l < instance->lanes;
404 ++l) {
405 if (argon2_thread_join(thread[l])) {
406 rc = ARGON2_THREAD_FAIL;
407 goto fail;
408 }
409 }
410 }
411
412 #ifdef GENKAT
413 internal_kat(instance, r); /* Print all memory blocks */
414 #endif
415 }
416
417 fail:
418 if (thread != NULL) {
419 free(thread);
420 }
421 if (thr_data != NULL) {
422 free(thr_data);
423 }
424 return rc;
425 }
426
427 #endif /* ARGON2_NO_THREADS */
428 static
fill_memory_blocks(argon2_instance_t * instance)429 int fill_memory_blocks(argon2_instance_t *instance) {
430 if (instance == NULL || instance->lanes == 0) {
431 return ARGON2_INCORRECT_PARAMETER;
432 }
433 #if defined(ARGON2_NO_THREADS)
434 return fill_memory_blocks_st(instance);
435 #else
436 return instance->threads == 1 ?
437 fill_memory_blocks_st(instance) : fill_memory_blocks_mt(instance);
438 #endif
439 }
440 static
validate_inputs(const argon2_context * context)441 int validate_inputs(const argon2_context *context) {
442 if (NULL == context) {
443 return ARGON2_INCORRECT_PARAMETER;
444 }
445
446 if (NULL == context->out) {
447 return ARGON2_OUTPUT_PTR_NULL;
448 }
449
450 /* Validate output length */
451 if (ARGON2_MIN_OUTLEN > context->outlen) {
452 return ARGON2_OUTPUT_TOO_SHORT;
453 }
454
455 if (ARGON2_MAX_OUTLEN < context->outlen) {
456 return ARGON2_OUTPUT_TOO_LONG;
457 }
458
459 /* Validate password (required param) */
460 if (NULL == context->pwd) {
461 if (0 != context->pwdlen) {
462 return ARGON2_PWD_PTR_MISMATCH;
463 }
464 }
465
466 if (ARGON2_MIN_PWD_LENGTH > context->pwdlen) {
467 return ARGON2_PWD_TOO_SHORT;
468 }
469
470 if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
471 return ARGON2_PWD_TOO_LONG;
472 }
473
474 /* Validate salt (required param) */
475 if (NULL == context->salt) {
476 if (0 != context->saltlen) {
477 return ARGON2_SALT_PTR_MISMATCH;
478 }
479 }
480
481 if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
482 return ARGON2_SALT_TOO_SHORT;
483 }
484
485 if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
486 return ARGON2_SALT_TOO_LONG;
487 }
488
489 /* Validate secret (optional param) */
490 if (NULL == context->secret) {
491 if (0 != context->secretlen) {
492 return ARGON2_SECRET_PTR_MISMATCH;
493 }
494 } else {
495 if (ARGON2_MIN_SECRET > context->secretlen) {
496 return ARGON2_SECRET_TOO_SHORT;
497 }
498 if (ARGON2_MAX_SECRET < context->secretlen) {
499 return ARGON2_SECRET_TOO_LONG;
500 }
501 }
502
503 /* Validate associated data (optional param) */
504 if (NULL == context->ad) {
505 if (0 != context->adlen) {
506 return ARGON2_AD_PTR_MISMATCH;
507 }
508 } else {
509 if (ARGON2_MIN_AD_LENGTH > context->adlen) {
510 return ARGON2_AD_TOO_SHORT;
511 }
512 if (ARGON2_MAX_AD_LENGTH < context->adlen) {
513 return ARGON2_AD_TOO_LONG;
514 }
515 }
516
517 /* Validate memory cost */
518 if (ARGON2_MIN_MEMORY > context->m_cost) {
519 return ARGON2_MEMORY_TOO_LITTLE;
520 }
521
522 if (ARGON2_MAX_MEMORY < context->m_cost) {
523 return ARGON2_MEMORY_TOO_MUCH;
524 }
525
526 if (context->m_cost < 8 * context->lanes) {
527 return ARGON2_MEMORY_TOO_LITTLE;
528 }
529
530 /* Validate time cost */
531 if (ARGON2_MIN_TIME > context->t_cost) {
532 return ARGON2_TIME_TOO_SMALL;
533 }
534
535 if (ARGON2_MAX_TIME < context->t_cost) {
536 return ARGON2_TIME_TOO_LARGE;
537 }
538
539 /* Validate lanes */
540 if (ARGON2_MIN_LANES > context->lanes) {
541 return ARGON2_LANES_TOO_FEW;
542 }
543
544 if (ARGON2_MAX_LANES < context->lanes) {
545 return ARGON2_LANES_TOO_MANY;
546 }
547
548 /* Validate threads */
549 if (ARGON2_MIN_THREADS > context->threads) {
550 return ARGON2_THREADS_TOO_FEW;
551 }
552
553 if (ARGON2_MAX_THREADS < context->threads) {
554 return ARGON2_THREADS_TOO_MANY;
555 }
556
557 if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
558 return ARGON2_FREE_MEMORY_CBK_NULL;
559 }
560
561 if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
562 return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
563 }
564
565 return ARGON2_OK;
566 }
567 static
fill_first_blocks(uint8_t * blockhash,const argon2_instance_t * instance)568 void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
569 uint32_t l;
570 /* Make the first and second block in each lane as G(H0||i||0) or
571 G(H0||i||1) */
572 uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
573 for (l = 0; l < instance->lanes; ++l) {
574
575 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
576 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
577 blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
578 ARGON2_PREHASH_SEED_LENGTH);
579 load_block(&instance->memory[l * instance->lane_length + 0],
580 blockhash_bytes);
581
582 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
583 blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
584 ARGON2_PREHASH_SEED_LENGTH);
585 load_block(&instance->memory[l * instance->lane_length + 1],
586 blockhash_bytes);
587 }
588 clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
589 }
590 static
initial_hash(uint8_t * blockhash,argon2_context * context,argon2_type type)591 void initial_hash(uint8_t *blockhash, argon2_context *context,
592 argon2_type type) {
593 blake2b_state BlakeHash;
594 uint8_t value[sizeof(uint32_t)];
595
596 if (NULL == context || NULL == blockhash) {
597 return;
598 }
599
600 blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);
601
602 store32(&value, context->lanes);
603 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
604
605 store32(&value, context->outlen);
606 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
607
608 store32(&value, context->m_cost);
609 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
610
611 store32(&value, context->t_cost);
612 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
613
614 store32(&value, context->version);
615 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
616
617 store32(&value, (uint32_t)type);
618 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
619
620 store32(&value, context->pwdlen);
621 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
622
623 if (context->pwd != NULL) {
624 blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
625 context->pwdlen);
626
627 if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
628 secure_wipe_memory(context->pwd, context->pwdlen);
629 context->pwdlen = 0;
630 }
631 }
632
633 store32(&value, context->saltlen);
634 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
635
636 if (context->salt != NULL) {
637 blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
638 context->saltlen);
639 }
640
641 store32(&value, context->secretlen);
642 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
643
644 if (context->secret != NULL) {
645 blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
646 context->secretlen);
647
648 if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
649 secure_wipe_memory(context->secret, context->secretlen);
650 context->secretlen = 0;
651 }
652 }
653
654 store32(&value, context->adlen);
655 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
656
657 if (context->ad != NULL) {
658 blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
659 context->adlen);
660 }
661
662 blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
663 }
664 static
initialize(argon2_instance_t * instance,argon2_context * context)665 int initialize(argon2_instance_t *instance, argon2_context *context) {
666 uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
667 int result = ARGON2_OK;
668
669 if (instance == NULL || context == NULL)
670 return ARGON2_INCORRECT_PARAMETER;
671 instance->context_ptr = context;
672
673 /* 1. Memory allocation */
674 result = allocate_memory(context, (uint8_t **)&(instance->memory),
675 instance->memory_blocks, sizeof(block));
676 if (result != ARGON2_OK) {
677 return result;
678 }
679
680 /* 2. Initial hashing */
681 /* H_0 + 8 extra bytes to produce the first blocks */
682 /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
683 /* Hashing all inputs */
684 initial_hash(blockhash, context, instance->type);
685 /* Zeroing 8 extra bytes */
686 clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
687 ARGON2_PREHASH_SEED_LENGTH -
688 ARGON2_PREHASH_DIGEST_LENGTH);
689
690 #ifdef GENKAT
691 initial_kat(blockhash, context, instance->type);
692 #endif
693
694 /* 3. Creating first blocks, we always have at least two blocks in a slice
695 */
696 fill_first_blocks(blockhash, instance);
697 /* Clearing the hash */
698 clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);
699
700 return ARGON2_OK;
701 }
702