1 // Copyright (c) 2017 Martijn Rijkeboer <mrr@sru-systems.com>
2 //
3 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
5 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
6 // option. This file may not be copied, modified, or distributed
7 // except according to those terms.
8
9 use crate::block::Block;
10 use crate::common;
11 use crate::context::Context;
12 use crate::memory::Memory;
13 use crate::variant::Variant;
14 use crate::version::Version;
15 use blake2b_simd::Params;
16 #[cfg(feature = "crossbeam-utils")]
17 use crossbeam_utils::thread::scope;
18 use std::mem;
19
/// Position of the block currently being operated on.
#[derive(Clone, Debug)]
struct Position {
    // Current pass over the memory (0..time_cost).
    pass: u32,
    // Lane being processed (0..config.lanes).
    lane: u32,
    // Sync-point slice within the lane (0..common::SYNC_POINTS).
    slice: u32,
    // Block index within the current segment.
    index: u32,
}
28
29 /// Initializes the memory.
initialize(context: &Context, memory: &mut Memory)30 pub fn initialize(context: &Context, memory: &mut Memory) {
31 fill_first_blocks(context, memory, &mut h0(context));
32 }
33
34 /// Fills all the memory blocks.
fill_memory_blocks(context: &Context, memory: &mut Memory)35 pub fn fill_memory_blocks(context: &Context, memory: &mut Memory) {
36 if context.config.uses_sequential() {
37 fill_memory_blocks_st(context, memory);
38 } else {
39 fill_memory_blocks_mt(context, memory);
40 }
41 }
42
43 /// Calculates the final hash and returns it.
finalize(context: &Context, memory: &Memory) -> Vec<u8>44 pub fn finalize(context: &Context, memory: &Memory) -> Vec<u8> {
45 let mut blockhash = memory[context.lane_length - 1].clone();
46 for l in 1..context.config.lanes {
47 let last_block_in_lane = l * context.lane_length + (context.lane_length - 1);
48 blockhash ^= &memory[last_block_in_lane];
49 }
50
51 let mut hash = vec![0u8; context.config.hash_length as usize];
52 hprime(hash.as_mut_slice(), blockhash.as_u8());
53 hash
54 }
55
blake2b(out: &mut [u8], input: &[&[u8]])56 fn blake2b(out: &mut [u8], input: &[&[u8]]) {
57 let mut blake = Params::new().hash_length(out.len()).to_state();
58 for slice in input {
59 blake.update(slice);
60 }
61 out.copy_from_slice(blake.finalize().as_bytes());
62 }
63
/// BlaMka primitive used by Argon2's modified Blake2b round:
/// x + y + 2 * trunc32(x) * trunc32(y), all modulo 2^64.
fn f_bla_mka(x: u64, y: u64) -> u64 {
    // Multiply only the low 32 bits of each operand; the product of two
    // 32-bit values always fits in a u64.
    let lo_x = x & 0xFFFF_FFFF;
    let lo_y = y & 0xFFFF_FFFF;
    let product = lo_x * lo_y;
    x.wrapping_add(y).wrapping_add(product.wrapping_mul(2))
}
69
/// Argon2 compression function G: fills `next_block` from `prev_block` and
/// `ref_block` by running a Blake2b-style permutation over the columns and
/// then the rows of their XOR, followed by a feed-forward XOR.
/// When `with_xor` is set, the previous contents of `next_block` are also
/// XORed into the result (used on later passes of Argon2 version 1.3).
fn fill_block(prev_block: &Block, ref_block: &Block, next_block: &mut Block, with_xor: bool) {
    // block_r = ref_block XOR prev_block; block_tmp keeps a copy for the
    // final feed-forward.
    let mut block_r = ref_block.clone();
    block_r ^= prev_block;
    let mut block_tmp = block_r.clone();

    // Now block_r = ref_block + prev_block and block_tmp = ref_block + prev_block
    if with_xor {
        // Saving the next block contents for XOR over
        block_tmp ^= next_block;
        // Now block_r = ref_block + prev_block and
        // block_tmp = ref_block + prev_block + next_block
    }

    // Apply Blake2 on columns of 64-bit words: (0,1,...,15) , then
    // (16,17,..31)... finally (112,113,...127)
    for i in 0..8 {
        let mut v0 = block_r[16 * i];
        let mut v1 = block_r[16 * i + 1];
        let mut v2 = block_r[16 * i + 2];
        let mut v3 = block_r[16 * i + 3];
        let mut v4 = block_r[16 * i + 4];
        let mut v5 = block_r[16 * i + 5];
        let mut v6 = block_r[16 * i + 6];
        let mut v7 = block_r[16 * i + 7];
        let mut v8 = block_r[16 * i + 8];
        let mut v9 = block_r[16 * i + 9];
        let mut v10 = block_r[16 * i + 10];
        let mut v11 = block_r[16 * i + 11];
        let mut v12 = block_r[16 * i + 12];
        let mut v13 = block_r[16 * i + 13];
        let mut v14 = block_r[16 * i + 14];
        let mut v15 = block_r[16 * i + 15];

        p(
            &mut v0, &mut v1, &mut v2, &mut v3, &mut v4, &mut v5, &mut v6, &mut v7, &mut v8,
            &mut v9, &mut v10, &mut v11, &mut v12, &mut v13, &mut v14, &mut v15,
        );

        // Write the permuted words back in place.
        block_r[16 * i] = v0;
        block_r[16 * i + 1] = v1;
        block_r[16 * i + 2] = v2;
        block_r[16 * i + 3] = v3;
        block_r[16 * i + 4] = v4;
        block_r[16 * i + 5] = v5;
        block_r[16 * i + 6] = v6;
        block_r[16 * i + 7] = v7;
        block_r[16 * i + 8] = v8;
        block_r[16 * i + 9] = v9;
        block_r[16 * i + 10] = v10;
        block_r[16 * i + 11] = v11;
        block_r[16 * i + 12] = v12;
        block_r[16 * i + 13] = v13;
        block_r[16 * i + 14] = v14;
        block_r[16 * i + 15] = v15;
    }

    // Apply Blake2 on rows of 64-bit words: (0,1,16,17,...112,113), then
    // (2,3,18,19,...,114,115).. finally (14,15,30,31,...,126,127)
    for i in 0..8 {
        let mut v0 = block_r[2 * i];
        let mut v1 = block_r[2 * i + 1];
        let mut v2 = block_r[2 * i + 16];
        let mut v3 = block_r[2 * i + 17];
        let mut v4 = block_r[2 * i + 32];
        let mut v5 = block_r[2 * i + 33];
        let mut v6 = block_r[2 * i + 48];
        let mut v7 = block_r[2 * i + 49];
        let mut v8 = block_r[2 * i + 64];
        let mut v9 = block_r[2 * i + 65];
        let mut v10 = block_r[2 * i + 80];
        let mut v11 = block_r[2 * i + 81];
        let mut v12 = block_r[2 * i + 96];
        let mut v13 = block_r[2 * i + 97];
        let mut v14 = block_r[2 * i + 112];
        let mut v15 = block_r[2 * i + 113];

        p(
            &mut v0, &mut v1, &mut v2, &mut v3, &mut v4, &mut v5, &mut v6, &mut v7, &mut v8,
            &mut v9, &mut v10, &mut v11, &mut v12, &mut v13, &mut v14, &mut v15,
        );

        // Write the permuted words back in place.
        block_r[2 * i] = v0;
        block_r[2 * i + 1] = v1;
        block_r[2 * i + 16] = v2;
        block_r[2 * i + 17] = v3;
        block_r[2 * i + 32] = v4;
        block_r[2 * i + 33] = v5;
        block_r[2 * i + 48] = v6;
        block_r[2 * i + 49] = v7;
        block_r[2 * i + 64] = v8;
        block_r[2 * i + 65] = v9;
        block_r[2 * i + 80] = v10;
        block_r[2 * i + 81] = v11;
        block_r[2 * i + 96] = v12;
        block_r[2 * i + 97] = v13;
        block_r[2 * i + 112] = v14;
        block_r[2 * i + 113] = v15;
    }

    // Feed-forward: next_block = block_tmp XOR permuted block_r.
    block_tmp.copy_to(next_block);
    *next_block ^= &block_r;
}
172
fill_first_blocks(context: &Context, memory: &mut Memory, h0: &mut [u8])173 fn fill_first_blocks(context: &Context, memory: &mut Memory, h0: &mut [u8]) {
174 for lane in 0..context.config.lanes {
175 let start = common::PREHASH_DIGEST_LENGTH;
176 // H'(H0||0||i)
177 h0[start..(start + 4)].clone_from_slice(&u32_as_32le(0));
178 h0[(start + 4)..(start + 8)].clone_from_slice(&u32_as_32le(lane));
179 hprime(memory[(lane, 0)].as_u8_mut(), &h0);
180
181 // H'(H0||1||i)
182 h0[start..(start + 4)].clone_from_slice(&u32_as_32le(1));
183 hprime(memory[(lane, 1)].as_u8_mut(), &h0);
184 }
185 }
186
/// Fills the memory blocks using one scoped thread per lane.
///
/// Each lane of a slice is handed to its own worker via the disjoint
/// mutable lane views from `memory.as_lanes_mut()`; the scope returns only
/// after all workers finish, acting as the sync-point barrier between
/// slices.
#[cfg(feature = "crossbeam-utils")]
fn fill_memory_blocks_mt(context: &Context, memory: &mut Memory) {
    for p in 0..context.config.time_cost {
        for s in 0..common::SYNC_POINTS {
            // NOTE(review): the scope's Result (Err if a worker panicked) is
            // discarded here — confirm that swallowing it is intentional.
            let _ = scope(|scoped| {
                for (l, mem) in (0..context.config.lanes).zip(memory.as_lanes_mut()) {
                    let position = Position {
                        pass: p,
                        lane: l,
                        slice: s,
                        index: 0,
                    };
                    // `move` transfers the lane view and position into the worker.
                    scoped.spawn(move |_| {
                        fill_segment(context, &position, mem);
                    });
                }
            });
        }
    }
}
207
// Fallback when the `crossbeam-utils` feature is disabled: parallel filling
// is unavailable, so this always panics. `fill_memory_blocks` routes here
// only when the configuration does not request sequential processing.
#[cfg(not(feature = "crossbeam-utils"))]
fn fill_memory_blocks_mt(_: &Context, _: &mut Memory) {
    unimplemented!()
}
212
fill_memory_blocks_st(context: &Context, memory: &mut Memory)213 fn fill_memory_blocks_st(context: &Context, memory: &mut Memory) {
214 for p in 0..context.config.time_cost {
215 for s in 0..common::SYNC_POINTS {
216 for l in 0..context.config.lanes {
217 let position = Position {
218 pass: p,
219 lane: l,
220 slice: s,
221 index: 0,
222 };
223 fill_segment(context, &position, memory);
224 }
225 }
226 }
227 }
228
/// Fills one segment (the part of a lane between two sync points) starting
/// at the given position.
fn fill_segment(context: &Context, position: &Position, memory: &mut Memory) {
    let mut position = position.clone();
    // Data-independent (Argon2i-style) addressing is used for Argon2i
    // always, and for Argon2id only during the first half of the first pass.
    // NOTE: `&&` binds tighter than `||`, so despite the formatting this
    // parses as Argon2i || (Argon2id && pass == 0 && slice < SYNC_POINTS/2),
    // which matches the reference implementation.
    let data_independent_addressing = (context.config.variant == Variant::Argon2i)
        || (context.config.variant == Variant::Argon2id && position.pass == 0)
            && (position.slice < (common::SYNC_POINTS / 2));
    let zero_block = Block::zero();
    let mut input_block = Block::zero();
    let mut address_block = Block::zero();

    if data_independent_addressing {
        // Seed the address-generation input block with position metadata.
        input_block[0] = position.pass as u64;
        input_block[1] = position.lane as u64;
        input_block[2] = position.slice as u64;
        input_block[3] = context.memory_blocks as u64;
        input_block[4] = context.config.time_cost as u64;
        input_block[5] = context.config.variant.as_u64();
    }

    let mut starting_index = 0u32;

    if position.pass == 0 && position.slice == 0 {
        // The first two blocks of each lane were already produced by
        // fill_first_blocks, so skip them.
        starting_index = 2;

        // Don't forget to generate the first block of addresses:
        if data_independent_addressing {
            next_addresses(&mut address_block, &mut input_block, &zero_block);
        }
    }

    // Absolute offset of the first block to compute in this segment.
    let mut curr_offset = (position.lane * context.lane_length)
        + (position.slice * context.segment_length)
        + starting_index;

    let mut prev_offset = if curr_offset % context.lane_length == 0 {
        // Last block in this lane
        curr_offset + context.lane_length - 1
    } else {
        curr_offset - 1
    };

    let mut pseudo_rand;
    for i in starting_index..context.segment_length {
        // 1.1 Rotating prev_offset if needed
        if curr_offset % context.lane_length == 1 {
            prev_offset = curr_offset - 1;
        }

        // 1.2 Computing the index of the reference block
        // 1.2.1 Taking pseudo-random value from the previous block
        if data_independent_addressing {
            // Generate a fresh block of addresses whenever the current one
            // has been exhausted.
            if i % common::ADDRESSES_IN_BLOCK == 0 {
                next_addresses(&mut address_block, &mut input_block, &zero_block);
            }
            pseudo_rand = address_block[(i % common::ADDRESSES_IN_BLOCK) as usize];
        } else {
            // Data-dependent addressing: first word of the previous block.
            pseudo_rand = memory[(prev_offset)][0];
        }

        // 1.2.2 Computing the lane of the reference block
        // If (position.pass == 0) && (position.slice == 0): can not reference other lanes yet
        let ref_lane = if (position.pass == 0) && (position.slice == 0) {
            position.lane as u64
        } else {
            (pseudo_rand >> 32) % context.config.lanes as u64
        };

        // 1.2.3 Computing the number of possible reference block within the lane.
        position.index = i;
        let pseudo_rand_u32 = (pseudo_rand & 0xFFFF_FFFF) as u32;
        let same_lane = ref_lane == (position.lane as u64);
        let ref_index = index_alpha(context, &position, pseudo_rand_u32, same_lane);

        // 2 Creating a new block
        let index = context.lane_length as u64 * ref_lane + ref_index as u64;
        // Work on a copy so `memory` is not borrowed mutably and immutably
        // at the same time.
        let mut curr_block = memory[curr_offset].clone();
        {
            let prev_block = &memory[prev_offset];
            let ref_block = &memory[index];
            // Version 1.0 (and the first pass of any version) overwrites the
            // block; later passes in version 1.3 XOR over its contents.
            if context.config.version == Version::Version10 || position.pass == 0 {
                fill_block(prev_block, ref_block, &mut curr_block, false);
            } else {
                fill_block(prev_block, ref_block, &mut curr_block, true);
            }
        }

        memory[curr_offset] = curr_block;
        curr_offset += 1;
        prev_offset += 1;
    }
}
319
/// Blake2b G mixing function with Argon2's BlaMka modification: additions
/// are replaced by `f_bla_mka`, while the rotation amounts (32, 24, 16, 63)
/// are those of standard Blake2b.
fn g(a: &mut u64, b: &mut u64, c: &mut u64, d: &mut u64) {
    *a = f_bla_mka(*a, *b);
    *d = rotr64(*d ^ *a, 32);
    *c = f_bla_mka(*c, *d);
    *b = rotr64(*b ^ *c, 24);
    *a = f_bla_mka(*a, *b);
    *d = rotr64(*d ^ *a, 16);
    *c = f_bla_mka(*c, *d);
    *b = rotr64(*b ^ *c, 63);
}
330
/// Computes the prehash seed H0 = Blake2b(lanes || hash_length || mem_cost
/// || time_cost || version || variant || len(pwd) || pwd || len(salt) ||
/// salt || len(secret) || secret || len(ad) || ad), zero-padded to
/// `PREHASH_SEED_LENGTH` bytes.
fn h0(context: &Context) -> [u8; common::PREHASH_SEED_LENGTH] {
    let input = [
        &u32_as_32le(context.config.lanes),
        &u32_as_32le(context.config.hash_length),
        &u32_as_32le(context.config.mem_cost),
        &u32_as_32le(context.config.time_cost),
        &u32_as_32le(context.config.version.as_u32()),
        &u32_as_32le(context.config.variant.as_u32()),
        &len_as_32le(context.pwd),
        context.pwd,
        &len_as_32le(context.salt),
        context.salt,
        &len_as_32le(context.config.secret),
        context.config.secret,
        &len_as_32le(context.config.ad),
        context.config.ad,
    ];
    // Only the first PREHASH_DIGEST_LENGTH bytes carry the digest; the
    // trailing bytes stay zero and are later overwritten with block/lane
    // indices by fill_first_blocks.
    let mut out = [0u8; common::PREHASH_SEED_LENGTH];
    blake2b(&mut out[0..common::PREHASH_DIGEST_LENGTH], &input);
    out
}
352
/// Argon2's variable-length hash H'.
///
/// Outputs up to `BLAKE2B_OUT_LENGTH` bytes come from a single Blake2b call
/// over the length-prefixed input. Longer outputs chain 64-byte digests,
/// emitting the first 32 bytes of each link and finishing with one digest
/// sized to the remainder.
fn hprime(out: &mut [u8], input: &[u8]) {
    let out_len = out.len();
    if out_len <= common::BLAKE2B_OUT_LENGTH {
        // Short output: Blake2b(LE32(out_len) || input) directly into `out`.
        blake2b(out, &[&u32_as_32le(out_len as u32), input]);
    } else {
        // Each intermediate link contributes the first half (32 bytes) of a
        // 64-byte Blake2b digest.
        let ai_len = 32;
        let mut out_buffer = [0u8; common::BLAKE2B_OUT_LENGTH];
        let mut in_buffer = [0u8; common::BLAKE2B_OUT_LENGTH];
        blake2b(&mut out_buffer, &[&u32_as_32le(out_len as u32), input]);
        out[0..ai_len].clone_from_slice(&out_buffer[0..ai_len]);
        let mut out_pos = ai_len;
        let mut to_produce = out_len - ai_len;

        while to_produce > common::BLAKE2B_OUT_LENGTH {
            // Next link: V_{i+1} = Blake2b(V_i).
            in_buffer.clone_from_slice(&out_buffer);
            blake2b(&mut out_buffer, &[&in_buffer]);
            out[out_pos..out_pos + ai_len].clone_from_slice(&out_buffer[0..ai_len]);
            out_pos += ai_len;
            to_produce -= ai_len;
        }
        // Final link: the remaining `to_produce` (<= 64) bytes come from a
        // full-length digest of the last buffer.
        blake2b(&mut out[out_pos..out_len], &[&out_buffer]);
    }
}
376
/// Maps a 32-bit pseudo-random value to the index of the reference block
/// within its lane, following the Argon2 indexing rule.
fn index_alpha(context: &Context, position: &Position, pseudo_rand: u32, same_lane: bool) -> u32 {
    // Pass 0:
    // - This lane: all already finished segments plus already constructed blocks in this segment
    // - Other lanes: all already finished segments
    // Pass 1+:
    // - This lane: (SYNC_POINTS - 1) last segments plus already constructed blocks in this segment
    // - Other lanes : (SYNC_POINTS - 1) last segments
    let reference_area_size: u32 = if position.pass == 0 {
        // First pass
        if position.slice == 0 {
            // First slice; index starts at 2 in this segment (see
            // fill_segment's starting_index), so this cannot underflow.
            position.index - 1
        } else if same_lane {
            // The same lane => add current segment
            position.slice * context.segment_length + position.index - 1
        } else if position.index == 0 {
            // NOTE(review): the -1 appears to exclude the block immediately
            // preceding the current one — matches the reference code.
            position.slice * context.segment_length - 1
        } else {
            position.slice * context.segment_length
        }
    } else {
        // Second pass
        if same_lane {
            context.lane_length - context.segment_length + position.index - 1
        } else if position.index == 0 {
            context.lane_length - context.segment_length - 1
        } else {
            context.lane_length - context.segment_length
        }
    };
    // Widen to u64 so the squaring below cannot overflow.
    let reference_area_size = reference_area_size as u64;
    let mut relative_position = pseudo_rand as u64;
    // Non-uniform mapping skewed towards recent blocks, computed with two
    // 32-bit fixed-point multiplications: |area| - 1 - (|area| * J^2 / 2^64).
    relative_position = (relative_position * relative_position) >> 32;
    relative_position = reference_area_size - 1 - ((reference_area_size * relative_position) >> 32);

    // 1.2.5 Computing starting position
    let start_position: u32 = if position.pass != 0 {
        if position.slice == common::SYNC_POINTS - 1 {
            0u32
        } else {
            (position.slice + 1) * context.segment_length
        }
    } else {
        0u32
    };
    let start_position = start_position as u64;

    // 1.2.6. Computing absolute position
    ((start_position + relative_position) % context.lane_length as u64) as u32
}
427
len_as_32le(slice: &[u8]) -> [u8; 4]428 fn len_as_32le(slice: &[u8]) -> [u8; 4] {
429 u32_as_32le(slice.len() as u32)
430 }
431
next_addresses(address_block: &mut Block, input_block: &mut Block, zero_block: &Block)432 fn next_addresses(address_block: &mut Block, input_block: &mut Block, zero_block: &Block) {
433 input_block[6] += 1;
434 fill_block(zero_block, input_block, address_block, false);
435 fill_block(zero_block, &address_block.clone(), address_block, false);
436 }
437
/// Blake2b round permutation over sixteen 64-bit words, using the BlaMka
/// mixing function `g`.
fn p(
    v0: &mut u64,
    v1: &mut u64,
    v2: &mut u64,
    v3: &mut u64,
    v4: &mut u64,
    v5: &mut u64,
    v6: &mut u64,
    v7: &mut u64,
    v8: &mut u64,
    v9: &mut u64,
    v10: &mut u64,
    v11: &mut u64,
    v12: &mut u64,
    v13: &mut u64,
    v14: &mut u64,
    v15: &mut u64,
) {
    // Column step: mix the four columns of the 4x4 word matrix.
    g(v0, v4, v8, v12);
    g(v1, v5, v9, v13);
    g(v2, v6, v10, v14);
    g(v3, v7, v11, v15);
    // Diagonal step: mix the four diagonals.
    g(v0, v5, v10, v15);
    g(v1, v6, v11, v12);
    g(v2, v7, v8, v13);
    g(v3, v4, v9, v14);
}
465
/// Rotates the bits of `w` right by `c` positions.
fn rotr64(w: u64, c: u32) -> u64 {
    // The previous `(w >> c) | (w << (64 - c))` overflows the shift when
    // `c == 0` (panics in debug builds). `rotate_right` is identical for
    // the rotations used here (16, 24, 32, 63) and well-defined for all `c`.
    w.rotate_right(c)
}
469
/// Encodes `val` as 4 little-endian bytes.
fn u32_as_32le(val: u32) -> [u8; 4] {
    // Safe, endian-correct replacement for the former
    // `unsafe { mem::transmute(val.to_le()) }`.
    val.to_le_bytes()
}
473