
# qhasm: int64 a

# qhasm: int64 arg1

# qhasm: int64 arg2

# qhasm: int64 arg3

# qhasm: int64 arg4

# qhasm: input arg1

# qhasm: input arg2

# qhasm: input arg3

# qhasm: input arg4

# qhasm: int64 k

# qhasm: int64 kbits

# qhasm: int64 iv

# qhasm: int64 i

# qhasm: int64 x

# qhasm: int64 m

# qhasm: int64 out

# qhasm: int64 bytes

# qhasm: int6464 diag0

# qhasm: int6464 diag1

# qhasm: int6464 diag2

# qhasm: int6464 diag3

# qhasm: int6464 a0

# qhasm: int6464 a1

# qhasm: int6464 a2

# qhasm: int6464 a3

# qhasm: int6464 a4

# qhasm: int6464 a5

# qhasm: int6464 a6

# qhasm: int6464 a7

# qhasm: int6464 b0

# qhasm: int6464 b1

# qhasm: int6464 b2

# qhasm: int6464 b3

# qhasm: int6464 b4

# qhasm: int6464 b5

# qhasm: int6464 b6

# qhasm: int6464 b7

# qhasm: int64 in0

# qhasm: int64 in1

# qhasm: int64 in2

# qhasm: int64 in3

# qhasm: int64 in4

# qhasm: int64 in5

# qhasm: int64 in6

# qhasm: int64 in7

# qhasm: int64 in8

# qhasm: int64 in9

# qhasm: int64 in10

# qhasm: int64 in11

# qhasm: int64 in12

# qhasm: int64 in13

# qhasm: int64 in14

# qhasm: int64 in15

# qhasm: stack512 tmp

# qhasm: int64 ctarget

# qhasm: stack64 bytes_backup

# qhasm: enter ECRYPT_keystream_bytes
.text
.p2align 5
.globl _ECRYPT_keystream_bytes
.globl ECRYPT_keystream_bytes
_ECRYPT_keystream_bytes:
ECRYPT_keystream_bytes:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp
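
# note: every entry point below uses this same prologue: r11 = (rsp & 31) + 96,
# then rsp -= r11.  Because 96 is a multiple of 32, the new rsp is 32-byte
# aligned with at least 96 bytes of scratch space for tmp and bytes_backup,
# and the original offset stays in r11 so "leave" can undo it with one add.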

# qhasm: x = arg1
# asm 1: mov  <arg1=int64#1,>x=int64#5
# asm 2: mov  <arg1=%rdi,>x=%r8
mov  %rdi,%r8

# qhasm: m = arg2
# asm 1: mov  <arg2=int64#2,>m=int64#2
# asm 2: mov  <arg2=%rsi,>m=%rsi
mov  %rsi,%rsi

# qhasm: out = m
# asm 1: mov  <m=int64#2,>out=int64#1
# asm 2: mov  <m=%rsi,>out=%rdi
mov  %rsi,%rdi

# qhasm: bytes = arg3
# asm 1: mov  <arg3=int64#3,>bytes=int64#3
# asm 2: mov  <arg3=%rdx,>bytes=%rdx
mov  %rdx,%rdx

# qhasm:               unsigned>? bytes - 0
# asm 1: cmp  $0,<bytes=int64#3
# asm 2: cmp  $0,<bytes=%rdx
cmp  $0,%rdx
# comment:fp stack unchanged by jump

# qhasm: goto done if !unsigned>
jbe ._done

# qhasm: a = 0
# asm 1: mov  $0,>a=int64#7
# asm 2: mov  $0,>a=%rax
mov  $0,%rax

# qhasm: i = bytes
# asm 1: mov  <bytes=int64#3,>i=int64#4
# asm 2: mov  <bytes=%rdx,>i=%rcx
mov  %rdx,%rcx

# qhasm: while (i) { *out++ = a; --i }
rep stosb
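
# note: keystream output is produced as the encryption of all-zero bytes:
# the rep stosb above zero-fills the caller's buffer, out is rewound below,
# and control joins the common ._start loop shared with encrypt/decrypt.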

# qhasm: out -= bytes
# asm 1: sub  <bytes=int64#3,<out=int64#1
# asm 2: sub  <bytes=%rdx,<out=%rdi
sub  %rdx,%rdi
# comment:fp stack unchanged by jump

# qhasm: goto start
jmp ._start

# qhasm: enter ECRYPT_decrypt_bytes
.text
.p2align 5
.globl _ECRYPT_decrypt_bytes
.globl ECRYPT_decrypt_bytes
_ECRYPT_decrypt_bytes:
ECRYPT_decrypt_bytes:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp

# qhasm: x = arg1
# asm 1: mov  <arg1=int64#1,>x=int64#5
# asm 2: mov  <arg1=%rdi,>x=%r8
mov  %rdi,%r8

# qhasm: m = arg2
# asm 1: mov  <arg2=int64#2,>m=int64#2
# asm 2: mov  <arg2=%rsi,>m=%rsi
mov  %rsi,%rsi

# qhasm: out = arg3
# asm 1: mov  <arg3=int64#3,>out=int64#1
# asm 2: mov  <arg3=%rdx,>out=%rdi
mov  %rdx,%rdi

# qhasm: bytes = arg4
# asm 1: mov  <arg4=int64#4,>bytes=int64#3
# asm 2: mov  <arg4=%rcx,>bytes=%rdx
mov  %rcx,%rdx

# qhasm:               unsigned>? bytes - 0
# asm 1: cmp  $0,<bytes=int64#3
# asm 2: cmp  $0,<bytes=%rdx
cmp  $0,%rdx
# comment:fp stack unchanged by jump

# qhasm: goto done if !unsigned>
jbe ._done
# comment:fp stack unchanged by jump

# qhasm: goto start
jmp ._start

# qhasm: enter ECRYPT_encrypt_bytes
.text
.p2align 5
.globl _ECRYPT_encrypt_bytes
.globl ECRYPT_encrypt_bytes
_ECRYPT_encrypt_bytes:
ECRYPT_encrypt_bytes:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp

# qhasm: x = arg1
# asm 1: mov  <arg1=int64#1,>x=int64#5
# asm 2: mov  <arg1=%rdi,>x=%r8
mov  %rdi,%r8

# qhasm: m = arg2
# asm 1: mov  <arg2=int64#2,>m=int64#2
# asm 2: mov  <arg2=%rsi,>m=%rsi
mov  %rsi,%rsi

# qhasm: out = arg3
# asm 1: mov  <arg3=int64#3,>out=int64#1
# asm 2: mov  <arg3=%rdx,>out=%rdi
mov  %rdx,%rdi

# qhasm: bytes = arg4
# asm 1: mov  <arg4=int64#4,>bytes=int64#3
# asm 2: mov  <arg4=%rcx,>bytes=%rdx
mov  %rcx,%rdx

# qhasm:               unsigned>? bytes - 0
# asm 1: cmp  $0,<bytes=int64#3
# asm 2: cmp  $0,<bytes=%rdx
cmp  $0,%rdx
# comment:fp stack unchanged by jump

# qhasm: goto done if !unsigned>
jbe ._done
# comment:fp stack unchanged by fallthrough

# qhasm: start:
._start:

# qhasm: bytesatleast1:
._bytesatleast1:

# qhasm:                   unsigned<? bytes - 64
# asm 1: cmp  $64,<bytes=int64#3
# asm 2: cmp  $64,<bytes=%rdx
cmp  $64,%rdx
# comment:fp stack unchanged by jump

# qhasm:   goto nocopy if !unsigned<
jae ._nocopy

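# note: fewer than 64 bytes remain, so the partial block is staged in the
# aligned 64-byte stack buffer tmp: the real destination is parked in
# ctarget, the input is copied into tmp, and both m and out are pointed at
# tmp so the block routine can read and write a full 64 bytes safely.
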
# qhasm:     ctarget = out
# asm 1: mov  <out=int64#1,>ctarget=int64#6
# asm 2: mov  <out=%rdi,>ctarget=%r9
mov  %rdi,%r9

# qhasm:     out = &tmp
# asm 1: leaq <tmp=stack512#1,>out=int64#1
# asm 2: leaq <tmp=32(%rsp),>out=%rdi
leaq 32(%rsp),%rdi

# qhasm:     i = bytes
# asm 1: mov  <bytes=int64#3,>i=int64#4
# asm 2: mov  <bytes=%rdx,>i=%rcx
mov  %rdx,%rcx

# qhasm:     while (i) { *out++ = *m++; --i }
rep movsb

# qhasm:     out = &tmp
# asm 1: leaq <tmp=stack512#1,>out=int64#1
# asm 2: leaq <tmp=32(%rsp),>out=%rdi
leaq 32(%rsp),%rdi

# qhasm:     m = &tmp
# asm 1: leaq <tmp=stack512#1,>m=int64#2
# asm 2: leaq <tmp=32(%rsp),>m=%rsi
leaq 32(%rsp),%rsi
# comment:fp stack unchanged by fallthrough

# qhasm:   nocopy:
._nocopy:

# qhasm: bytes_backup = bytes
# asm 1: movq <bytes=int64#3,>bytes_backup=stack64#1
# asm 2: movq <bytes=%rdx,>bytes_backup=0(%rsp)
movq %rdx,0(%rsp)

# qhasm: diag0 = *(int128 *) (x + 0)
# asm 1: movdqa 0(<x=int64#5),>diag0=int6464#1
# asm 2: movdqa 0(<x=%r8),>diag0=%xmm0
movdqa 0(%r8),%xmm0

# qhasm: diag1 = *(int128 *) (x + 16)
# asm 1: movdqa 16(<x=int64#5),>diag1=int6464#2
# asm 2: movdqa 16(<x=%r8),>diag1=%xmm1
movdqa 16(%r8),%xmm1

# qhasm: diag2 = *(int128 *) (x + 32)
# asm 1: movdqa 32(<x=int64#5),>diag2=int6464#3
# asm 2: movdqa 32(<x=%r8),>diag2=%xmm2
movdqa 32(%r8),%xmm2

# qhasm: diag3 = *(int128 *) (x + 48)
# asm 1: movdqa 48(<x=int64#5),>diag3=int6464#4
# asm 2: movdqa 48(<x=%r8),>diag3=%xmm3
movdqa 48(%r8),%xmm3
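
# note: ECRYPT_keysetup/ECRYPT_ivsetup store the 16 state words in a
# shuffled order, so these four aligned loads place one diagonal of the
# Salsa20 4x4 matrix in each register: diag0 = {x0,x5,x10,x15},
# diag1 = {x12,x1,x6,x11}, diag2 = {x8,x13,x2,x7}, diag3 = {x4,x9,x14,x3}.
# Each SIMD operation below therefore runs four quarter-rounds in parallel.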

# qhasm:             	a0 = diag1
# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
movdqa %xmm1,%xmm4

# qhasm: i = 20
# asm 1: mov  $20,>i=int64#3
# asm 2: mov  $20,>i=%rdx
mov  $20,%rdx
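
# note: i counts remaining rounds; every pass through ._mainloop performs
# four rounds (two column/row double-rounds) and subtracts 4, so i = 20
# gives Salsa20/20.  In scalar terms each 4-stage group below computes,
# across all four lanes at once:
#   diag3 ^= (diag0 + diag1) <<< 7
#   diag2 ^= (diag3 + diag0) <<< 9
#   diag1 ^= (diag2 + diag3) <<< 13
#   diag0 ^= (diag1 + diag2) <<< 18
# with each 32-bit rotate built from pslld/psrld plus two pxor, since SSE2
# has no vector rotate instruction.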

# qhasm: mainloop:
._mainloop:

# qhasm: uint32323232	a0 += diag0
# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
paddd %xmm0,%xmm4

# qhasm: 				a1 = diag0
# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
movdqa %xmm0,%xmm5

# qhasm:             	b0 = a0
# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232	a0 <<= 7
# asm 1: pslld $7,<a0=int6464#5
# asm 2: pslld $7,<a0=%xmm4
pslld $7,%xmm4

# qhasm: uint32323232	b0 >>= 25
# asm 1: psrld $25,<b0=int6464#7
# asm 2: psrld $25,<b0=%xmm6
psrld $25,%xmm6

# qhasm:                 diag3 ^= a0
# asm 1: pxor  <a0=int6464#5,<diag3=int6464#4
# asm 2: pxor  <a0=%xmm4,<diag3=%xmm3
pxor  %xmm4,%xmm3

# qhasm:                 diag3 ^= b0
# asm 1: pxor  <b0=int6464#7,<diag3=int6464#4
# asm 2: pxor  <b0=%xmm6,<diag3=%xmm3
pxor  %xmm6,%xmm3

# qhasm: uint32323232			a1 += diag3
# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
paddd %xmm3,%xmm5

# qhasm: 						a2 = diag3
# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
movdqa %xmm3,%xmm4

# qhasm:             			b1 = a1
# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232			a1 <<= 9
# asm 1: pslld $9,<a1=int6464#6
# asm 2: pslld $9,<a1=%xmm5
pslld $9,%xmm5

# qhasm: uint32323232			b1 >>= 23
# asm 1: psrld $23,<b1=int6464#7
# asm 2: psrld $23,<b1=%xmm6
psrld $23,%xmm6

# qhasm: 				diag2 ^= a1
# asm 1: pxor  <a1=int6464#6,<diag2=int6464#3
# asm 2: pxor  <a1=%xmm5,<diag2=%xmm2
pxor  %xmm5,%xmm2

# qhasm: 		diag3 <<<= 32
# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x93,%xmm3,%xmm3
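
# note: pshufd with immediate 0x93, 0x4e or 0x39 rotates the four 32-bit
# lanes by one, two or three positions; qhasm spells these <<<= 32, 64, 96.
# The rotations realign the diagonals between quarter-round stages.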

# qhasm: 				diag2 ^= b1
# asm 1: pxor  <b1=int6464#7,<diag2=int6464#3
# asm 2: pxor  <b1=%xmm6,<diag2=%xmm2
pxor  %xmm6,%xmm2

# qhasm: uint32323232					a2 += diag2
# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
paddd %xmm2,%xmm4

# qhasm: 								a3 = diag2
# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
movdqa %xmm2,%xmm5

# qhasm:             					b2 = a2
# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232					a2 <<= 13
# asm 1: pslld $13,<a2=int6464#5
# asm 2: pslld $13,<a2=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232					b2 >>= 19
# asm 1: psrld $19,<b2=int6464#7
# asm 2: psrld $19,<b2=%xmm6
psrld $19,%xmm6

# qhasm: 						diag1 ^= a2
# asm 1: pxor  <a2=int6464#5,<diag1=int6464#2
# asm 2: pxor  <a2=%xmm4,<diag1=%xmm1
pxor  %xmm4,%xmm1

# qhasm: 				diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: 						diag1 ^= b2
# asm 1: pxor  <b2=int6464#7,<diag1=int6464#2
# asm 2: pxor  <b2=%xmm6,<diag1=%xmm1
pxor  %xmm6,%xmm1

# qhasm: uint32323232							a3 += diag1
# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
paddd %xmm1,%xmm5

# qhasm: 		a4 = diag3
# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
movdqa %xmm3,%xmm4

# qhasm:             							b3 = a3
# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232							a3 <<= 18
# asm 1: pslld $18,<a3=int6464#6
# asm 2: pslld $18,<a3=%xmm5
pslld $18,%xmm5

# qhasm: uint32323232							b3 >>= 14
# asm 1: psrld $14,<b3=int6464#7
# asm 2: psrld $14,<b3=%xmm6
psrld $14,%xmm6

# qhasm: 								diag0 ^= a3
# asm 1: pxor  <a3=int6464#6,<diag0=int6464#1
# asm 2: pxor  <a3=%xmm5,<diag0=%xmm0
pxor  %xmm5,%xmm0

# qhasm: 						diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: 								diag0 ^= b3
# asm 1: pxor  <b3=int6464#7,<diag0=int6464#1
# asm 2: pxor  <b3=%xmm6,<diag0=%xmm0
pxor  %xmm6,%xmm0

# qhasm: uint32323232	a4 += diag0
# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
paddd %xmm0,%xmm4

# qhasm: 				a5 = diag0
# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
movdqa %xmm0,%xmm5

# qhasm:             	b4 = a4
# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232	a4 <<= 7
# asm 1: pslld $7,<a4=int6464#5
# asm 2: pslld $7,<a4=%xmm4
pslld $7,%xmm4

# qhasm: uint32323232	b4 >>= 25
# asm 1: psrld $25,<b4=int6464#7
# asm 2: psrld $25,<b4=%xmm6
psrld $25,%xmm6

# qhasm:                 diag1 ^= a4
# asm 1: pxor  <a4=int6464#5,<diag1=int6464#2
# asm 2: pxor  <a4=%xmm4,<diag1=%xmm1
pxor  %xmm4,%xmm1

# qhasm:                 diag1 ^= b4
# asm 1: pxor  <b4=int6464#7,<diag1=int6464#2
# asm 2: pxor  <b4=%xmm6,<diag1=%xmm1
pxor  %xmm6,%xmm1

# qhasm: uint32323232			a5 += diag1
# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
paddd %xmm1,%xmm5

# qhasm: 						a6 = diag1
# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
movdqa %xmm1,%xmm4

# qhasm:             			b5 = a5
# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232			a5 <<= 9
# asm 1: pslld $9,<a5=int6464#6
# asm 2: pslld $9,<a5=%xmm5
pslld $9,%xmm5

# qhasm: uint32323232			b5 >>= 23
# asm 1: psrld $23,<b5=int6464#7
# asm 2: psrld $23,<b5=%xmm6
psrld $23,%xmm6

# qhasm: 				diag2 ^= a5
# asm 1: pxor  <a5=int6464#6,<diag2=int6464#3
# asm 2: pxor  <a5=%xmm5,<diag2=%xmm2
pxor  %xmm5,%xmm2

# qhasm: 		diag1 <<<= 32
# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x93,%xmm1,%xmm1

# qhasm: 				diag2 ^= b5
# asm 1: pxor  <b5=int6464#7,<diag2=int6464#3
# asm 2: pxor  <b5=%xmm6,<diag2=%xmm2
pxor  %xmm6,%xmm2

# qhasm: uint32323232					a6 += diag2
# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
paddd %xmm2,%xmm4

# qhasm: 								a7 = diag2
# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
movdqa %xmm2,%xmm5

# qhasm:             					b6 = a6
# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232					a6 <<= 13
# asm 1: pslld $13,<a6=int6464#5
# asm 2: pslld $13,<a6=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232					b6 >>= 19
# asm 1: psrld $19,<b6=int6464#7
# asm 2: psrld $19,<b6=%xmm6
psrld $19,%xmm6

# qhasm: 						diag3 ^= a6
# asm 1: pxor  <a6=int6464#5,<diag3=int6464#4
# asm 2: pxor  <a6=%xmm4,<diag3=%xmm3
pxor  %xmm4,%xmm3

# qhasm: 				diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: 						diag3 ^= b6
# asm 1: pxor  <b6=int6464#7,<diag3=int6464#4
# asm 2: pxor  <b6=%xmm6,<diag3=%xmm3
pxor  %xmm6,%xmm3

# qhasm: uint32323232							a7 += diag3
# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
paddd %xmm3,%xmm5

# qhasm: 		a0 = diag1
# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
movdqa %xmm1,%xmm4

# qhasm:             							b7 = a7
# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232							a7 <<= 18
# asm 1: pslld $18,<a7=int6464#6
# asm 2: pslld $18,<a7=%xmm5
pslld $18,%xmm5

# qhasm: uint32323232							b7 >>= 14
# asm 1: psrld $14,<b7=int6464#7
# asm 2: psrld $14,<b7=%xmm6
psrld $14,%xmm6

# qhasm: 								diag0 ^= a7
# asm 1: pxor  <a7=int6464#6,<diag0=int6464#1
# asm 2: pxor  <a7=%xmm5,<diag0=%xmm0
pxor  %xmm5,%xmm0

# qhasm: 						diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: 								diag0 ^= b7
# asm 1: pxor  <b7=int6464#7,<diag0=int6464#1
# asm 2: pxor  <b7=%xmm6,<diag0=%xmm0
pxor  %xmm6,%xmm0

# qhasm: uint32323232	a0 += diag0
# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
paddd %xmm0,%xmm4

# qhasm: 				a1 = diag0
# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
movdqa %xmm0,%xmm5

# qhasm:             	b0 = a0
# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232	a0 <<= 7
# asm 1: pslld $7,<a0=int6464#5
# asm 2: pslld $7,<a0=%xmm4
pslld $7,%xmm4

# qhasm: uint32323232	b0 >>= 25
# asm 1: psrld $25,<b0=int6464#7
# asm 2: psrld $25,<b0=%xmm6
psrld $25,%xmm6

# qhasm:                 diag3 ^= a0
# asm 1: pxor  <a0=int6464#5,<diag3=int6464#4
# asm 2: pxor  <a0=%xmm4,<diag3=%xmm3
pxor  %xmm4,%xmm3

# qhasm:                 diag3 ^= b0
# asm 1: pxor  <b0=int6464#7,<diag3=int6464#4
# asm 2: pxor  <b0=%xmm6,<diag3=%xmm3
pxor  %xmm6,%xmm3

# qhasm: uint32323232			a1 += diag3
# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
paddd %xmm3,%xmm5

# qhasm: 						a2 = diag3
# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
movdqa %xmm3,%xmm4

# qhasm:             			b1 = a1
# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232			a1 <<= 9
# asm 1: pslld $9,<a1=int6464#6
# asm 2: pslld $9,<a1=%xmm5
pslld $9,%xmm5

# qhasm: uint32323232			b1 >>= 23
# asm 1: psrld $23,<b1=int6464#7
# asm 2: psrld $23,<b1=%xmm6
psrld $23,%xmm6

# qhasm: 				diag2 ^= a1
# asm 1: pxor  <a1=int6464#6,<diag2=int6464#3
# asm 2: pxor  <a1=%xmm5,<diag2=%xmm2
pxor  %xmm5,%xmm2

# qhasm: 		diag3 <<<= 32
# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x93,%xmm3,%xmm3

# qhasm: 				diag2 ^= b1
# asm 1: pxor  <b1=int6464#7,<diag2=int6464#3
# asm 2: pxor  <b1=%xmm6,<diag2=%xmm2
pxor  %xmm6,%xmm2

# qhasm: uint32323232					a2 += diag2
# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
paddd %xmm2,%xmm4

# qhasm: 								a3 = diag2
# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
movdqa %xmm2,%xmm5

# qhasm:             					b2 = a2
# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232					a2 <<= 13
# asm 1: pslld $13,<a2=int6464#5
# asm 2: pslld $13,<a2=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232					b2 >>= 19
# asm 1: psrld $19,<b2=int6464#7
# asm 2: psrld $19,<b2=%xmm6
psrld $19,%xmm6

# qhasm: 						diag1 ^= a2
# asm 1: pxor  <a2=int6464#5,<diag1=int6464#2
# asm 2: pxor  <a2=%xmm4,<diag1=%xmm1
pxor  %xmm4,%xmm1

# qhasm: 				diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: 						diag1 ^= b2
# asm 1: pxor  <b2=int6464#7,<diag1=int6464#2
# asm 2: pxor  <b2=%xmm6,<diag1=%xmm1
pxor  %xmm6,%xmm1

# qhasm: uint32323232							a3 += diag1
# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
paddd %xmm1,%xmm5

# qhasm: 		a4 = diag3
# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
movdqa %xmm3,%xmm4

# qhasm:             							b3 = a3
# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232							a3 <<= 18
# asm 1: pslld $18,<a3=int6464#6
# asm 2: pslld $18,<a3=%xmm5
pslld $18,%xmm5

# qhasm: uint32323232							b3 >>= 14
# asm 1: psrld $14,<b3=int6464#7
# asm 2: psrld $14,<b3=%xmm6
psrld $14,%xmm6

# qhasm: 								diag0 ^= a3
# asm 1: pxor  <a3=int6464#6,<diag0=int6464#1
# asm 2: pxor  <a3=%xmm5,<diag0=%xmm0
pxor  %xmm5,%xmm0

# qhasm: 						diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: 								diag0 ^= b3
# asm 1: pxor  <b3=int6464#7,<diag0=int6464#1
# asm 2: pxor  <b3=%xmm6,<diag0=%xmm0
pxor  %xmm6,%xmm0

# qhasm: uint32323232	a4 += diag0
# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
paddd %xmm0,%xmm4

# qhasm: 				a5 = diag0
# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
movdqa %xmm0,%xmm5

# qhasm:             	b4 = a4
# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232	a4 <<= 7
# asm 1: pslld $7,<a4=int6464#5
# asm 2: pslld $7,<a4=%xmm4
pslld $7,%xmm4

# qhasm: uint32323232	b4 >>= 25
# asm 1: psrld $25,<b4=int6464#7
# asm 2: psrld $25,<b4=%xmm6
psrld $25,%xmm6

# qhasm:                 diag1 ^= a4
# asm 1: pxor  <a4=int6464#5,<diag1=int6464#2
# asm 2: pxor  <a4=%xmm4,<diag1=%xmm1
pxor  %xmm4,%xmm1

# qhasm:                 diag1 ^= b4
# asm 1: pxor  <b4=int6464#7,<diag1=int6464#2
# asm 2: pxor  <b4=%xmm6,<diag1=%xmm1
pxor  %xmm6,%xmm1

# qhasm: uint32323232			a5 += diag1
# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
paddd %xmm1,%xmm5

# qhasm: 						a6 = diag1
# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
movdqa %xmm1,%xmm4

# qhasm:             			b5 = a5
# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232			a5 <<= 9
# asm 1: pslld $9,<a5=int6464#6
# asm 2: pslld $9,<a5=%xmm5
pslld $9,%xmm5

# qhasm: uint32323232			b5 >>= 23
# asm 1: psrld $23,<b5=int6464#7
# asm 2: psrld $23,<b5=%xmm6
psrld $23,%xmm6

# qhasm: 				diag2 ^= a5
# asm 1: pxor  <a5=int6464#6,<diag2=int6464#3
# asm 2: pxor  <a5=%xmm5,<diag2=%xmm2
pxor  %xmm5,%xmm2

# qhasm: 		diag1 <<<= 32
# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x93,%xmm1,%xmm1

# qhasm: 				diag2 ^= b5
# asm 1: pxor  <b5=int6464#7,<diag2=int6464#3
# asm 2: pxor  <b5=%xmm6,<diag2=%xmm2
pxor  %xmm6,%xmm2

# qhasm: uint32323232					a6 += diag2
# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
paddd %xmm2,%xmm4

# qhasm: 								a7 = diag2
# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
movdqa %xmm2,%xmm5

# qhasm:             					b6 = a6
# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232					a6 <<= 13
# asm 1: pslld $13,<a6=int6464#5
# asm 2: pslld $13,<a6=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232					b6 >>= 19
# asm 1: psrld $19,<b6=int6464#7
# asm 2: psrld $19,<b6=%xmm6
psrld $19,%xmm6

# qhasm: 						diag3 ^= a6
# asm 1: pxor  <a6=int6464#5,<diag3=int6464#4
# asm 2: pxor  <a6=%xmm4,<diag3=%xmm3
pxor  %xmm4,%xmm3

# qhasm: 				diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: 						diag3 ^= b6
# asm 1: pxor  <b6=int6464#7,<diag3=int6464#4
# asm 2: pxor  <b6=%xmm6,<diag3=%xmm3
pxor  %xmm6,%xmm3

# qhasm:                  unsigned>? i -= 4
# asm 1: sub  $4,<i=int64#3
# asm 2: sub  $4,<i=%rdx
sub  $4,%rdx

# qhasm: uint32323232							a7 += diag3
# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
paddd %xmm3,%xmm5

# qhasm: 		a0 = diag1
# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
movdqa %xmm1,%xmm4

# qhasm:             							b7 = a7
# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232							a7 <<= 18
# asm 1: pslld $18,<a7=int6464#6
# asm 2: pslld $18,<a7=%xmm5
pslld $18,%xmm5

# qhasm: uint32323232							b7 >>= 14
# asm 1: psrld $14,<b7=int6464#7
# asm 2: psrld $14,<b7=%xmm6
psrld $14,%xmm6

# qhasm: 								diag0 ^= a7
# asm 1: pxor  <a7=int6464#6,<diag0=int6464#1
# asm 2: pxor  <a7=%xmm5,<diag0=%xmm0
pxor  %xmm5,%xmm0

# qhasm: 						diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: 								diag0 ^= b7
# asm 1: pxor  <b7=int6464#7,<diag0=int6464#1
# asm 2: pxor  <b7=%xmm6,<diag0=%xmm0
pxor  %xmm6,%xmm0
# comment:fp stack unchanged by jump

# qhasm: goto mainloop if unsigned>
ja ._mainloop

# qhasm: uint32323232 diag0 += *(int128 *) (x + 0)
# asm 1: paddd 0(<x=int64#5),<diag0=int6464#1
# asm 2: paddd 0(<x=%r8),<diag0=%xmm0
paddd 0(%r8),%xmm0

# qhasm: uint32323232 diag1 += *(int128 *) (x + 16)
# asm 1: paddd 16(<x=int64#5),<diag1=int6464#2
# asm 2: paddd 16(<x=%r8),<diag1=%xmm1
paddd 16(%r8),%xmm1

# qhasm: uint32323232 diag2 += *(int128 *) (x + 32)
# asm 1: paddd 32(<x=int64#5),<diag2=int6464#3
# asm 2: paddd 32(<x=%r8),<diag2=%xmm2
paddd 32(%r8),%xmm2

# qhasm: uint32323232 diag3 += *(int128 *) (x + 48)
# asm 1: paddd 48(<x=int64#5),<diag3=int6464#4
# asm 2: paddd 48(<x=%r8),<diag3=%xmm3
paddd 48(%r8),%xmm3
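
# note: the feedforward above adds the original input block back into the
# permuted state, as the Salsa20 core definition requires; this is what
# makes the core non-invertible.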

# qhasm: in0 = diag0
# asm 1: movd   <diag0=int6464#1,>in0=int64#3
# asm 2: movd   <diag0=%xmm0,>in0=%rdx
movd   %xmm0,%rdx

# qhasm: in12 = diag1
# asm 1: movd   <diag1=int6464#2,>in12=int64#4
# asm 2: movd   <diag1=%xmm1,>in12=%rcx
movd   %xmm1,%rcx

# qhasm: in8 = diag2
# asm 1: movd   <diag2=int6464#3,>in8=int64#7
# asm 2: movd   <diag2=%xmm2,>in8=%rax
movd   %xmm2,%rax

# qhasm: in4 = diag3
# asm 1: movd   <diag3=int6464#4,>in4=int64#8
# asm 2: movd   <diag3=%xmm3,>in4=%r10
movd   %xmm3,%r10
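
# note: output extraction works lane by lane: movd reads lane 0 of each
# register, then the pshufd $0x39 below rotates the next lane into place.
# The scattered store offsets (0, 48, 32, 16, then 20, 4, 52, 36, ...) undo
# the diagonal shuffle so the output block lands in normal word order.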

# qhasm: diag0 <<<= 96
# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
pshufd $0x39,%xmm0,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag2 <<<= 96
# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x39,%xmm2,%xmm2

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
# asm 1: xorl 0(<m=int64#2),<in0=int64#3d
# asm 2: xorl 0(<m=%rsi),<in0=%edx
xorl 0(%rsi),%edx

# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
# asm 1: xorl 48(<m=int64#2),<in12=int64#4d
# asm 2: xorl 48(<m=%rsi),<in12=%ecx
xorl 48(%rsi),%ecx

# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
# asm 1: xorl 32(<m=int64#2),<in8=int64#7d
# asm 2: xorl 32(<m=%rsi),<in8=%eax
xorl 32(%rsi),%eax

# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
# asm 1: xorl 16(<m=int64#2),<in4=int64#8d
# asm 2: xorl 16(<m=%rsi),<in4=%r10d
xorl 16(%rsi),%r10d

# qhasm: *(uint32 *) (out + 0) = in0
# asm 1: movl   <in0=int64#3d,0(<out=int64#1)
# asm 2: movl   <in0=%edx,0(<out=%rdi)
movl   %edx,0(%rdi)

# qhasm: *(uint32 *) (out + 48) = in12
# asm 1: movl   <in12=int64#4d,48(<out=int64#1)
# asm 2: movl   <in12=%ecx,48(<out=%rdi)
movl   %ecx,48(%rdi)

# qhasm: *(uint32 *) (out + 32) = in8
# asm 1: movl   <in8=int64#7d,32(<out=int64#1)
# asm 2: movl   <in8=%eax,32(<out=%rdi)
movl   %eax,32(%rdi)

# qhasm: *(uint32 *) (out + 16) = in4
# asm 1: movl   <in4=int64#8d,16(<out=int64#1)
# asm 2: movl   <in4=%r10d,16(<out=%rdi)
movl   %r10d,16(%rdi)

# qhasm: in5 = diag0
# asm 1: movd   <diag0=int6464#1,>in5=int64#3
# asm 2: movd   <diag0=%xmm0,>in5=%rdx
movd   %xmm0,%rdx

# qhasm: in1 = diag1
# asm 1: movd   <diag1=int6464#2,>in1=int64#4
# asm 2: movd   <diag1=%xmm1,>in1=%rcx
movd   %xmm1,%rcx

# qhasm: in13 = diag2
# asm 1: movd   <diag2=int6464#3,>in13=int64#7
# asm 2: movd   <diag2=%xmm2,>in13=%rax
movd   %xmm2,%rax

# qhasm: in9 = diag3
# asm 1: movd   <diag3=int6464#4,>in9=int64#8
# asm 2: movd   <diag3=%xmm3,>in9=%r10
movd   %xmm3,%r10

# qhasm: diag0 <<<= 96
# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
pshufd $0x39,%xmm0,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag2 <<<= 96
# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x39,%xmm2,%xmm2

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
# asm 1: xorl 20(<m=int64#2),<in5=int64#3d
# asm 2: xorl 20(<m=%rsi),<in5=%edx
xorl 20(%rsi),%edx

# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
# asm 1: xorl 4(<m=int64#2),<in1=int64#4d
# asm 2: xorl 4(<m=%rsi),<in1=%ecx
xorl 4(%rsi),%ecx

# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
# asm 1: xorl 52(<m=int64#2),<in13=int64#7d
# asm 2: xorl 52(<m=%rsi),<in13=%eax
xorl 52(%rsi),%eax

# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
# asm 1: xorl 36(<m=int64#2),<in9=int64#8d
# asm 2: xorl 36(<m=%rsi),<in9=%r10d
xorl 36(%rsi),%r10d

# qhasm: *(uint32 *) (out + 20) = in5
# asm 1: movl   <in5=int64#3d,20(<out=int64#1)
# asm 2: movl   <in5=%edx,20(<out=%rdi)
movl   %edx,20(%rdi)

# qhasm: *(uint32 *) (out + 4) = in1
# asm 1: movl   <in1=int64#4d,4(<out=int64#1)
# asm 2: movl   <in1=%ecx,4(<out=%rdi)
movl   %ecx,4(%rdi)

# qhasm: *(uint32 *) (out + 52) = in13
# asm 1: movl   <in13=int64#7d,52(<out=int64#1)
# asm 2: movl   <in13=%eax,52(<out=%rdi)
movl   %eax,52(%rdi)

# qhasm: *(uint32 *) (out + 36) = in9
# asm 1: movl   <in9=int64#8d,36(<out=int64#1)
# asm 2: movl   <in9=%r10d,36(<out=%rdi)
movl   %r10d,36(%rdi)

# qhasm: in10 = diag0
# asm 1: movd   <diag0=int6464#1,>in10=int64#3
# asm 2: movd   <diag0=%xmm0,>in10=%rdx
movd   %xmm0,%rdx

# qhasm: in6 = diag1
# asm 1: movd   <diag1=int6464#2,>in6=int64#4
# asm 2: movd   <diag1=%xmm1,>in6=%rcx
movd   %xmm1,%rcx

# qhasm: in2 = diag2
# asm 1: movd   <diag2=int6464#3,>in2=int64#7
# asm 2: movd   <diag2=%xmm2,>in2=%rax
movd   %xmm2,%rax

# qhasm: in14 = diag3
# asm 1: movd   <diag3=int6464#4,>in14=int64#8
# asm 2: movd   <diag3=%xmm3,>in14=%r10
movd   %xmm3,%r10

# qhasm: diag0 <<<= 96
# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
pshufd $0x39,%xmm0,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag2 <<<= 96
# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x39,%xmm2,%xmm2

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
# asm 1: xorl 40(<m=int64#2),<in10=int64#3d
# asm 2: xorl 40(<m=%rsi),<in10=%edx
xorl 40(%rsi),%edx

# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
# asm 1: xorl 24(<m=int64#2),<in6=int64#4d
# asm 2: xorl 24(<m=%rsi),<in6=%ecx
xorl 24(%rsi),%ecx

# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
# asm 1: xorl 8(<m=int64#2),<in2=int64#7d
# asm 2: xorl 8(<m=%rsi),<in2=%eax
xorl 8(%rsi),%eax

# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
# asm 1: xorl 56(<m=int64#2),<in14=int64#8d
# asm 2: xorl 56(<m=%rsi),<in14=%r10d
xorl 56(%rsi),%r10d

# qhasm: *(uint32 *) (out + 40) = in10
# asm 1: movl   <in10=int64#3d,40(<out=int64#1)
# asm 2: movl   <in10=%edx,40(<out=%rdi)
movl   %edx,40(%rdi)

# qhasm: *(uint32 *) (out + 24) = in6
# asm 1: movl   <in6=int64#4d,24(<out=int64#1)
# asm 2: movl   <in6=%ecx,24(<out=%rdi)
movl   %ecx,24(%rdi)

# qhasm: *(uint32 *) (out + 8) = in2
# asm 1: movl   <in2=int64#7d,8(<out=int64#1)
# asm 2: movl   <in2=%eax,8(<out=%rdi)
movl   %eax,8(%rdi)

# qhasm: *(uint32 *) (out + 56) = in14
# asm 1: movl   <in14=int64#8d,56(<out=int64#1)
# asm 2: movl   <in14=%r10d,56(<out=%rdi)
movl   %r10d,56(%rdi)

# qhasm: in15 = diag0
# asm 1: movd   <diag0=int6464#1,>in15=int64#3
# asm 2: movd   <diag0=%xmm0,>in15=%rdx
movd   %xmm0,%rdx

# qhasm: in11 = diag1
# asm 1: movd   <diag1=int6464#2,>in11=int64#4
# asm 2: movd   <diag1=%xmm1,>in11=%rcx
movd   %xmm1,%rcx

# qhasm: in7 = diag2
# asm 1: movd   <diag2=int6464#3,>in7=int64#7
# asm 2: movd   <diag2=%xmm2,>in7=%rax
movd   %xmm2,%rax

# qhasm: in3 = diag3
# asm 1: movd   <diag3=int6464#4,>in3=int64#8
# asm 2: movd   <diag3=%xmm3,>in3=%r10
movd   %xmm3,%r10

# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
# asm 1: xorl 60(<m=int64#2),<in15=int64#3d
# asm 2: xorl 60(<m=%rsi),<in15=%edx
xorl 60(%rsi),%edx

# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
# asm 1: xorl 44(<m=int64#2),<in11=int64#4d
# asm 2: xorl 44(<m=%rsi),<in11=%ecx
xorl 44(%rsi),%ecx

# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
# asm 1: xorl 28(<m=int64#2),<in7=int64#7d
# asm 2: xorl 28(<m=%rsi),<in7=%eax
xorl 28(%rsi),%eax

# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
# asm 1: xorl 12(<m=int64#2),<in3=int64#8d
# asm 2: xorl 12(<m=%rsi),<in3=%r10d
xorl 12(%rsi),%r10d

# qhasm: *(uint32 *) (out + 60) = in15
# asm 1: movl   <in15=int64#3d,60(<out=int64#1)
# asm 2: movl   <in15=%edx,60(<out=%rdi)
movl   %edx,60(%rdi)

# qhasm: *(uint32 *) (out + 44) = in11
# asm 1: movl   <in11=int64#4d,44(<out=int64#1)
# asm 2: movl   <in11=%ecx,44(<out=%rdi)
movl   %ecx,44(%rdi)

# qhasm: *(uint32 *) (out + 28) = in7
# asm 1: movl   <in7=int64#7d,28(<out=int64#1)
# asm 2: movl   <in7=%eax,28(<out=%rdi)
movl   %eax,28(%rdi)

# qhasm: *(uint32 *) (out + 12) = in3
# asm 1: movl   <in3=int64#8d,12(<out=int64#1)
# asm 2: movl   <in3=%r10d,12(<out=%rdi)
movl   %r10d,12(%rdi)

# qhasm: bytes = bytes_backup
# asm 1: movq <bytes_backup=stack64#1,>bytes=int64#3
# asm 2: movq <bytes_backup=0(%rsp),>bytes=%rdx
movq 0(%rsp),%rdx

# qhasm:   in8 = *(uint32 *) (x + 32)
# asm 1: movl   32(<x=int64#5),>in8=int64#4d
# asm 2: movl   32(<x=%r8),>in8=%ecx
movl   32(%r8),%ecx

# qhasm:   in9 = *(uint32 *) (x + 52)
# asm 1: movl   52(<x=int64#5),>in9=int64#7d
# asm 2: movl   52(<x=%r8),>in9=%eax
movl   52(%r8),%eax

# qhasm:   in8 += 1
# asm 1: add  $1,<in8=int64#4
# asm 2: add  $1,<in8=%rcx
add  $1,%rcx

# qhasm:   in9 <<= 32
# asm 1: shl  $32,<in9=int64#7
# asm 2: shl  $32,<in9=%rax
shl  $32,%rax

# qhasm:   in8 += in9
# asm 1: add  <in9=int64#7,<in8=int64#4
# asm 2: add  <in9=%rax,<in8=%rcx
add  %rax,%rcx

# qhasm:   *(uint32 *) (x + 32) = in8
# asm 1: movl   <in8=int64#4d,32(<x=int64#5)
# asm 2: movl   <in8=%ecx,32(<x=%r8)
movl   %ecx,32(%r8)

# qhasm:   (uint64) in8 >>= 32
# asm 1: shr  $32,<in8=int64#4
# asm 2: shr  $32,<in8=%rcx
shr  $32,%rcx

# qhasm:   *(uint32 *) (x + 52) = in8
# asm 1: movl   <in8=int64#4d,52(<x=int64#5)
# asm 2: movl   <in8=%ecx,52(<x=%r8)
movl   %ecx,52(%r8)
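
# note: the 64-bit block counter lives in state words 8 (low half, byte
# offset 32) and 9 (high half, byte offset 52) of the shuffled layout; the
# sequence above adds 1 to the low word with the high word shifted into the
# top of the same 64-bit register, so the carry propagates correctly.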

# qhasm:                          unsigned>? unsigned<? bytes - 64
# asm 1: cmp  $64,<bytes=int64#3
# asm 2: cmp  $64,<bytes=%rdx
cmp  $64,%rdx
# comment:fp stack unchanged by jump

# qhasm:   goto bytesatleast65 if unsigned>
ja ._bytesatleast65
# comment:fp stack unchanged by jump

# qhasm:     goto bytesatleast64 if !unsigned<
jae ._bytesatleast64

# qhasm:       m = out
# asm 1: mov  <out=int64#1,>m=int64#2
# asm 2: mov  <out=%rdi,>m=%rsi
mov  %rdi,%rsi

# qhasm:       out = ctarget
# asm 1: mov  <ctarget=int64#6,>out=int64#1
# asm 2: mov  <ctarget=%r9,>out=%rdi
mov  %r9,%rdi

# qhasm:       i = bytes
# asm 1: mov  <bytes=int64#3,>i=int64#4
# asm 2: mov  <bytes=%rdx,>i=%rcx
mov  %rdx,%rcx

# qhasm:       while (i) { *out++ = *m++; --i }
rep movsb
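
# note: this copy runs only for a short final block: out still points into
# tmp, so the freshly produced bytes are copied to the saved destination
# ctarget and the keystream bytes beyond `bytes` are discarded.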
# comment:fp stack unchanged by fallthrough

# qhasm:     bytesatleast64:
._bytesatleast64:
# comment:fp stack unchanged by fallthrough

# qhasm:     done:
._done:

# qhasm:     leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

# qhasm:   bytesatleast65:
._bytesatleast65:

# qhasm:   bytes -= 64
# asm 1: sub  $64,<bytes=int64#3
# asm 2: sub  $64,<bytes=%rdx
sub  $64,%rdx

# qhasm:   out += 64
# asm 1: add  $64,<out=int64#1
# asm 2: add  $64,<out=%rdi
add  $64,%rdi

# qhasm:   m += 64
# asm 1: add  $64,<m=int64#2
# asm 2: add  $64,<m=%rsi
add  $64,%rsi
# comment:fp stack unchanged by jump

# qhasm: goto bytesatleast1
jmp ._bytesatleast1

# qhasm: enter ECRYPT_init
.text
.p2align 5
.globl _ECRYPT_init
.globl ECRYPT_init
_ECRYPT_init:
ECRYPT_init:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp
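
# note: ECRYPT_init has nothing to initialize; all cipher state is set up
# by ECRYPT_keysetup and ECRYPT_ivsetup below.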

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

# qhasm: enter ECRYPT_keysetup
.text
.p2align 5
.globl _ECRYPT_keysetup
.globl ECRYPT_keysetup
_ECRYPT_keysetup:
ECRYPT_keysetup:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp

# qhasm:   k = arg2
# asm 1: mov  <arg2=int64#2,>k=int64#2
# asm 2: mov  <arg2=%rsi,>k=%rsi
mov  %rsi,%rsi

# qhasm:   kbits = arg3
# asm 1: mov  <arg3=int64#3,>kbits=int64#3
# asm 2: mov  <arg3=%rdx,>kbits=%rdx
mov  %rdx,%rdx

# qhasm:   x = arg1
# asm 1: mov  <arg1=int64#1,>x=int64#1
# asm 2: mov  <arg1=%rdi,>x=%rdi
mov  %rdi,%rdi

# qhasm:   in1 = *(uint32 *) (k + 0)
# asm 1: movl   0(<k=int64#2),>in1=int64#5d
# asm 2: movl   0(<k=%rsi),>in1=%r8d
movl   0(%rsi),%r8d

# qhasm:   in2 = *(uint32 *) (k + 4)
# asm 1: movl   4(<k=int64#2),>in2=int64#6d
# asm 2: movl   4(<k=%rsi),>in2=%r9d
movl   4(%rsi),%r9d

# qhasm:   in3 = *(uint32 *) (k + 8)
# asm 1: movl   8(<k=int64#2),>in3=int64#7d
# asm 2: movl   8(<k=%rsi),>in3=%eax
movl   8(%rsi),%eax

# qhasm:   in4 = *(uint32 *) (k + 12)
# asm 1: movl   12(<k=int64#2),>in4=int64#8d
# asm 2: movl   12(<k=%rsi),>in4=%r10d
movl   12(%rsi),%r10d

# qhasm:   *(uint32 *) (x + 20) = in1
# asm 1: movl   <in1=int64#5d,20(<x=int64#1)
# asm 2: movl   <in1=%r8d,20(<x=%rdi)
movl   %r8d,20(%rdi)

# qhasm:   *(uint32 *) (x + 40) = in2
# asm 1: movl   <in2=int64#6d,40(<x=int64#1)
# asm 2: movl   <in2=%r9d,40(<x=%rdi)
movl   %r9d,40(%rdi)

# qhasm:   *(uint32 *) (x + 60) = in3
# asm 1: movl   <in3=int64#7d,60(<x=int64#1)
# asm 2: movl   <in3=%eax,60(<x=%rdi)
movl   %eax,60(%rdi)

# qhasm:   *(uint32 *) (x + 48) = in4
# asm 1: movl   <in4=int64#8d,48(<x=int64#1)
# asm 2: movl   <in4=%r10d,48(<x=%rdi)
movl   %r10d,48(%rdi)
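
# note: key words are stored at shuffled offsets (20, 40, 60, 48, ...) so
# that the 16-byte loads in the block routine pick up the matrix diagonals
# directly, avoiding any shuffling on the fast path.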

# qhasm:                    unsigned<? kbits - 256
# asm 1: cmp  $256,<kbits=int64#3
# asm 2: cmp  $256,<kbits=%rdx
cmp  $256,%rdx
# comment:fp stack unchanged by jump

# qhasm:   goto kbits128 if unsigned<
jb ._kbits128

# qhasm:   kbits256:
._kbits256:

# qhasm:     in11 = *(uint32 *) (k + 16)
# asm 1: movl   16(<k=int64#2),>in11=int64#3d
# asm 2: movl   16(<k=%rsi),>in11=%edx
movl   16(%rsi),%edx

# qhasm:     in12 = *(uint32 *) (k + 20)
# asm 1: movl   20(<k=int64#2),>in12=int64#4d
# asm 2: movl   20(<k=%rsi),>in12=%ecx
movl   20(%rsi),%ecx

# qhasm:     in13 = *(uint32 *) (k + 24)
# asm 1: movl   24(<k=int64#2),>in13=int64#5d
# asm 2: movl   24(<k=%rsi),>in13=%r8d
movl   24(%rsi),%r8d

# qhasm:     in14 = *(uint32 *) (k + 28)
# asm 1: movl   28(<k=int64#2),>in14=int64#2d
# asm 2: movl   28(<k=%rsi),>in14=%esi
movl   28(%rsi),%esi

# qhasm:     *(uint32 *) (x + 28) = in11
# asm 1: movl   <in11=int64#3d,28(<x=int64#1)
# asm 2: movl   <in11=%edx,28(<x=%rdi)
movl   %edx,28(%rdi)

# qhasm:     *(uint32 *) (x + 16) = in12
# asm 1: movl   <in12=int64#4d,16(<x=int64#1)
# asm 2: movl   <in12=%ecx,16(<x=%rdi)
movl   %ecx,16(%rdi)

# qhasm:     *(uint32 *) (x + 36) = in13
# asm 1: movl   <in13=int64#5d,36(<x=int64#1)
# asm 2: movl   <in13=%r8d,36(<x=%rdi)
movl   %r8d,36(%rdi)

# qhasm:     *(uint32 *) (x + 56) = in14
# asm 1: movl   <in14=int64#2d,56(<x=int64#1)
# asm 2: movl   <in14=%esi,56(<x=%rdi)
movl   %esi,56(%rdi)

# qhasm:     in0 = 1634760805
# asm 1: mov  $1634760805,>in0=int64#2
# asm 2: mov  $1634760805,>in0=%rsi
mov  $1634760805,%rsi

# qhasm:     in5 = 857760878
# asm 1: mov  $857760878,>in5=int64#3
# asm 2: mov  $857760878,>in5=%rdx
mov  $857760878,%rdx

# qhasm:     in10 = 2036477234
# asm 1: mov  $2036477234,>in10=int64#4
# asm 2: mov  $2036477234,>in10=%rcx
mov  $2036477234,%rcx

# qhasm:     in15 = 1797285236
# asm 1: mov  $1797285236,>in15=int64#5
# asm 2: mov  $1797285236,>in15=%r8
mov  $1797285236,%r8
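
# note: these are the Salsa20 "sigma" constants for 256-bit keys:
# 1634760805, 857760878, 2036477234, 1797285236 are the little-endian words
# of the ASCII string "expand 32-byte k".  They fill state words 0, 5, 10
# and 15, which the shuffled layout keeps at byte offsets 0, 4, 8 and 12.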

# qhasm:     *(uint32 *) (x + 0) = in0
# asm 1: movl   <in0=int64#2d,0(<x=int64#1)
# asm 2: movl   <in0=%esi,0(<x=%rdi)
movl   %esi,0(%rdi)

# qhasm:     *(uint32 *) (x + 4) = in5
# asm 1: movl   <in5=int64#3d,4(<x=int64#1)
# asm 2: movl   <in5=%edx,4(<x=%rdi)
movl   %edx,4(%rdi)

# qhasm:     *(uint32 *) (x + 8) = in10
# asm 1: movl   <in10=int64#4d,8(<x=int64#1)
# asm 2: movl   <in10=%ecx,8(<x=%rdi)
movl   %ecx,8(%rdi)

# qhasm:     *(uint32 *) (x + 12) = in15
# asm 1: movl   <in15=int64#5d,12(<x=int64#1)
# asm 2: movl   <in15=%r8d,12(<x=%rdi)
movl   %r8d,12(%rdi)
# comment:fp stack unchanged by jump

# qhasm:   goto keysetupdone
jmp ._keysetupdone

# qhasm:   kbits128:
._kbits128:

# qhasm:     in11 = *(uint32 *) (k + 0)
# asm 1: movl   0(<k=int64#2),>in11=int64#3d
# asm 2: movl   0(<k=%rsi),>in11=%edx
movl   0(%rsi),%edx

# qhasm:     in12 = *(uint32 *) (k + 4)
# asm 1: movl   4(<k=int64#2),>in12=int64#4d
# asm 2: movl   4(<k=%rsi),>in12=%ecx
movl   4(%rsi),%ecx

# qhasm:     in13 = *(uint32 *) (k + 8)
# asm 1: movl   8(<k=int64#2),>in13=int64#5d
# asm 2: movl   8(<k=%rsi),>in13=%r8d
movl   8(%rsi),%r8d

# qhasm:     in14 = *(uint32 *) (k + 12)
# asm 1: movl   12(<k=int64#2),>in14=int64#2d
# asm 2: movl   12(<k=%rsi),>in14=%esi
movl   12(%rsi),%esi

# qhasm:     *(uint32 *) (x + 28) = in11
# asm 1: movl   <in11=int64#3d,28(<x=int64#1)
# asm 2: movl   <in11=%edx,28(<x=%rdi)
movl   %edx,28(%rdi)

# qhasm:     *(uint32 *) (x + 16) = in12
# asm 1: movl   <in12=int64#4d,16(<x=int64#1)
# asm 2: movl   <in12=%ecx,16(<x=%rdi)
movl   %ecx,16(%rdi)

# qhasm:     *(uint32 *) (x + 36) = in13
# asm 1: movl   <in13=int64#5d,36(<x=int64#1)
# asm 2: movl   <in13=%r8d,36(<x=%rdi)
movl   %r8d,36(%rdi)

# qhasm:     *(uint32 *) (x + 56) = in14
# asm 1: movl   <in14=int64#2d,56(<x=int64#1)
# asm 2: movl   <in14=%esi,56(<x=%rdi)
movl   %esi,56(%rdi)

# qhasm:     in0 = 1634760805
# asm 1: mov  $1634760805,>in0=int64#2
# asm 2: mov  $1634760805,>in0=%rsi
mov  $1634760805,%rsi

# qhasm:     in5 = 824206446
# asm 1: mov  $824206446,>in5=int64#3
# asm 2: mov  $824206446,>in5=%rdx
mov  $824206446,%rdx

# qhasm:     in10 = 2036477238
# asm 1: mov  $2036477238,>in10=int64#4
# asm 2: mov  $2036477238,>in10=%rcx
mov  $2036477238,%rcx

# qhasm:     in15 = 1797285236
# asm 1: mov  $1797285236,>in15=int64#5
# asm 2: mov  $1797285236,>in15=%r8
mov  $1797285236,%r8
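
# note: 128-bit keys reuse the same 16 key bytes for both halves of the key
# material and use the "tau" constants instead: the little-endian words of
# "expand 16-byte k", of which only words 5 and 10 differ from sigma.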

# qhasm:     *(uint32 *) (x + 0) = in0
# asm 1: movl   <in0=int64#2d,0(<x=int64#1)
# asm 2: movl   <in0=%esi,0(<x=%rdi)
movl   %esi,0(%rdi)

# qhasm:     *(uint32 *) (x + 4) = in5
# asm 1: movl   <in5=int64#3d,4(<x=int64#1)
# asm 2: movl   <in5=%edx,4(<x=%rdi)
movl   %edx,4(%rdi)

# qhasm:     *(uint32 *) (x + 8) = in10
# asm 1: movl   <in10=int64#4d,8(<x=int64#1)
# asm 2: movl   <in10=%ecx,8(<x=%rdi)
movl   %ecx,8(%rdi)

# qhasm:     *(uint32 *) (x + 12) = in15
# asm 1: movl   <in15=int64#5d,12(<x=int64#1)
# asm 2: movl   <in15=%r8d,12(<x=%rdi)
movl   %r8d,12(%rdi)

# qhasm:   keysetupdone:
._keysetupdone:

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

# qhasm: enter ECRYPT_ivsetup
.text
.p2align 5
.globl _ECRYPT_ivsetup
.globl ECRYPT_ivsetup
_ECRYPT_ivsetup:
ECRYPT_ivsetup:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp

# qhasm:   iv = arg2
# asm 1: mov  <arg2=int64#2,>iv=int64#2
# asm 2: mov  <arg2=%rsi,>iv=%rsi
mov  %rsi,%rsi

# qhasm:   x = arg1
# asm 1: mov  <arg1=int64#1,>x=int64#1
# asm 2: mov  <arg1=%rdi,>x=%rdi
mov  %rdi,%rdi

# qhasm:   in6 = *(uint32 *) (iv + 0)
# asm 1: movl   0(<iv=int64#2),>in6=int64#5d
# asm 2: movl   0(<iv=%rsi),>in6=%r8d
movl   0(%rsi),%r8d

# qhasm:   in7 = *(uint32 *) (iv + 4)
# asm 1: movl   4(<iv=int64#2),>in7=int64#2d
# asm 2: movl   4(<iv=%rsi),>in7=%esi
movl   4(%rsi),%esi

# qhasm:   in8 = 0
# asm 1: mov  $0,>in8=int64#6
# asm 2: mov  $0,>in8=%r9
mov  $0,%r9

# qhasm:   in9 = 0
# asm 1: mov  $0,>in9=int64#7
# asm 2: mov  $0,>in9=%rax
mov  $0,%rax

# qhasm:   *(uint32 *) (x + 24) = in6
# asm 1: movl   <in6=int64#5d,24(<x=int64#1)
# asm 2: movl   <in6=%r8d,24(<x=%rdi)
movl   %r8d,24(%rdi)

# qhasm:   *(uint32 *) (x + 44) = in7
# asm 1: movl   <in7=int64#2d,44(<x=int64#1)
# asm 2: movl   <in7=%esi,44(<x=%rdi)
movl   %esi,44(%rdi)

# qhasm:   *(uint32 *) (x + 32) = in8
# asm 1: movl   <in8=int64#6d,32(<x=int64#1)
# asm 2: movl   <in8=%r9d,32(<x=%rdi)
movl   %r9d,32(%rdi)

# qhasm:   *(uint32 *) (x + 52) = in9
# asm 1: movl   <in9=int64#7d,52(<x=int64#1)
# asm 2: movl   <in9=%eax,52(<x=%rdi)
movl   %eax,52(%rdi)
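
# note: the IV fills state words 6 and 7 (byte offsets 24 and 44 in the
# shuffled layout) and the block counter in words 8 and 9 is reset to zero,
# so each (key, IV) pair starts its keystream at block 0.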

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
