1
2# qhasm: int64 tp
3
4# qhasm: int64 pos
5
6# qhasm: int64 b
7
8# qhasm: int64 basep
9
10# qhasm: input tp
11
12# qhasm: input pos
13
14# qhasm: input b
15
16# qhasm: input basep
17
18# qhasm: int64 mask
19
20# qhasm: int64 u
21
22# qhasm: int64 tysubx0
23
24# qhasm: int64 tysubx1
25
26# qhasm: int64 tysubx2
27
28# qhasm: int64 tysubx3
29
30# qhasm: int64 txaddy0
31
32# qhasm: int64 txaddy1
33
34# qhasm: int64 txaddy2
35
36# qhasm: int64 txaddy3
37
38# qhasm: int64 tt2d0
39
40# qhasm: int64 tt2d1
41
42# qhasm: int64 tt2d2
43
44# qhasm: int64 tt2d3
45
46# qhasm: int64 tt0
47
48# qhasm: int64 tt1
49
50# qhasm: int64 tt2
51
52# qhasm: int64 tt3
53
54# qhasm: int64 subt0
55
56# qhasm: int64 subt1
57
58# qhasm: int64 t
59
60# qhasm: stack64 tp_stack
61
62# qhasm:   int64 caller1
63
64# qhasm:   int64 caller2
65
66# qhasm:   int64 caller3
67
68# qhasm:   int64 caller4
69
70# qhasm:   int64 caller5
71
72# qhasm:   int64 caller6
73
74# qhasm:   int64 caller7
75
76# qhasm:   caller caller1
77
78# qhasm:   caller caller2
79
80# qhasm:   caller caller3
81
82# qhasm:   caller caller4
83
84# qhasm:   caller caller5
85
86# qhasm:   caller caller6
87
88# qhasm:   caller caller7
89
90# qhasm:   stack64 caller1_stack
91
92# qhasm:   stack64 caller2_stack
93
94# qhasm:   stack64 caller3_stack
95
96# qhasm:   stack64 caller4_stack
97
98# qhasm:   stack64 caller5_stack
99
100# qhasm:   stack64 caller6_stack
101
102# qhasm:   stack64 caller7_stack
103
# qhasm: enter crypto_sign_ed25519_amd64_64_choose_t
#-----------------------------------------------------------------------
# crypto_sign_ed25519_amd64_64_choose_t(tp, pos, b, basep)
# ABI:   SysV AMD64 (qhasm-generated, AT&T syntax)
# In:    %rdi = tp    -- output buffer receiving the selected table entry
#        %rsi = pos   -- table row index (rows are 768 bytes, see below)
#        %rdx = b     -- signed selector; |b| picks the entry within the
#                       row, the sign of b selects the swapped form
#        %rcx = basep -- base address of the precomputed table
# The lookup is branchless: every candidate entry is always loaded and
# folded in with cmov, so the memory-access pattern and timing do not
# depend on the secret selector b (constant-time table lookup).
#-----------------------------------------------------------------------
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_choose_t
.globl crypto_sign_ed25519_amd64_64_choose_t
_crypto_sign_ed25519_amd64_64_choose_t:
crypto_sign_ed25519_amd64_64_choose_t:
# Reserve scratch space and align the stack:
# %r11 = (%rsp & 31) + 64; %rsp -= %r11  ==>  %rsp becomes 32-byte
# aligned with at least 64 bytes (the 8 qword slots used below) free.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp

# qhasm:   caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
# NOTE(review): at this point %r11 holds the stack adjustment computed
# above, so slot 0 effectively records the amount needed to restore
# %rsp; presumably the epilogue (beyond this excerpt) reloads it --
# confirm against the function's exit sequence.
movq %r11,0(%rsp)

# qhasm:   caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
# Spill the callee-saved registers used below (SysV: %r12-%r15, %rbx, %rbp).
movq %r12,8(%rsp)

# qhasm:   caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm:   caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm:   caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm:   caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm:   caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

# qhasm: tp_stack = tp
# asm 1: movq <tp=int64#1,>tp_stack=stack64#8
# asm 2: movq <tp=%rdi,>tp_stack=56(%rsp)
# Save the output pointer; %rdi is reused next for the scaled row offset.
movq %rdi,56(%rsp)

# qhasm: pos *= 768
# asm 1: imulq  $768,<pos=int64#2,>pos=int64#1
# asm 2: imulq  $768,<pos=%rsi,>pos=%rdi
# Scale the row index: each row is 768 bytes = 8 entries of 96 bytes
# (12 qwords per entry, matching the offsets used by the lookups below).
imulq  $768,%rsi,%rdi

# qhasm: mask = b
# asm 1: mov  <b=int64#3,>mask=int64#2
# asm 2: mov  <b=%rdx,>mask=%rsi
mov  %rdx,%rsi

# qhasm: (int64) mask >>= 7
# asm 1: sar  $7,<mask=int64#2
# asm 2: sar  $7,<mask=%rsi
# Arithmetic shift: mask = 0 if b >= 0, mask = -1 if b < 0
# (assumes b is a small signed value, |b| < 128 -- TODO confirm caller).
sar  $7,%rsi

# qhasm: u = b
# asm 1: mov  <b=int64#3,>u=int64#5
# asm 2: mov  <b=%rdx,>u=%r8
mov  %rdx,%r8

# qhasm: u += mask
# asm 1: add  <mask=int64#2,<u=int64#5
# asm 2: add  <mask=%rsi,<u=%r8
add  %rsi,%r8

# qhasm: u ^= mask
# asm 1: xor  <mask=int64#2,<u=int64#5
# asm 2: xor  <mask=%rsi,<u=%r8
# u = (b + mask) ^ mask = |b|: branchless two's-complement absolute value.
xor  %rsi,%r8
185
186# qhasm: tysubx0 = 1
187# asm 1: mov  $1,>tysubx0=int64#2
188# asm 2: mov  $1,>tysubx0=%rsi
189mov  $1,%rsi
190
191# qhasm: tysubx1 = 0
192# asm 1: mov  $0,>tysubx1=int64#6
193# asm 2: mov  $0,>tysubx1=%r9
194mov  $0,%r9
195
196# qhasm: tysubx2 = 0
197# asm 1: mov  $0,>tysubx2=int64#7
198# asm 2: mov  $0,>tysubx2=%rax
199mov  $0,%rax
200
201# qhasm: tysubx3 = 0
202# asm 1: mov  $0,>tysubx3=int64#8
203# asm 2: mov  $0,>tysubx3=%r10
204mov  $0,%r10
205
206# qhasm: txaddy0 = 1
207# asm 1: mov  $1,>txaddy0=int64#9
208# asm 2: mov  $1,>txaddy0=%r11
209mov  $1,%r11
210
211# qhasm: txaddy1 = 0
212# asm 1: mov  $0,>txaddy1=int64#10
213# asm 2: mov  $0,>txaddy1=%r12
214mov  $0,%r12
215
216# qhasm: txaddy2 = 0
217# asm 1: mov  $0,>txaddy2=int64#11
218# asm 2: mov  $0,>txaddy2=%r13
219mov  $0,%r13
220
221# qhasm: txaddy3 = 0
222# asm 1: mov  $0,>txaddy3=int64#12
223# asm 2: mov  $0,>txaddy3=%r14
224mov  $0,%r14
225
# qhasm: =? u - 1
# asm 1: cmp  $1,<u=int64#5
# asm 2: cmp  $1,<u=%r8
# Set ZF iff u == 1. The movq loads below leave the flags untouched, so
# this single cmp drives the entire run of cmove selects that follows.
# The same cmp / load / cmove pattern repeats for u == 2..8 further down.
cmp  $1,%r8

# qhasm: t = *(uint64 *)(basep + 0 + pos)
# asm 1: movq   0(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   0(<basep=%rcx,<pos=%rdi),>t=%r15
# The candidate entry is always loaded, selected or not (constant-time).
movq   0(%rcx,%rdi),%r15

# qhasm: tysubx0 = t if =
# asm 1: cmove <t=int64#13,<tysubx0=int64#2
# asm 2: cmove <t=%r15,<tysubx0=%rsi
# Branchless select: overwrite the accumulator only when u == 1.
cmove %r15,%rsi

# qhasm: t = *(uint64 *)(basep + 8 + pos)
# asm 1: movq   8(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   8(<basep=%rcx,<pos=%rdi),>t=%r15
movq   8(%rcx,%rdi),%r15

# qhasm: tysubx1 = t if =
# asm 1: cmove <t=int64#13,<tysubx1=int64#6
# asm 2: cmove <t=%r15,<tysubx1=%r9
cmove %r15,%r9

# qhasm: t = *(uint64 *)(basep + 16 + pos)
# asm 1: movq   16(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   16(<basep=%rcx,<pos=%rdi),>t=%r15
movq   16(%rcx,%rdi),%r15

# qhasm: tysubx2 = t if =
# asm 1: cmove <t=int64#13,<tysubx2=int64#7
# asm 2: cmove <t=%r15,<tysubx2=%rax
cmove %r15,%rax

# qhasm: t = *(uint64 *)(basep + 24 + pos)
# asm 1: movq   24(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   24(<basep=%rcx,<pos=%rdi),>t=%r15
movq   24(%rcx,%rdi),%r15

# qhasm: tysubx3 = t if =
# asm 1: cmove <t=int64#13,<tysubx3=int64#8
# asm 2: cmove <t=%r15,<tysubx3=%r10
cmove %r15,%r10

# qhasm: t = *(uint64 *)(basep + 32 + pos)
# asm 1: movq   32(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   32(<basep=%rcx,<pos=%rdi),>t=%r15
# Second group of four qwords (txaddy limbs) of the same entry.
movq   32(%rcx,%rdi),%r15

# qhasm: txaddy0 = t if =
# asm 1: cmove <t=int64#13,<txaddy0=int64#9
# asm 2: cmove <t=%r15,<txaddy0=%r11
cmove %r15,%r11

# qhasm: t = *(uint64 *)(basep + 40 + pos)
# asm 1: movq   40(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   40(<basep=%rcx,<pos=%rdi),>t=%r15
movq   40(%rcx,%rdi),%r15

# qhasm: txaddy1 = t if =
# asm 1: cmove <t=int64#13,<txaddy1=int64#10
# asm 2: cmove <t=%r15,<txaddy1=%r12
cmove %r15,%r12

# qhasm: t = *(uint64 *)(basep + 48 + pos)
# asm 1: movq   48(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   48(<basep=%rcx,<pos=%rdi),>t=%r15
movq   48(%rcx,%rdi),%r15

# qhasm: txaddy2 = t if =
# asm 1: cmove <t=int64#13,<txaddy2=int64#11
# asm 2: cmove <t=%r15,<txaddy2=%r13
cmove %r15,%r13

# qhasm: t = *(uint64 *)(basep + 56 + pos)
# asm 1: movq   56(<basep=int64#4,<pos=int64#1),>t=int64#13
# asm 2: movq   56(<basep=%rcx,<pos=%rdi),>t=%r15
movq   56(%rcx,%rdi),%r15

# qhasm: txaddy3 = t if =
# asm 1: cmove <t=int64#13,<txaddy3=int64#12
# asm 2: cmove <t=%r15,<txaddy3=%r14
cmove %r15,%r14
310
311# qhasm: =? u - 2
312# asm 1: cmp  $2,<u=int64#5
313# asm 2: cmp  $2,<u=%r8
314cmp  $2,%r8
315
316# qhasm: t = *(uint64 *)(basep + 96 + pos)
317# asm 1: movq   96(<basep=int64#4,<pos=int64#1),>t=int64#13
318# asm 2: movq   96(<basep=%rcx,<pos=%rdi),>t=%r15
319movq   96(%rcx,%rdi),%r15
320
321# qhasm: tysubx0 = t if =
322# asm 1: cmove <t=int64#13,<tysubx0=int64#2
323# asm 2: cmove <t=%r15,<tysubx0=%rsi
324cmove %r15,%rsi
325
326# qhasm: t = *(uint64 *)(basep + 104 + pos)
327# asm 1: movq   104(<basep=int64#4,<pos=int64#1),>t=int64#13
328# asm 2: movq   104(<basep=%rcx,<pos=%rdi),>t=%r15
329movq   104(%rcx,%rdi),%r15
330
331# qhasm: tysubx1 = t if =
332# asm 1: cmove <t=int64#13,<tysubx1=int64#6
333# asm 2: cmove <t=%r15,<tysubx1=%r9
334cmove %r15,%r9
335
336# qhasm: t = *(uint64 *)(basep + 112 + pos)
337# asm 1: movq   112(<basep=int64#4,<pos=int64#1),>t=int64#13
338# asm 2: movq   112(<basep=%rcx,<pos=%rdi),>t=%r15
339movq   112(%rcx,%rdi),%r15
340
341# qhasm: tysubx2 = t if =
342# asm 1: cmove <t=int64#13,<tysubx2=int64#7
343# asm 2: cmove <t=%r15,<tysubx2=%rax
344cmove %r15,%rax
345
346# qhasm: t = *(uint64 *)(basep + 120 + pos)
347# asm 1: movq   120(<basep=int64#4,<pos=int64#1),>t=int64#13
348# asm 2: movq   120(<basep=%rcx,<pos=%rdi),>t=%r15
349movq   120(%rcx,%rdi),%r15
350
351# qhasm: tysubx3 = t if =
352# asm 1: cmove <t=int64#13,<tysubx3=int64#8
353# asm 2: cmove <t=%r15,<tysubx3=%r10
354cmove %r15,%r10
355
356# qhasm: t = *(uint64 *)(basep + 128 + pos)
357# asm 1: movq   128(<basep=int64#4,<pos=int64#1),>t=int64#13
358# asm 2: movq   128(<basep=%rcx,<pos=%rdi),>t=%r15
359movq   128(%rcx,%rdi),%r15
360
361# qhasm: txaddy0 = t if =
362# asm 1: cmove <t=int64#13,<txaddy0=int64#9
363# asm 2: cmove <t=%r15,<txaddy0=%r11
364cmove %r15,%r11
365
366# qhasm: t = *(uint64 *)(basep + 136 + pos)
367# asm 1: movq   136(<basep=int64#4,<pos=int64#1),>t=int64#13
368# asm 2: movq   136(<basep=%rcx,<pos=%rdi),>t=%r15
369movq   136(%rcx,%rdi),%r15
370
371# qhasm: txaddy1 = t if =
372# asm 1: cmove <t=int64#13,<txaddy1=int64#10
373# asm 2: cmove <t=%r15,<txaddy1=%r12
374cmove %r15,%r12
375
376# qhasm: t = *(uint64 *)(basep + 144 + pos)
377# asm 1: movq   144(<basep=int64#4,<pos=int64#1),>t=int64#13
378# asm 2: movq   144(<basep=%rcx,<pos=%rdi),>t=%r15
379movq   144(%rcx,%rdi),%r15
380
381# qhasm: txaddy2 = t if =
382# asm 1: cmove <t=int64#13,<txaddy2=int64#11
383# asm 2: cmove <t=%r15,<txaddy2=%r13
384cmove %r15,%r13
385
386# qhasm: t = *(uint64 *)(basep + 152 + pos)
387# asm 1: movq   152(<basep=int64#4,<pos=int64#1),>t=int64#13
388# asm 2: movq   152(<basep=%rcx,<pos=%rdi),>t=%r15
389movq   152(%rcx,%rdi),%r15
390
391# qhasm: txaddy3 = t if =
392# asm 1: cmove <t=int64#13,<txaddy3=int64#12
393# asm 2: cmove <t=%r15,<txaddy3=%r14
394cmove %r15,%r14
395
396# qhasm: =? u - 3
397# asm 1: cmp  $3,<u=int64#5
398# asm 2: cmp  $3,<u=%r8
399cmp  $3,%r8
400
401# qhasm: t = *(uint64 *)(basep + 192 + pos)
402# asm 1: movq   192(<basep=int64#4,<pos=int64#1),>t=int64#13
403# asm 2: movq   192(<basep=%rcx,<pos=%rdi),>t=%r15
404movq   192(%rcx,%rdi),%r15
405
406# qhasm: tysubx0 = t if =
407# asm 1: cmove <t=int64#13,<tysubx0=int64#2
408# asm 2: cmove <t=%r15,<tysubx0=%rsi
409cmove %r15,%rsi
410
411# qhasm: t = *(uint64 *)(basep + 200 + pos)
412# asm 1: movq   200(<basep=int64#4,<pos=int64#1),>t=int64#13
413# asm 2: movq   200(<basep=%rcx,<pos=%rdi),>t=%r15
414movq   200(%rcx,%rdi),%r15
415
416# qhasm: tysubx1 = t if =
417# asm 1: cmove <t=int64#13,<tysubx1=int64#6
418# asm 2: cmove <t=%r15,<tysubx1=%r9
419cmove %r15,%r9
420
421# qhasm: t = *(uint64 *)(basep + 208 + pos)
422# asm 1: movq   208(<basep=int64#4,<pos=int64#1),>t=int64#13
423# asm 2: movq   208(<basep=%rcx,<pos=%rdi),>t=%r15
424movq   208(%rcx,%rdi),%r15
425
426# qhasm: tysubx2 = t if =
427# asm 1: cmove <t=int64#13,<tysubx2=int64#7
428# asm 2: cmove <t=%r15,<tysubx2=%rax
429cmove %r15,%rax
430
431# qhasm: t = *(uint64 *)(basep + 216 + pos)
432# asm 1: movq   216(<basep=int64#4,<pos=int64#1),>t=int64#13
433# asm 2: movq   216(<basep=%rcx,<pos=%rdi),>t=%r15
434movq   216(%rcx,%rdi),%r15
435
436# qhasm: tysubx3 = t if =
437# asm 1: cmove <t=int64#13,<tysubx3=int64#8
438# asm 2: cmove <t=%r15,<tysubx3=%r10
439cmove %r15,%r10
440
441# qhasm: t = *(uint64 *)(basep + 224 + pos)
442# asm 1: movq   224(<basep=int64#4,<pos=int64#1),>t=int64#13
443# asm 2: movq   224(<basep=%rcx,<pos=%rdi),>t=%r15
444movq   224(%rcx,%rdi),%r15
445
446# qhasm: txaddy0 = t if =
447# asm 1: cmove <t=int64#13,<txaddy0=int64#9
448# asm 2: cmove <t=%r15,<txaddy0=%r11
449cmove %r15,%r11
450
451# qhasm: t = *(uint64 *)(basep + 232 + pos)
452# asm 1: movq   232(<basep=int64#4,<pos=int64#1),>t=int64#13
453# asm 2: movq   232(<basep=%rcx,<pos=%rdi),>t=%r15
454movq   232(%rcx,%rdi),%r15
455
456# qhasm: txaddy1 = t if =
457# asm 1: cmove <t=int64#13,<txaddy1=int64#10
458# asm 2: cmove <t=%r15,<txaddy1=%r12
459cmove %r15,%r12
460
461# qhasm: t = *(uint64 *)(basep + 240 + pos)
462# asm 1: movq   240(<basep=int64#4,<pos=int64#1),>t=int64#13
463# asm 2: movq   240(<basep=%rcx,<pos=%rdi),>t=%r15
464movq   240(%rcx,%rdi),%r15
465
466# qhasm: txaddy2 = t if =
467# asm 1: cmove <t=int64#13,<txaddy2=int64#11
468# asm 2: cmove <t=%r15,<txaddy2=%r13
469cmove %r15,%r13
470
471# qhasm: t = *(uint64 *)(basep + 248 + pos)
472# asm 1: movq   248(<basep=int64#4,<pos=int64#1),>t=int64#13
473# asm 2: movq   248(<basep=%rcx,<pos=%rdi),>t=%r15
474movq   248(%rcx,%rdi),%r15
475
476# qhasm: txaddy3 = t if =
477# asm 1: cmove <t=int64#13,<txaddy3=int64#12
478# asm 2: cmove <t=%r15,<txaddy3=%r14
479cmove %r15,%r14
480
481# qhasm: =? u - 4
482# asm 1: cmp  $4,<u=int64#5
483# asm 2: cmp  $4,<u=%r8
484cmp  $4,%r8
485
486# qhasm: t = *(uint64 *)(basep + 288 + pos)
487# asm 1: movq   288(<basep=int64#4,<pos=int64#1),>t=int64#13
488# asm 2: movq   288(<basep=%rcx,<pos=%rdi),>t=%r15
489movq   288(%rcx,%rdi),%r15
490
491# qhasm: tysubx0 = t if =
492# asm 1: cmove <t=int64#13,<tysubx0=int64#2
493# asm 2: cmove <t=%r15,<tysubx0=%rsi
494cmove %r15,%rsi
495
496# qhasm: t = *(uint64 *)(basep + 296 + pos)
497# asm 1: movq   296(<basep=int64#4,<pos=int64#1),>t=int64#13
498# asm 2: movq   296(<basep=%rcx,<pos=%rdi),>t=%r15
499movq   296(%rcx,%rdi),%r15
500
501# qhasm: tysubx1 = t if =
502# asm 1: cmove <t=int64#13,<tysubx1=int64#6
503# asm 2: cmove <t=%r15,<tysubx1=%r9
504cmove %r15,%r9
505
506# qhasm: t = *(uint64 *)(basep + 304 + pos)
507# asm 1: movq   304(<basep=int64#4,<pos=int64#1),>t=int64#13
508# asm 2: movq   304(<basep=%rcx,<pos=%rdi),>t=%r15
509movq   304(%rcx,%rdi),%r15
510
511# qhasm: tysubx2 = t if =
512# asm 1: cmove <t=int64#13,<tysubx2=int64#7
513# asm 2: cmove <t=%r15,<tysubx2=%rax
514cmove %r15,%rax
515
516# qhasm: t = *(uint64 *)(basep + 312 + pos)
517# asm 1: movq   312(<basep=int64#4,<pos=int64#1),>t=int64#13
518# asm 2: movq   312(<basep=%rcx,<pos=%rdi),>t=%r15
519movq   312(%rcx,%rdi),%r15
520
521# qhasm: tysubx3 = t if =
522# asm 1: cmove <t=int64#13,<tysubx3=int64#8
523# asm 2: cmove <t=%r15,<tysubx3=%r10
524cmove %r15,%r10
525
526# qhasm: t = *(uint64 *)(basep + 320 + pos)
527# asm 1: movq   320(<basep=int64#4,<pos=int64#1),>t=int64#13
528# asm 2: movq   320(<basep=%rcx,<pos=%rdi),>t=%r15
529movq   320(%rcx,%rdi),%r15
530
531# qhasm: txaddy0 = t if =
532# asm 1: cmove <t=int64#13,<txaddy0=int64#9
533# asm 2: cmove <t=%r15,<txaddy0=%r11
534cmove %r15,%r11
535
536# qhasm: t = *(uint64 *)(basep + 328 + pos)
537# asm 1: movq   328(<basep=int64#4,<pos=int64#1),>t=int64#13
538# asm 2: movq   328(<basep=%rcx,<pos=%rdi),>t=%r15
539movq   328(%rcx,%rdi),%r15
540
541# qhasm: txaddy1 = t if =
542# asm 1: cmove <t=int64#13,<txaddy1=int64#10
543# asm 2: cmove <t=%r15,<txaddy1=%r12
544cmove %r15,%r12
545
546# qhasm: t = *(uint64 *)(basep + 336 + pos)
547# asm 1: movq   336(<basep=int64#4,<pos=int64#1),>t=int64#13
548# asm 2: movq   336(<basep=%rcx,<pos=%rdi),>t=%r15
549movq   336(%rcx,%rdi),%r15
550
551# qhasm: txaddy2 = t if =
552# asm 1: cmove <t=int64#13,<txaddy2=int64#11
553# asm 2: cmove <t=%r15,<txaddy2=%r13
554cmove %r15,%r13
555
556# qhasm: t = *(uint64 *)(basep + 344 + pos)
557# asm 1: movq   344(<basep=int64#4,<pos=int64#1),>t=int64#13
558# asm 2: movq   344(<basep=%rcx,<pos=%rdi),>t=%r15
559movq   344(%rcx,%rdi),%r15
560
561# qhasm: txaddy3 = t if =
562# asm 1: cmove <t=int64#13,<txaddy3=int64#12
563# asm 2: cmove <t=%r15,<txaddy3=%r14
564cmove %r15,%r14
565
566# qhasm: =? u - 5
567# asm 1: cmp  $5,<u=int64#5
568# asm 2: cmp  $5,<u=%r8
569cmp  $5,%r8
570
571# qhasm: t = *(uint64 *)(basep + 384 + pos)
572# asm 1: movq   384(<basep=int64#4,<pos=int64#1),>t=int64#13
573# asm 2: movq   384(<basep=%rcx,<pos=%rdi),>t=%r15
574movq   384(%rcx,%rdi),%r15
575
576# qhasm: tysubx0 = t if =
577# asm 1: cmove <t=int64#13,<tysubx0=int64#2
578# asm 2: cmove <t=%r15,<tysubx0=%rsi
579cmove %r15,%rsi
580
581# qhasm: t = *(uint64 *)(basep + 392 + pos)
582# asm 1: movq   392(<basep=int64#4,<pos=int64#1),>t=int64#13
583# asm 2: movq   392(<basep=%rcx,<pos=%rdi),>t=%r15
584movq   392(%rcx,%rdi),%r15
585
586# qhasm: tysubx1 = t if =
587# asm 1: cmove <t=int64#13,<tysubx1=int64#6
588# asm 2: cmove <t=%r15,<tysubx1=%r9
589cmove %r15,%r9
590
591# qhasm: t = *(uint64 *)(basep + 400 + pos)
592# asm 1: movq   400(<basep=int64#4,<pos=int64#1),>t=int64#13
593# asm 2: movq   400(<basep=%rcx,<pos=%rdi),>t=%r15
594movq   400(%rcx,%rdi),%r15
595
596# qhasm: tysubx2 = t if =
597# asm 1: cmove <t=int64#13,<tysubx2=int64#7
598# asm 2: cmove <t=%r15,<tysubx2=%rax
599cmove %r15,%rax
600
601# qhasm: t = *(uint64 *)(basep + 408 + pos)
602# asm 1: movq   408(<basep=int64#4,<pos=int64#1),>t=int64#13
603# asm 2: movq   408(<basep=%rcx,<pos=%rdi),>t=%r15
604movq   408(%rcx,%rdi),%r15
605
606# qhasm: tysubx3 = t if =
607# asm 1: cmove <t=int64#13,<tysubx3=int64#8
608# asm 2: cmove <t=%r15,<tysubx3=%r10
609cmove %r15,%r10
610
611# qhasm: t = *(uint64 *)(basep + 416 + pos)
612# asm 1: movq   416(<basep=int64#4,<pos=int64#1),>t=int64#13
613# asm 2: movq   416(<basep=%rcx,<pos=%rdi),>t=%r15
614movq   416(%rcx,%rdi),%r15
615
616# qhasm: txaddy0 = t if =
617# asm 1: cmove <t=int64#13,<txaddy0=int64#9
618# asm 2: cmove <t=%r15,<txaddy0=%r11
619cmove %r15,%r11
620
621# qhasm: t = *(uint64 *)(basep + 424 + pos)
622# asm 1: movq   424(<basep=int64#4,<pos=int64#1),>t=int64#13
623# asm 2: movq   424(<basep=%rcx,<pos=%rdi),>t=%r15
624movq   424(%rcx,%rdi),%r15
625
626# qhasm: txaddy1 = t if =
627# asm 1: cmove <t=int64#13,<txaddy1=int64#10
628# asm 2: cmove <t=%r15,<txaddy1=%r12
629cmove %r15,%r12
630
631# qhasm: t = *(uint64 *)(basep + 432 + pos)
632# asm 1: movq   432(<basep=int64#4,<pos=int64#1),>t=int64#13
633# asm 2: movq   432(<basep=%rcx,<pos=%rdi),>t=%r15
634movq   432(%rcx,%rdi),%r15
635
636# qhasm: txaddy2 = t if =
637# asm 1: cmove <t=int64#13,<txaddy2=int64#11
638# asm 2: cmove <t=%r15,<txaddy2=%r13
639cmove %r15,%r13
640
641# qhasm: t = *(uint64 *)(basep + 440 + pos)
642# asm 1: movq   440(<basep=int64#4,<pos=int64#1),>t=int64#13
643# asm 2: movq   440(<basep=%rcx,<pos=%rdi),>t=%r15
644movq   440(%rcx,%rdi),%r15
645
646# qhasm: txaddy3 = t if =
647# asm 1: cmove <t=int64#13,<txaddy3=int64#12
648# asm 2: cmove <t=%r15,<txaddy3=%r14
649cmove %r15,%r14
650
651# qhasm: =? u - 6
652# asm 1: cmp  $6,<u=int64#5
653# asm 2: cmp  $6,<u=%r8
654cmp  $6,%r8
655
656# qhasm: t = *(uint64 *)(basep + 480 + pos)
657# asm 1: movq   480(<basep=int64#4,<pos=int64#1),>t=int64#13
658# asm 2: movq   480(<basep=%rcx,<pos=%rdi),>t=%r15
659movq   480(%rcx,%rdi),%r15
660
661# qhasm: tysubx0 = t if =
662# asm 1: cmove <t=int64#13,<tysubx0=int64#2
663# asm 2: cmove <t=%r15,<tysubx0=%rsi
664cmove %r15,%rsi
665
666# qhasm: t = *(uint64 *)(basep + 488 + pos)
667# asm 1: movq   488(<basep=int64#4,<pos=int64#1),>t=int64#13
668# asm 2: movq   488(<basep=%rcx,<pos=%rdi),>t=%r15
669movq   488(%rcx,%rdi),%r15
670
671# qhasm: tysubx1 = t if =
672# asm 1: cmove <t=int64#13,<tysubx1=int64#6
673# asm 2: cmove <t=%r15,<tysubx1=%r9
674cmove %r15,%r9
675
676# qhasm: t = *(uint64 *)(basep + 496 + pos)
677# asm 1: movq   496(<basep=int64#4,<pos=int64#1),>t=int64#13
678# asm 2: movq   496(<basep=%rcx,<pos=%rdi),>t=%r15
679movq   496(%rcx,%rdi),%r15
680
681# qhasm: tysubx2 = t if =
682# asm 1: cmove <t=int64#13,<tysubx2=int64#7
683# asm 2: cmove <t=%r15,<tysubx2=%rax
684cmove %r15,%rax
685
686# qhasm: t = *(uint64 *)(basep + 504 + pos)
687# asm 1: movq   504(<basep=int64#4,<pos=int64#1),>t=int64#13
688# asm 2: movq   504(<basep=%rcx,<pos=%rdi),>t=%r15
689movq   504(%rcx,%rdi),%r15
690
691# qhasm: tysubx3 = t if =
692# asm 1: cmove <t=int64#13,<tysubx3=int64#8
693# asm 2: cmove <t=%r15,<tysubx3=%r10
694cmove %r15,%r10
695
696# qhasm: t = *(uint64 *)(basep + 512 + pos)
697# asm 1: movq   512(<basep=int64#4,<pos=int64#1),>t=int64#13
698# asm 2: movq   512(<basep=%rcx,<pos=%rdi),>t=%r15
699movq   512(%rcx,%rdi),%r15
700
701# qhasm: txaddy0 = t if =
702# asm 1: cmove <t=int64#13,<txaddy0=int64#9
703# asm 2: cmove <t=%r15,<txaddy0=%r11
704cmove %r15,%r11
705
706# qhasm: t = *(uint64 *)(basep + 520 + pos)
707# asm 1: movq   520(<basep=int64#4,<pos=int64#1),>t=int64#13
708# asm 2: movq   520(<basep=%rcx,<pos=%rdi),>t=%r15
709movq   520(%rcx,%rdi),%r15
710
711# qhasm: txaddy1 = t if =
712# asm 1: cmove <t=int64#13,<txaddy1=int64#10
713# asm 2: cmove <t=%r15,<txaddy1=%r12
714cmove %r15,%r12
715
716# qhasm: t = *(uint64 *)(basep + 528 + pos)
717# asm 1: movq   528(<basep=int64#4,<pos=int64#1),>t=int64#13
718# asm 2: movq   528(<basep=%rcx,<pos=%rdi),>t=%r15
719movq   528(%rcx,%rdi),%r15
720
721# qhasm: txaddy2 = t if =
722# asm 1: cmove <t=int64#13,<txaddy2=int64#11
723# asm 2: cmove <t=%r15,<txaddy2=%r13
724cmove %r15,%r13
725
726# qhasm: t = *(uint64 *)(basep + 536 + pos)
727# asm 1: movq   536(<basep=int64#4,<pos=int64#1),>t=int64#13
728# asm 2: movq   536(<basep=%rcx,<pos=%rdi),>t=%r15
729movq   536(%rcx,%rdi),%r15
730
731# qhasm: txaddy3 = t if =
732# asm 1: cmove <t=int64#13,<txaddy3=int64#12
733# asm 2: cmove <t=%r15,<txaddy3=%r14
734cmove %r15,%r14
735
736# qhasm: =? u - 7
737# asm 1: cmp  $7,<u=int64#5
738# asm 2: cmp  $7,<u=%r8
739cmp  $7,%r8
740
741# qhasm: t = *(uint64 *)(basep + 576 + pos)
742# asm 1: movq   576(<basep=int64#4,<pos=int64#1),>t=int64#13
743# asm 2: movq   576(<basep=%rcx,<pos=%rdi),>t=%r15
744movq   576(%rcx,%rdi),%r15
745
746# qhasm: tysubx0 = t if =
747# asm 1: cmove <t=int64#13,<tysubx0=int64#2
748# asm 2: cmove <t=%r15,<tysubx0=%rsi
749cmove %r15,%rsi
750
751# qhasm: t = *(uint64 *)(basep + 584 + pos)
752# asm 1: movq   584(<basep=int64#4,<pos=int64#1),>t=int64#13
753# asm 2: movq   584(<basep=%rcx,<pos=%rdi),>t=%r15
754movq   584(%rcx,%rdi),%r15
755
756# qhasm: tysubx1 = t if =
757# asm 1: cmove <t=int64#13,<tysubx1=int64#6
758# asm 2: cmove <t=%r15,<tysubx1=%r9
759cmove %r15,%r9
760
761# qhasm: t = *(uint64 *)(basep + 592 + pos)
762# asm 1: movq   592(<basep=int64#4,<pos=int64#1),>t=int64#13
763# asm 2: movq   592(<basep=%rcx,<pos=%rdi),>t=%r15
764movq   592(%rcx,%rdi),%r15
765
766# qhasm: tysubx2 = t if =
767# asm 1: cmove <t=int64#13,<tysubx2=int64#7
768# asm 2: cmove <t=%r15,<tysubx2=%rax
769cmove %r15,%rax
770
771# qhasm: t = *(uint64 *)(basep + 600 + pos)
772# asm 1: movq   600(<basep=int64#4,<pos=int64#1),>t=int64#13
773# asm 2: movq   600(<basep=%rcx,<pos=%rdi),>t=%r15
774movq   600(%rcx,%rdi),%r15
775
776# qhasm: tysubx3 = t if =
777# asm 1: cmove <t=int64#13,<tysubx3=int64#8
778# asm 2: cmove <t=%r15,<tysubx3=%r10
779cmove %r15,%r10
780
781# qhasm: t = *(uint64 *)(basep + 608 + pos)
782# asm 1: movq   608(<basep=int64#4,<pos=int64#1),>t=int64#13
783# asm 2: movq   608(<basep=%rcx,<pos=%rdi),>t=%r15
784movq   608(%rcx,%rdi),%r15
785
786# qhasm: txaddy0 = t if =
787# asm 1: cmove <t=int64#13,<txaddy0=int64#9
788# asm 2: cmove <t=%r15,<txaddy0=%r11
789cmove %r15,%r11
790
791# qhasm: t = *(uint64 *)(basep + 616 + pos)
792# asm 1: movq   616(<basep=int64#4,<pos=int64#1),>t=int64#13
793# asm 2: movq   616(<basep=%rcx,<pos=%rdi),>t=%r15
794movq   616(%rcx,%rdi),%r15
795
796# qhasm: txaddy1 = t if =
797# asm 1: cmove <t=int64#13,<txaddy1=int64#10
798# asm 2: cmove <t=%r15,<txaddy1=%r12
799cmove %r15,%r12
800
801# qhasm: t = *(uint64 *)(basep + 624 + pos)
802# asm 1: movq   624(<basep=int64#4,<pos=int64#1),>t=int64#13
803# asm 2: movq   624(<basep=%rcx,<pos=%rdi),>t=%r15
804movq   624(%rcx,%rdi),%r15
805
806# qhasm: txaddy2 = t if =
807# asm 1: cmove <t=int64#13,<txaddy2=int64#11
808# asm 2: cmove <t=%r15,<txaddy2=%r13
809cmove %r15,%r13
810
811# qhasm: t = *(uint64 *)(basep + 632 + pos)
812# asm 1: movq   632(<basep=int64#4,<pos=int64#1),>t=int64#13
813# asm 2: movq   632(<basep=%rcx,<pos=%rdi),>t=%r15
814movq   632(%rcx,%rdi),%r15
815
816# qhasm: txaddy3 = t if =
817# asm 1: cmove <t=int64#13,<txaddy3=int64#12
818# asm 2: cmove <t=%r15,<txaddy3=%r14
819cmove %r15,%r14
820
821# qhasm: =? u - 8
822# asm 1: cmp  $8,<u=int64#5
823# asm 2: cmp  $8,<u=%r8
824cmp  $8,%r8
825
826# qhasm: t = *(uint64 *)(basep + 672 + pos)
827# asm 1: movq   672(<basep=int64#4,<pos=int64#1),>t=int64#13
828# asm 2: movq   672(<basep=%rcx,<pos=%rdi),>t=%r15
829movq   672(%rcx,%rdi),%r15
830
831# qhasm: tysubx0 = t if =
832# asm 1: cmove <t=int64#13,<tysubx0=int64#2
833# asm 2: cmove <t=%r15,<tysubx0=%rsi
834cmove %r15,%rsi
835
836# qhasm: t = *(uint64 *)(basep + 680 + pos)
837# asm 1: movq   680(<basep=int64#4,<pos=int64#1),>t=int64#13
838# asm 2: movq   680(<basep=%rcx,<pos=%rdi),>t=%r15
839movq   680(%rcx,%rdi),%r15
840
841# qhasm: tysubx1 = t if =
842# asm 1: cmove <t=int64#13,<tysubx1=int64#6
843# asm 2: cmove <t=%r15,<tysubx1=%r9
844cmove %r15,%r9
845
846# qhasm: t = *(uint64 *)(basep + 688 + pos)
847# asm 1: movq   688(<basep=int64#4,<pos=int64#1),>t=int64#13
848# asm 2: movq   688(<basep=%rcx,<pos=%rdi),>t=%r15
849movq   688(%rcx,%rdi),%r15
850
851# qhasm: tysubx2 = t if =
852# asm 1: cmove <t=int64#13,<tysubx2=int64#7
853# asm 2: cmove <t=%r15,<tysubx2=%rax
854cmove %r15,%rax
855
856# qhasm: t = *(uint64 *)(basep + 696 + pos)
857# asm 1: movq   696(<basep=int64#4,<pos=int64#1),>t=int64#13
858# asm 2: movq   696(<basep=%rcx,<pos=%rdi),>t=%r15
859movq   696(%rcx,%rdi),%r15
860
861# qhasm: tysubx3 = t if =
862# asm 1: cmove <t=int64#13,<tysubx3=int64#8
863# asm 2: cmove <t=%r15,<tysubx3=%r10
864cmove %r15,%r10
865
866# qhasm: t = *(uint64 *)(basep + 704 + pos)
867# asm 1: movq   704(<basep=int64#4,<pos=int64#1),>t=int64#13
868# asm 2: movq   704(<basep=%rcx,<pos=%rdi),>t=%r15
869movq   704(%rcx,%rdi),%r15
870
871# qhasm: txaddy0 = t if =
872# asm 1: cmove <t=int64#13,<txaddy0=int64#9
873# asm 2: cmove <t=%r15,<txaddy0=%r11
874cmove %r15,%r11
875
876# qhasm: t = *(uint64 *)(basep + 712 + pos)
877# asm 1: movq   712(<basep=int64#4,<pos=int64#1),>t=int64#13
878# asm 2: movq   712(<basep=%rcx,<pos=%rdi),>t=%r15
879movq   712(%rcx,%rdi),%r15
880
881# qhasm: txaddy1 = t if =
882# asm 1: cmove <t=int64#13,<txaddy1=int64#10
883# asm 2: cmove <t=%r15,<txaddy1=%r12
884cmove %r15,%r12
885
886# qhasm: t = *(uint64 *)(basep + 720 + pos)
887# asm 1: movq   720(<basep=int64#4,<pos=int64#1),>t=int64#13
888# asm 2: movq   720(<basep=%rcx,<pos=%rdi),>t=%r15
889movq   720(%rcx,%rdi),%r15
890
891# qhasm: txaddy2 = t if =
892# asm 1: cmove <t=int64#13,<txaddy2=int64#11
893# asm 2: cmove <t=%r15,<txaddy2=%r13
894cmove %r15,%r13
895
896# qhasm: t = *(uint64 *)(basep + 728 + pos)
897# asm 1: movq   728(<basep=int64#4,<pos=int64#1),>t=int64#13
898# asm 2: movq   728(<basep=%rcx,<pos=%rdi),>t=%r15
899movq   728(%rcx,%rdi),%r15
900
901# qhasm: txaddy3 = t if =
902# asm 1: cmove <t=int64#13,<txaddy3=int64#12
903# asm 2: cmove <t=%r15,<txaddy3=%r14
904cmove %r15,%r14
905
# qhasm: signed<? b - 0
# asm 1: cmp  $0,<b=int64#3
# asm 2: cmp  $0,<b=%rdx
# Sign handling: if b < 0, swap the (tysubx) and (txaddy) limb sets with
# cmovl -- a branchless conditional swap. NOTE(review): for these
# Ed25519 tables this swap presumably realizes point negation (the
# (y-x, y+x) coordinate exchange); confirm against the table layout.
# One cmp provides the flags for the whole sequence below, since mov
# and cmovl do not modify flags.
cmp  $0,%rdx

# qhasm: t = tysubx0
# asm 1: mov  <tysubx0=int64#2,>t=int64#13
# asm 2: mov  <tysubx0=%rsi,>t=%r15
mov  %rsi,%r15

# qhasm: tysubx0 = txaddy0 if signed<
# asm 1: cmovl <txaddy0=int64#9,<tysubx0=int64#2
# asm 2: cmovl <txaddy0=%r11,<tysubx0=%rsi
cmovl %r11,%rsi

# qhasm: txaddy0 = t if signed<
# asm 1: cmovl <t=int64#13,<txaddy0=int64#9
# asm 2: cmovl <t=%r15,<txaddy0=%r11
cmovl %r15,%r11

# qhasm: t = tysubx1
# asm 1: mov  <tysubx1=int64#6,>t=int64#13
# asm 2: mov  <tysubx1=%r9,>t=%r15
mov  %r9,%r15

# qhasm: tysubx1 = txaddy1 if signed<
# asm 1: cmovl <txaddy1=int64#10,<tysubx1=int64#6
# asm 2: cmovl <txaddy1=%r12,<tysubx1=%r9
cmovl %r12,%r9

# qhasm: txaddy1 = t if signed<
# asm 1: cmovl <t=int64#13,<txaddy1=int64#10
# asm 2: cmovl <t=%r15,<txaddy1=%r12
cmovl %r15,%r12

# qhasm: t = tysubx2
# asm 1: mov  <tysubx2=int64#7,>t=int64#13
# asm 2: mov  <tysubx2=%rax,>t=%r15
mov  %rax,%r15

# qhasm: tysubx2 = txaddy2 if signed<
# asm 1: cmovl <txaddy2=int64#11,<tysubx2=int64#7
# asm 2: cmovl <txaddy2=%r13,<tysubx2=%rax
cmovl %r13,%rax

# qhasm: txaddy2 = t if signed<
# asm 1: cmovl <t=int64#13,<txaddy2=int64#11
# asm 2: cmovl <t=%r15,<txaddy2=%r13
cmovl %r15,%r13

# qhasm: t = tysubx3
# asm 1: mov  <tysubx3=int64#8,>t=int64#13
# asm 2: mov  <tysubx3=%r10,>t=%r15
mov  %r10,%r15

# qhasm: tysubx3 = txaddy3 if signed<
# asm 1: cmovl <txaddy3=int64#12,<tysubx3=int64#8
# asm 2: cmovl <txaddy3=%r14,<tysubx3=%r10
cmovl %r14,%r10

# qhasm: txaddy3 = t if signed<
# asm 1: cmovl <t=int64#13,<txaddy3=int64#12
# asm 2: cmovl <t=%r15,<txaddy3=%r14
cmovl %r15,%r14
970
971# qhasm: tp = tp_stack
972# asm 1: movq <tp_stack=stack64#8,>tp=int64#13
973# asm 2: movq <tp_stack=56(%rsp),>tp=%r15
974movq 56(%rsp),%r15
975
976# qhasm: *(uint64 *)(tp + 0) = tysubx0
977# asm 1: movq   <tysubx0=int64#2,0(<tp=int64#13)
978# asm 2: movq   <tysubx0=%rsi,0(<tp=%r15)
979movq   %rsi,0(%r15)
980
981# qhasm: *(uint64 *)(tp + 8) = tysubx1
982# asm 1: movq   <tysubx1=int64#6,8(<tp=int64#13)
983# asm 2: movq   <tysubx1=%r9,8(<tp=%r15)
984movq   %r9,8(%r15)
985
986# qhasm: *(uint64 *)(tp + 16) = tysubx2
987# asm 1: movq   <tysubx2=int64#7,16(<tp=int64#13)
988# asm 2: movq   <tysubx2=%rax,16(<tp=%r15)
989movq   %rax,16(%r15)
990
991# qhasm: *(uint64 *)(tp + 24) = tysubx3
992# asm 1: movq   <tysubx3=int64#8,24(<tp=int64#13)
993# asm 2: movq   <tysubx3=%r10,24(<tp=%r15)
994movq   %r10,24(%r15)
995
996# qhasm: *(uint64 *)(tp + 32) = txaddy0
997# asm 1: movq   <txaddy0=int64#9,32(<tp=int64#13)
998# asm 2: movq   <txaddy0=%r11,32(<tp=%r15)
999movq   %r11,32(%r15)
1000
1001# qhasm: *(uint64 *)(tp + 40) = txaddy1
1002# asm 1: movq   <txaddy1=int64#10,40(<tp=int64#13)
1003# asm 2: movq   <txaddy1=%r12,40(<tp=%r15)
1004movq   %r12,40(%r15)
1005
1006# qhasm: *(uint64 *)(tp + 48) = txaddy2
1007# asm 1: movq   <txaddy2=int64#11,48(<tp=int64#13)
1008# asm 2: movq   <txaddy2=%r13,48(<tp=%r15)
1009movq   %r13,48(%r15)
1010
1011# qhasm: *(uint64 *)(tp + 56) = txaddy3
1012# asm 1: movq   <txaddy3=int64#12,56(<tp=int64#13)
1013# asm 2: movq   <txaddy3=%r14,56(<tp=%r15)
1014movq   %r14,56(%r15)
1015
1016# qhasm: tt2d0 = 0
1017# asm 1: mov  $0,>tt2d0=int64#2
1018# asm 2: mov  $0,>tt2d0=%rsi
1019mov  $0,%rsi
1020
1021# qhasm: tt2d1 = 0
1022# asm 1: mov  $0,>tt2d1=int64#6
1023# asm 2: mov  $0,>tt2d1=%r9
1024mov  $0,%r9
1025
1026# qhasm: tt2d2 = 0
1027# asm 1: mov  $0,>tt2d2=int64#7
1028# asm 2: mov  $0,>tt2d2=%rax
1029mov  $0,%rax
1030
1031# qhasm: tt2d3 = 0
1032# asm 1: mov  $0,>tt2d3=int64#8
1033# asm 2: mov  $0,>tt2d3=%r10
1034mov  $0,%r10
1035
# ---------------------------------------------------------------------------
# Branch-free selection of the tt2d field for table index u (in %r8).
# The offsets show 96-byte table entries (three 4-limb field elements);
# the third field of entry u sits at byte offset 64 + (u-1)*96, i.e.
# 64, 160, 256, 352, 448, 544, 640, 736.  pos (%rdi) is presumably already
# scaled to the start of the row — TODO confirm against the caller.
# For each candidate u: one CMP sets ZF, then four MOVQ loads + CMOVE copy
# that entry's limbs only when u matches.  MOVQ does not modify RFLAGS, so
# the CMP result survives across the loads.  All eight entries are loaded
# unconditionally, so both the branch pattern and the memory access pattern
# are independent of the secret index (cache-timing defense).
# ---------------------------------------------------------------------------
# u == 1: entry at byte offset 64.
# qhasm: =? u - 1
# asm 1: cmp  $1,<u=int64#5
# asm 2: cmp  $1,<u=%r8
cmp  $1,%r8

# qhasm: t = *(uint64 *)(basep + 64 + pos)
# asm 1: movq   64(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   64(<basep=%rcx,<pos=%rdi),>t=%r11
movq   64(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 72 + pos)
# asm 1: movq   72(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   72(<basep=%rcx,<pos=%rdi),>t=%r11
movq   72(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 80 + pos)
# asm 1: movq   80(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   80(<basep=%rcx,<pos=%rdi),>t=%r11
movq   80(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 88 + pos)
# asm 1: movq   88(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   88(<basep=%rcx,<pos=%rdi),>t=%r11
movq   88(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 2: entry at byte offset 160.
# qhasm: =? u - 2
# asm 1: cmp  $2,<u=int64#5
# asm 2: cmp  $2,<u=%r8
cmp  $2,%r8

# qhasm: t = *(uint64 *)(basep + 160 + pos)
# asm 1: movq   160(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   160(<basep=%rcx,<pos=%rdi),>t=%r11
movq   160(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 168 + pos)
# asm 1: movq   168(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   168(<basep=%rcx,<pos=%rdi),>t=%r11
movq   168(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 176 + pos)
# asm 1: movq   176(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   176(<basep=%rcx,<pos=%rdi),>t=%r11
movq   176(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 184 + pos)
# asm 1: movq   184(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   184(<basep=%rcx,<pos=%rdi),>t=%r11
movq   184(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 3: entry at byte offset 256.
# qhasm: =? u - 3
# asm 1: cmp  $3,<u=int64#5
# asm 2: cmp  $3,<u=%r8
cmp  $3,%r8

# qhasm: t = *(uint64 *)(basep + 256 + pos)
# asm 1: movq   256(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   256(<basep=%rcx,<pos=%rdi),>t=%r11
movq   256(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 264 + pos)
# asm 1: movq   264(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   264(<basep=%rcx,<pos=%rdi),>t=%r11
movq   264(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 272 + pos)
# asm 1: movq   272(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   272(<basep=%rcx,<pos=%rdi),>t=%r11
movq   272(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 280 + pos)
# asm 1: movq   280(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   280(<basep=%rcx,<pos=%rdi),>t=%r11
movq   280(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 4: entry at byte offset 352.
# qhasm: =? u - 4
# asm 1: cmp  $4,<u=int64#5
# asm 2: cmp  $4,<u=%r8
cmp  $4,%r8

# qhasm: t = *(uint64 *)(basep + 352 + pos)
# asm 1: movq   352(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   352(<basep=%rcx,<pos=%rdi),>t=%r11
movq   352(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 360 + pos)
# asm 1: movq   360(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   360(<basep=%rcx,<pos=%rdi),>t=%r11
movq   360(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 368 + pos)
# asm 1: movq   368(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   368(<basep=%rcx,<pos=%rdi),>t=%r11
movq   368(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 376 + pos)
# asm 1: movq   376(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   376(<basep=%rcx,<pos=%rdi),>t=%r11
movq   376(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 5: entry at byte offset 448.
# qhasm: =? u - 5
# asm 1: cmp  $5,<u=int64#5
# asm 2: cmp  $5,<u=%r8
cmp  $5,%r8

# qhasm: t = *(uint64 *)(basep + 448 + pos)
# asm 1: movq   448(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   448(<basep=%rcx,<pos=%rdi),>t=%r11
movq   448(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 456 + pos)
# asm 1: movq   456(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   456(<basep=%rcx,<pos=%rdi),>t=%r11
movq   456(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 464 + pos)
# asm 1: movq   464(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   464(<basep=%rcx,<pos=%rdi),>t=%r11
movq   464(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 472 + pos)
# asm 1: movq   472(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   472(<basep=%rcx,<pos=%rdi),>t=%r11
movq   472(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 6: entry at byte offset 544.
# qhasm: =? u - 6
# asm 1: cmp  $6,<u=int64#5
# asm 2: cmp  $6,<u=%r8
cmp  $6,%r8

# qhasm: t = *(uint64 *)(basep + 544 + pos)
# asm 1: movq   544(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   544(<basep=%rcx,<pos=%rdi),>t=%r11
movq   544(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 552 + pos)
# asm 1: movq   552(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   552(<basep=%rcx,<pos=%rdi),>t=%r11
movq   552(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 560 + pos)
# asm 1: movq   560(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   560(<basep=%rcx,<pos=%rdi),>t=%r11
movq   560(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 568 + pos)
# asm 1: movq   568(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   568(<basep=%rcx,<pos=%rdi),>t=%r11
movq   568(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 7: entry at byte offset 640.
# qhasm: =? u - 7
# asm 1: cmp  $7,<u=int64#5
# asm 2: cmp  $7,<u=%r8
cmp  $7,%r8

# qhasm: t = *(uint64 *)(basep + 640 + pos)
# asm 1: movq   640(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   640(<basep=%rcx,<pos=%rdi),>t=%r11
movq   640(%rcx,%rdi),%r11

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#9,<tt2d0=int64#2
# asm 2: cmove <t=%r11,<tt2d0=%rsi
cmove %r11,%rsi

# qhasm: t = *(uint64 *)(basep + 648 + pos)
# asm 1: movq   648(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   648(<basep=%rcx,<pos=%rdi),>t=%r11
movq   648(%rcx,%rdi),%r11

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#9,<tt2d1=int64#6
# asm 2: cmove <t=%r11,<tt2d1=%r9
cmove %r11,%r9

# qhasm: t = *(uint64 *)(basep + 656 + pos)
# asm 1: movq   656(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   656(<basep=%rcx,<pos=%rdi),>t=%r11
movq   656(%rcx,%rdi),%r11

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#9,<tt2d2=int64#7
# asm 2: cmove <t=%r11,<tt2d2=%rax
cmove %r11,%rax

# qhasm: t = *(uint64 *)(basep + 664 + pos)
# asm 1: movq   664(<basep=int64#4,<pos=int64#1),>t=int64#9
# asm 2: movq   664(<basep=%rcx,<pos=%rdi),>t=%r11
movq   664(%rcx,%rdi),%r11

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#9,<tt2d3=int64#8
# asm 2: cmove <t=%r11,<tt2d3=%r10
cmove %r11,%r10

# u == 8: entry at byte offset 736.  This is the last candidate, so the
# scratch register switches: u (%r8) and then pos (%rdi) are dead after
# their final use here and get recycled as the load target t.
# qhasm: =? u - 8
# asm 1: cmp  $8,<u=int64#5
# asm 2: cmp  $8,<u=%r8
cmp  $8,%r8

# qhasm: t = *(uint64 *)(basep + 736 + pos)
# asm 1: movq   736(<basep=int64#4,<pos=int64#1),>t=int64#5
# asm 2: movq   736(<basep=%rcx,<pos=%rdi),>t=%r8
movq   736(%rcx,%rdi),%r8

# qhasm: tt2d0 = t if =
# asm 1: cmove <t=int64#5,<tt2d0=int64#2
# asm 2: cmove <t=%r8,<tt2d0=%rsi
cmove %r8,%rsi

# qhasm: t = *(uint64 *)(basep + 744 + pos)
# asm 1: movq   744(<basep=int64#4,<pos=int64#1),>t=int64#5
# asm 2: movq   744(<basep=%rcx,<pos=%rdi),>t=%r8
movq   744(%rcx,%rdi),%r8

# qhasm: tt2d1 = t if =
# asm 1: cmove <t=int64#5,<tt2d1=int64#6
# asm 2: cmove <t=%r8,<tt2d1=%r9
cmove %r8,%r9

# qhasm: t = *(uint64 *)(basep + 752 + pos)
# asm 1: movq   752(<basep=int64#4,<pos=int64#1),>t=int64#5
# asm 2: movq   752(<basep=%rcx,<pos=%rdi),>t=%r8
movq   752(%rcx,%rdi),%r8

# qhasm: tt2d2 = t if =
# asm 1: cmove <t=int64#5,<tt2d2=int64#7
# asm 2: cmove <t=%r8,<tt2d2=%rax
cmove %r8,%rax

# qhasm: t = *(uint64 *)(basep + 760 + pos)
# asm 1: movq   760(<basep=int64#4,<pos=int64#1),>t=int64#1
# asm 2: movq   760(<basep=%rcx,<pos=%rdi),>t=%rdi
movq   760(%rcx,%rdi),%rdi

# qhasm: tt2d3 = t if =
# asm 1: cmove <t=int64#1,<tt2d3=int64#8
# asm 2: cmove <t=%rdi,<tt2d3=%r10
cmove %rdi,%r10
1395
# ---------------------------------------------------------------------------
# Compute the negation of tt2d into (tt0,tt1,tt2,tt3), branch-free:
#   1. tt = 0 - tt2d as a 256-bit SUB/SBB chain (CF = borrow out of 2^256).
#   2. Fold the borrow back using 2^256 == 38 (mod 2^255-19): CMOVAE turns
#      the constant 38 into 0 when step 1 did not borrow, then a second
#      SUB/SBB chain subtracts (38 or 0, 0, 0, 0).
#   3. If step 2 itself borrowed, CMOVC re-arms the 38 and a final
#      single-limb SUB applies it to the low limb (no further carry can
#      propagate at this point).
# No secret-dependent branches: only CMOVcc, so timing is data-independent.
# ---------------------------------------------------------------------------
# qhasm: tt0 = 0
# asm 1: mov  $0,>tt0=int64#1
# asm 2: mov  $0,>tt0=%rdi
mov  $0,%rdi

# qhasm: tt1 = 0
# asm 1: mov  $0,>tt1=int64#4
# asm 2: mov  $0,>tt1=%rcx
mov  $0,%rcx

# qhasm: tt2 = 0
# asm 1: mov  $0,>tt2=int64#5
# asm 2: mov  $0,>tt2=%r8
mov  $0,%r8

# qhasm: tt3 = 0
# asm 1: mov  $0,>tt3=int64#9
# asm 2: mov  $0,>tt3=%r11
mov  $0,%r11

# qhasm: carry? tt0 -= tt2d0
# asm 1: sub  <tt2d0=int64#2,<tt0=int64#1
# asm 2: sub  <tt2d0=%rsi,<tt0=%rdi
sub  %rsi,%rdi

# qhasm: carry? tt1 -= tt2d1 - carry
# asm 1: sbb  <tt2d1=int64#6,<tt1=int64#4
# asm 2: sbb  <tt2d1=%r9,<tt1=%rcx
sbb  %r9,%rcx

# qhasm: carry? tt2 -= tt2d2 - carry
# asm 1: sbb  <tt2d2=int64#7,<tt2=int64#5
# asm 2: sbb  <tt2d2=%rax,<tt2=%r8
sbb  %rax,%r8

# qhasm: carry? tt3 -= tt2d3 - carry
# asm 1: sbb  <tt2d3=int64#8,<tt3=int64#9
# asm 2: sbb  <tt2d3=%r10,<tt3=%r11
sbb  %r10,%r11

# qhasm: subt0 = 0
# asm 1: mov  $0,>subt0=int64#10
# asm 2: mov  $0,>subt0=%r12
mov  $0,%r12

# qhasm: subt1 = 38
# asm 1: mov  $38,>subt1=int64#11
# asm 2: mov  $38,>subt1=%r13
mov  $38,%r13

# subt1 = (borrow ? 38 : 0); CF still holds the borrow from the SBB chain.
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#10,<subt1=int64#11
# asm 2: cmovae <subt0=%r12,<subt1=%r13
cmovae %r12,%r13

# qhasm: carry? tt0 -= subt1
# asm 1: sub  <subt1=int64#11,<tt0=int64#1
# asm 2: sub  <subt1=%r13,<tt0=%rdi
sub  %r13,%rdi

# qhasm: carry? tt1 -= subt0 - carry
# asm 1: sbb  <subt0=int64#10,<tt1=int64#4
# asm 2: sbb  <subt0=%r12,<tt1=%rcx
sbb  %r12,%rcx

# qhasm: carry? tt2 -= subt0 - carry
# asm 1: sbb  <subt0=int64#10,<tt2=int64#5
# asm 2: sbb  <subt0=%r12,<tt2=%r8
sbb  %r12,%r8

# qhasm: carry? tt3 -= subt0 - carry
# asm 1: sbb  <subt0=int64#10,<tt3=int64#9
# asm 2: sbb  <subt0=%r12,<tt3=%r11
sbb  %r12,%r11

# subt0 = (second chain borrowed ? 38 : 0); final fix-up below.
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#11,<subt0=int64#10
# asm 2: cmovc <subt1=%r13,<subt0=%r12
cmovc %r13,%r12

# qhasm: tt0 -= subt0
# asm 1: sub  <subt0=int64#10,<tt0=int64#1
# asm 2: sub  <subt0=%r12,<tt0=%rdi
sub  %r12,%rdi
1480
# ---------------------------------------------------------------------------
# Apply the sign of b: if b < 0 (signed compare), replace the tt2d limbs
# with their freshly computed negation (tt0..tt3) via CMOVL — again
# branch-free, so the sign of the secret digit does not affect timing.
# NOTE(review): negating a point in this representation also requires
# swapping the y-x / y+x fields; that is presumably done earlier in this
# function (outside this view) — confirm.
# Finally store the (possibly negated) tt2d limbs into tp[64..88].
# ---------------------------------------------------------------------------
# qhasm: signed<? b - 0
# asm 1: cmp  $0,<b=int64#3
# asm 2: cmp  $0,<b=%rdx
cmp  $0,%rdx

# qhasm: tt2d0 = tt0 if signed<
# asm 1: cmovl <tt0=int64#1,<tt2d0=int64#2
# asm 2: cmovl <tt0=%rdi,<tt2d0=%rsi
cmovl %rdi,%rsi

# qhasm: tt2d1 = tt1 if signed<
# asm 1: cmovl <tt1=int64#4,<tt2d1=int64#6
# asm 2: cmovl <tt1=%rcx,<tt2d1=%r9
cmovl %rcx,%r9

# qhasm: tt2d2 = tt2 if signed<
# asm 1: cmovl <tt2=int64#5,<tt2d2=int64#7
# asm 2: cmovl <tt2=%r8,<tt2d2=%rax
cmovl %r8,%rax

# qhasm: tt2d3 = tt3 if signed<
# asm 1: cmovl <tt3=int64#9,<tt2d3=int64#8
# asm 2: cmovl <tt3=%r11,<tt2d3=%r10
cmovl %r11,%r10

# qhasm: *(uint64 *)(tp + 64) = tt2d0
# asm 1: movq   <tt2d0=int64#2,64(<tp=int64#13)
# asm 2: movq   <tt2d0=%rsi,64(<tp=%r15)
movq   %rsi,64(%r15)

# qhasm: *(uint64 *)(tp + 72) = tt2d1
# asm 1: movq   <tt2d1=int64#6,72(<tp=int64#13)
# asm 2: movq   <tt2d1=%r9,72(<tp=%r15)
movq   %r9,72(%r15)

# qhasm: *(uint64 *)(tp + 80) = tt2d2
# asm 1: movq   <tt2d2=int64#7,80(<tp=int64#13)
# asm 2: movq   <tt2d2=%rax,80(<tp=%r15)
movq   %rax,80(%r15)

# qhasm: *(uint64 *)(tp + 88) = tt2d3
# asm 1: movq   <tt2d3=int64#8,88(<tp=int64#13)
# asm 2: movq   <tt2d3=%r10,88(<tp=%r15)
movq   %r10,88(%r15)
1525
# ---------------------------------------------------------------------------
# Epilogue: reload the seven registers the prologue spilled to the aligned
# stack area (slots 0..48(%rsp)).  The r11 slot doubles as the prologue's
# stack-alignment adjustment (see function entry: r11 = (rsp & 31) + 64 was
# stored at 0(%rsp)), so restoring it here lets "add %r11,%rsp" undo the
# adjustment exactly.  r12-r15, rbx and rbp are SysV callee-saved and must
# be restored for the caller.
# ---------------------------------------------------------------------------
# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# Undo the stack adjustment, then return.  The two MOVs into rax/rdx are
# qhasm's standard "leave" boilerplate (int64#1/#2 into the return-value
# registers); callers presumably ignore them for this void-style function.
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
1566