1
2# qhasm: int64 x
3
4# qhasm: int64 arg2
5
6# qhasm: int64 arg3
7
8# qhasm: int64 arg4
9
10# qhasm: input x
11
12# qhasm: input arg2
13
14# qhasm: input arg3
15
16# qhasm: input arg4
17
18# qhasm: int64 i
19
20# qhasm: int64 a
21
22# qhasm: int64 m
23
24# qhasm: int64 out
25
26# qhasm: int64 bytes
27
28# qhasm: stack64 ctarget
29
30# qhasm: stack512 tmp
31
32# qhasm: stack64 bytes_stack
33
34# qhasm: stack64 out_stack
35
36# qhasm: stack64 m_stack
37
38# qhasm: stack64 x_stack
39
40# qhasm: int64 z0
41
42# qhasm: int64 z1
43
44# qhasm: int64 z2
45
46# qhasm: int64 z3
47
48# qhasm: int64 z4
49
50# qhasm: int64 z5
51
52# qhasm: int64 z6
53
54# qhasm: int64 z7
55
56# qhasm: int64 z8
57
58# qhasm: int64 z9
59
60# qhasm: int64 z10
61
62# qhasm: int64 z11
63
64# qhasm: int64 z12
65
66# qhasm: int64 z13
67
68# qhasm: int64 z14
69
70# qhasm: int64 z15
71
72# qhasm: int64 u0
73
74# qhasm: int64 u1
75
76# qhasm: int64 u2
77
78# qhasm: int64 u3
79
80# qhasm: int64 u4
81
82# qhasm: int64 u5
83
84# qhasm: int64 u6
85
86# qhasm: int64 u7
87
88# qhasm: int64 u8
89
90# qhasm: int64 u9
91
92# qhasm: int64 u10
93
94# qhasm: int64 u11
95
96# qhasm: int64 u12
97
98# qhasm: int64 u13
99
100# qhasm: int64 u14
101
102# qhasm: int64 u15
103
104# qhasm: int64 y0
105
106# qhasm: int64 y1
107
108# qhasm: int64 y2
109
110# qhasm: int64 y3
111
112# qhasm: int64 y4
113
114# qhasm: int64 y5
115
116# qhasm: int64 y6
117
118# qhasm: int64 y7
119
120# qhasm: int64 y8
121
122# qhasm: int64 y9
123
124# qhasm: int64 y10
125
126# qhasm: int64 y11
127
128# qhasm: int64 y12
129
130# qhasm: int64 y13
131
132# qhasm: int64 y14
133
134# qhasm: int64 y15
135
136# qhasm: int64 x0
137
138# qhasm: int64 x1
139
140# qhasm: int64 x2
141
142# qhasm: int64 x3
143
144# qhasm: int64 x4
145
146# qhasm: int64 x5
147
148# qhasm: int64 x6
149
150# qhasm: int64 x7
151
152# qhasm: int64 x8
153
154# qhasm: int64 x9
155
156# qhasm: int64 x10
157
158# qhasm: int64 x11
159
160# qhasm: int64 x12
161
162# qhasm: int64 x13
163
164# qhasm: int64 x14
165
166# qhasm: int64 x15
167
168# qhasm: int64 q0
169
170# qhasm: int64 q1
171
172# qhasm: int64 q2
173
174# qhasm: int64 q3
175
176# qhasm: int64 q4
177
178# qhasm: int64 q5
179
180# qhasm: int64 q6
181
182# qhasm: int64 q7
183
184# qhasm: int64 q8
185
186# qhasm: int64 q9
187
188# qhasm: int64 q10
189
190# qhasm: int64 q11
191
192# qhasm: int64 q12
193
194# qhasm: int64 q13
195
196# qhasm: int64 q14
197
198# qhasm: int64 q15
199
200# qhasm: int64 m0
201
202# qhasm: int64 m1
203
204# qhasm: int64 m2
205
206# qhasm: int64 m3
207
208# qhasm: int64 m4
209
210# qhasm: int64 m5
211
212# qhasm: int64 m6
213
214# qhasm: int64 m7
215
216# qhasm: int64 m8
217
218# qhasm: int64 m9
219
220# qhasm: int64 m10
221
222# qhasm: int64 m11
223
224# qhasm: int64 m12
225
226# qhasm: int64 m13
227
228# qhasm: int64 m14
229
230# qhasm: int64 m15
231
232# qhasm: enter ECRYPT_init
233.section ".text"
234.align 32
235.global ECRYPT_init
236ECRYPT_init:
237save %sp,-288,%sp
238
239# qhasm: leave
240ret
241restore
242
243# qhasm: enter ECRYPT_ivsetup
244.section ".text"
245.align 32
246.global ECRYPT_ivsetup
247ECRYPT_ivsetup:
248save %sp,-288,%sp
249
250# qhasm:   x6 = *(uint32 *) (arg2 + 0)
251# asm 1: lduw [<arg2=int64#2+0],>x6=int64#5
252# asm 2: lduw [<arg2=%i1+0],>x6=%i4
253lduw [%i1+0],%i4
254
255# qhasm:   x8 = 0
256# asm 1: add %g0,0,>x8=int64#6
257# asm 2: add %g0,0,>x8=%i5
258add %g0,0,%i5
259
260# qhasm:   x7 = *(uint32 *) (arg2 + 4)
261# asm 1: lduw [<arg2=int64#2+4],>x7=int64#2
262# asm 2: lduw [<arg2=%i1+4],>x7=%i1
263lduw [%i1+4],%i1
264
265# qhasm:   x9 = 0
266# asm 1: add %g0,0,>x9=int64#7
267# asm 2: add %g0,0,>x9=%g1
268add %g0,0,%g1
269
270# qhasm:   x += 24
271# asm 1: add <x=int64#1,24,>x=int64#1
272# asm 2: add <x=%i0,24,>x=%i0
273add %i0,24,%i0
274
275# qhasm:   *(swapendian int32 *) x = x6
276# asm 1: stwa <x6=int64#5,[<x=int64#1] 0x88
277# asm 2: stwa <x6=%i4,[<x=%i0] 0x88
278stwa %i4,[%i0] 0x88
279
280# qhasm:   x += 4
281# asm 1: add <x=int64#1,4,>x=int64#1
282# asm 2: add <x=%i0,4,>x=%i0
283add %i0,4,%i0
284
285# qhasm:   *(swapendian int32 *) x = x7
286# asm 1: stwa <x7=int64#2,[<x=int64#1] 0x88
287# asm 2: stwa <x7=%i1,[<x=%i0] 0x88
288stwa %i1,[%i0] 0x88
289
290# qhasm:   x += 4
291# asm 1: add <x=int64#1,4,>x=int64#1
292# asm 2: add <x=%i0,4,>x=%i0
293add %i0,4,%i0
294
295# qhasm:   *(int32 *) (x + 0) = x8
296# asm 1: stw <x8=int64#6,[<x=int64#1+0]
297# asm 2: stw <x8=%i5,[<x=%i0+0]
298stw %i5,[%i0+0]
299
300# qhasm:   x += 4
301# asm 1: add <x=int64#1,4,>x=int64#1
302# asm 2: add <x=%i0,4,>x=%i0
303add %i0,4,%i0
304
305# qhasm:   *(int32 *) (x + 0) = x9
306# asm 1: stw <x9=int64#7,[<x=int64#1+0]
307# asm 2: stw <x9=%g1,[<x=%i0+0]
308stw %g1,[%i0+0]
309
310# qhasm: leave
311ret
312restore
313
314# qhasm: enter ECRYPT_keysetup
315.section ".text"
316.align 32
317.global ECRYPT_keysetup
318ECRYPT_keysetup:
319save %sp,-288,%sp
320
321# qhasm:                  unsigned>? arg3 - 128
322# asm 1: subcc <arg3=int64#3,128,%g0
323# asm 2: subcc <arg3=%i2,128,%g0
324subcc %i2,128,%g0
325
326# qhasm: goto kbits256 if unsigned>
327bgu,pt %xcc,._kbits256
328nop
329
330# qhasm: kbits128:
331._kbits128:
332
333# qhasm:   x1 = *(uint32 *) (arg2 + 0)
334# asm 1: lduw [<arg2=int64#2+0],>x1=int64#3
335# asm 2: lduw [<arg2=%i1+0],>x1=%i2
336lduw [%i1+0],%i2
337
338# qhasm:   x0 = 1634760805 & 0xfffffc00
339# asm 1: sethi %lm(1634760805),>x0=int64#4
340# asm 2: sethi %lm(1634760805),>x0=%i3
341sethi %lm(1634760805),%i3
342
343# qhasm:   x2 = *(uint32 *) (arg2 + 4)
344# asm 1: lduw [<arg2=int64#2+4],>x2=int64#5
345# asm 2: lduw [<arg2=%i1+4],>x2=%i4
346lduw [%i1+4],%i4
347
348# qhasm:   x5 = 824206446 & 0xfffffc00
349# asm 1: sethi %lm(824206446),>x5=int64#6
350# asm 2: sethi %lm(824206446),>x5=%i5
351sethi %lm(824206446),%i5
352
353# qhasm:   x3 = *(uint32 *) (arg2 + 8)
354# asm 1: lduw [<arg2=int64#2+8],>x3=int64#7
355# asm 2: lduw [<arg2=%i1+8],>x3=%g1
356lduw [%i1+8],%g1
357
358# qhasm:   x10 = 2036477238 & 0xfffffc00
359# asm 1: sethi %lm(2036477238),>x10=int64#8
360# asm 2: sethi %lm(2036477238),>x10=%g4
361sethi %lm(2036477238),%g4
362
363# qhasm:   x4 = *(uint32 *) (arg2 + 12)
364# asm 1: lduw [<arg2=int64#2+12],>x4=int64#9
365# asm 2: lduw [<arg2=%i1+12],>x4=%g5
366lduw [%i1+12],%g5
367
368# qhasm:   x15 = 1797285236 & 0xfffffc00
369# asm 1: sethi %lm(1797285236),>x15=int64#10
370# asm 2: sethi %lm(1797285236),>x15=%o0
371sethi %lm(1797285236),%o0
372
373# qhasm:   x11 = *(uint32 *) (arg2 + 0)
374# asm 1: lduw [<arg2=int64#2+0],>x11=int64#11
375# asm 2: lduw [<arg2=%i1+0],>x11=%o1
376lduw [%i1+0],%o1
377
378# qhasm:   x0 |= 1634760805 & 0x3ff
379# asm 1: or <x0=int64#4,%lo(1634760805),>x0=int64#4
380# asm 2: or <x0=%i3,%lo(1634760805),>x0=%i3
381or %i3,%lo(1634760805),%i3
382
383# qhasm:   x12 = *(uint32 *) (arg2 + 4)
384# asm 1: lduw [<arg2=int64#2+4],>x12=int64#12
385# asm 2: lduw [<arg2=%i1+4],>x12=%o2
386lduw [%i1+4],%o2
387
388# qhasm:   x5 |= 824206446 & 0x3ff
389# asm 1: or <x5=int64#6,%lo(824206446),>x5=int64#6
390# asm 2: or <x5=%i5,%lo(824206446),>x5=%i5
391or %i5,%lo(824206446),%i5
392
393# qhasm:   x13 = *(uint32 *) (arg2 + 8)
394# asm 1: lduw [<arg2=int64#2+8],>x13=int64#13
395# asm 2: lduw [<arg2=%i1+8],>x13=%o3
396lduw [%i1+8],%o3
397
398# qhasm:   x10 |= 2036477238 & 0x3ff
399# asm 1: or <x10=int64#8,%lo(2036477238),>x10=int64#8
400# asm 2: or <x10=%g4,%lo(2036477238),>x10=%g4
401or %g4,%lo(2036477238),%g4
402
403# qhasm:   x14 = *(uint32 *) (arg2 + 12)
404# asm 1: lduw [<arg2=int64#2+12],>x14=int64#2
405# asm 2: lduw [<arg2=%i1+12],>x14=%i1
406lduw [%i1+12],%i1
407
408# qhasm:   x15 |= 1797285236 & 0x3ff
409# asm 1: or <x15=int64#10,%lo(1797285236),>x15=int64#10
410# asm 2: or <x15=%o0,%lo(1797285236),>x15=%o0
411or %o0,%lo(1797285236),%o0
412
413# qhasm: goto storekey
414b ._storekey
415nop
416
417# qhasm: kbits256:
418._kbits256:
419
420# qhasm:   x1 = *(uint32 *) (arg2 + 0)
421# asm 1: lduw [<arg2=int64#2+0],>x1=int64#3
422# asm 2: lduw [<arg2=%i1+0],>x1=%i2
423lduw [%i1+0],%i2
424
425# qhasm:   x0 = 1634760805 & 0xfffffc00
426# asm 1: sethi %lm(1634760805),>x0=int64#4
427# asm 2: sethi %lm(1634760805),>x0=%i3
428sethi %lm(1634760805),%i3
429
430# qhasm:   x2 = *(uint32 *) (arg2 + 4)
431# asm 1: lduw [<arg2=int64#2+4],>x2=int64#5
432# asm 2: lduw [<arg2=%i1+4],>x2=%i4
433lduw [%i1+4],%i4
434
435# qhasm:   x5 = 857760878 & 0xfffffc00
436# asm 1: sethi %lm(857760878),>x5=int64#6
437# asm 2: sethi %lm(857760878),>x5=%i5
438sethi %lm(857760878),%i5
439
440# qhasm:   x3 = *(uint32 *) (arg2 + 8)
441# asm 1: lduw [<arg2=int64#2+8],>x3=int64#7
442# asm 2: lduw [<arg2=%i1+8],>x3=%g1
443lduw [%i1+8],%g1
444
445# qhasm:   x10 = 2036477234 & 0xfffffc00
446# asm 1: sethi %lm(2036477234),>x10=int64#8
447# asm 2: sethi %lm(2036477234),>x10=%g4
448sethi %lm(2036477234),%g4
449
450# qhasm:   x4 = *(uint32 *) (arg2 + 12)
451# asm 1: lduw [<arg2=int64#2+12],>x4=int64#9
452# asm 2: lduw [<arg2=%i1+12],>x4=%g5
453lduw [%i1+12],%g5
454
455# qhasm:   x15 = 1797285236 & 0xfffffc00
456# asm 1: sethi %lm(1797285236),>x15=int64#10
457# asm 2: sethi %lm(1797285236),>x15=%o0
458sethi %lm(1797285236),%o0
459
460# qhasm:   x11 = *(uint32 *) (arg2 + 16)
461# asm 1: lduw [<arg2=int64#2+16],>x11=int64#11
462# asm 2: lduw [<arg2=%i1+16],>x11=%o1
463lduw [%i1+16],%o1
464
465# qhasm:   x0 |= 1634760805 & 0x3ff
466# asm 1: or <x0=int64#4,%lo(1634760805),>x0=int64#4
467# asm 2: or <x0=%i3,%lo(1634760805),>x0=%i3
468or %i3,%lo(1634760805),%i3
469
470# qhasm:   x12 = *(uint32 *) (arg2 + 20)
471# asm 1: lduw [<arg2=int64#2+20],>x12=int64#12
472# asm 2: lduw [<arg2=%i1+20],>x12=%o2
473lduw [%i1+20],%o2
474
475# qhasm:   x5 |= 857760878 & 0x3ff
476# asm 1: or <x5=int64#6,%lo(857760878),>x5=int64#6
477# asm 2: or <x5=%i5,%lo(857760878),>x5=%i5
478or %i5,%lo(857760878),%i5
479
480# qhasm:   x13 = *(uint32 *) (arg2 + 24)
481# asm 1: lduw [<arg2=int64#2+24],>x13=int64#13
482# asm 2: lduw [<arg2=%i1+24],>x13=%o3
483lduw [%i1+24],%o3
484
485# qhasm:   x10 |= 2036477234 & 0x3ff
486# asm 1: or <x10=int64#8,%lo(2036477234),>x10=int64#8
487# asm 2: or <x10=%g4,%lo(2036477234),>x10=%g4
488or %g4,%lo(2036477234),%g4
489
490# qhasm:   x14 = *(uint32 *) (arg2 + 28)
491# asm 1: lduw [<arg2=int64#2+28],>x14=int64#2
492# asm 2: lduw [<arg2=%i1+28],>x14=%i1
493lduw [%i1+28],%i1
494
495# qhasm:   x15 |= 1797285236 & 0x3ff
496# asm 1: or <x15=int64#10,%lo(1797285236),>x15=int64#10
497# asm 2: or <x15=%o0,%lo(1797285236),>x15=%o0
498or %o0,%lo(1797285236),%o0
499
500# qhasm: storekey:
501._storekey:
502
503# qhasm:   *(int32 *) (x + 0) = x0
504# asm 1: stw <x0=int64#4,[<x=int64#1+0]
505# asm 2: stw <x0=%i3,[<x=%i0+0]
506stw %i3,[%i0+0]
507
508# qhasm:   x += 4
509# asm 1: add <x=int64#1,4,>x=int64#1
510# asm 2: add <x=%i0,4,>x=%i0
511add %i0,4,%i0
512
513# qhasm:   *(swapendian int32 *) x = x1
514# asm 1: stwa <x1=int64#3,[<x=int64#1] 0x88
515# asm 2: stwa <x1=%i2,[<x=%i0] 0x88
516stwa %i2,[%i0] 0x88
517
518# qhasm:   x += 4
519# asm 1: add <x=int64#1,4,>x=int64#1
520# asm 2: add <x=%i0,4,>x=%i0
521add %i0,4,%i0
522
523# qhasm:   *(swapendian int32 *) x = x2
524# asm 1: stwa <x2=int64#5,[<x=int64#1] 0x88
525# asm 2: stwa <x2=%i4,[<x=%i0] 0x88
526stwa %i4,[%i0] 0x88
527
528# qhasm:   x += 4
529# asm 1: add <x=int64#1,4,>x=int64#1
530# asm 2: add <x=%i0,4,>x=%i0
531add %i0,4,%i0
532
533# qhasm:   *(swapendian int32 *) x = x3
534# asm 1: stwa <x3=int64#7,[<x=int64#1] 0x88
535# asm 2: stwa <x3=%g1,[<x=%i0] 0x88
536stwa %g1,[%i0] 0x88
537
538# qhasm:   x += 4
539# asm 1: add <x=int64#1,4,>x=int64#1
540# asm 2: add <x=%i0,4,>x=%i0
541add %i0,4,%i0
542
543# qhasm:   *(swapendian int32 *) x = x4
544# asm 1: stwa <x4=int64#9,[<x=int64#1] 0x88
545# asm 2: stwa <x4=%g5,[<x=%i0] 0x88
546stwa %g5,[%i0] 0x88
547
548# qhasm:   x += 4
549# asm 1: add <x=int64#1,4,>x=int64#1
550# asm 2: add <x=%i0,4,>x=%i0
551add %i0,4,%i0
552
553# qhasm:   *(int32 *) (x + 0) = x5
554# asm 1: stw <x5=int64#6,[<x=int64#1+0]
555# asm 2: stw <x5=%i5,[<x=%i0+0]
556stw %i5,[%i0+0]
557
558# qhasm:   x += 20
559# asm 1: add <x=int64#1,20,>x=int64#1
560# asm 2: add <x=%i0,20,>x=%i0
561add %i0,20,%i0
562
563# qhasm:   *(int32 *) (x + 0) = x10
564# asm 1: stw <x10=int64#8,[<x=int64#1+0]
565# asm 2: stw <x10=%g4,[<x=%i0+0]
566stw %g4,[%i0+0]
567
568# qhasm:   x += 4
569# asm 1: add <x=int64#1,4,>x=int64#1
570# asm 2: add <x=%i0,4,>x=%i0
571add %i0,4,%i0
572
573# qhasm:   *(swapendian int32 *) x = x11
574# asm 1: stwa <x11=int64#11,[<x=int64#1] 0x88
575# asm 2: stwa <x11=%o1,[<x=%i0] 0x88
576stwa %o1,[%i0] 0x88
577
578# qhasm:   x += 4
579# asm 1: add <x=int64#1,4,>x=int64#1
580# asm 2: add <x=%i0,4,>x=%i0
581add %i0,4,%i0
582
583# qhasm:   *(swapendian int32 *) x = x12
584# asm 1: stwa <x12=int64#12,[<x=int64#1] 0x88
585# asm 2: stwa <x12=%o2,[<x=%i0] 0x88
586stwa %o2,[%i0] 0x88
587
588# qhasm:   x += 4
589# asm 1: add <x=int64#1,4,>x=int64#1
590# asm 2: add <x=%i0,4,>x=%i0
591add %i0,4,%i0
592
593# qhasm:   *(swapendian int32 *) x = x13
594# asm 1: stwa <x13=int64#13,[<x=int64#1] 0x88
595# asm 2: stwa <x13=%o3,[<x=%i0] 0x88
596stwa %o3,[%i0] 0x88
597
598# qhasm:   x += 4
599# asm 1: add <x=int64#1,4,>x=int64#1
600# asm 2: add <x=%i0,4,>x=%i0
601add %i0,4,%i0
602
603# qhasm:   *(swapendian int32 *) x = x14
604# asm 1: stwa <x14=int64#2,[<x=int64#1] 0x88
605# asm 2: stwa <x14=%i1,[<x=%i0] 0x88
606stwa %i1,[%i0] 0x88
607
608# qhasm:   x += 4
609# asm 1: add <x=int64#1,4,>x=int64#1
610# asm 2: add <x=%i0,4,>x=%i0
611add %i0,4,%i0
612
613# qhasm:   *(int32 *) (x + 0) = x15
614# asm 1: stw <x15=int64#10,[<x=int64#1+0]
615# asm 2: stw <x15=%o0,[<x=%i0+0]
616stw %o0,[%i0+0]
617
618# qhasm: leave
619ret
620restore
621
622# qhasm: enter ECRYPT_keystream_bytes
623.section ".text"
624.align 32
625.global ECRYPT_keystream_bytes
626ECRYPT_keystream_bytes:
627save %sp,-288,%sp
628
629# qhasm: bytes = arg3
630# asm 1: add %g0,<arg3=int64#3,>bytes=int64#5
631# asm 2: add %g0,<arg3=%i2,>bytes=%i4
632add %g0,%i2,%i4
633
634# qhasm: m = arg2
635# asm 1: add %g0,<arg2=int64#2,>m=int64#6
636# asm 2: add %g0,<arg2=%i1,>m=%i5
637add %g0,%i1,%i5
638
639# qhasm: out = arg2
640# asm 1: add %g0,<arg2=int64#2,>out=int64#2
641# asm 2: add %g0,<arg2=%i1,>out=%i1
642add %g0,%i1,%i1
643
644# qhasm:               unsigned>? bytes - 0
645# asm 1: subcc <bytes=int64#5,0,%g0
646# asm 2: subcc <bytes=%i4,0,%g0
647subcc %i4,0,%g0
648
649# qhasm: goto done if !unsigned>
650bleu,pt %xcc,._done
651nop
652
653# qhasm:   a = 0
654# asm 1: add %g0,0,>a=int64#3
655# asm 2: add %g0,0,>a=%i2
656add %g0,0,%i2
657
658# qhasm:   i = bytes
659# asm 1: add %g0,<bytes=int64#5,>i=int64#4
660# asm 2: add %g0,<bytes=%i4,>i=%i3
661add %g0,%i4,%i3
662
663# qhasm:   zeroloop:
664._zeroloop:
665
666# qhasm:     *(int8 *) (out + 0) = a
667# asm 1: stb <a=int64#3,[<out=int64#2+0]
668# asm 2: stb <a=%i2,[<out=%i1+0]
669stb %i2,[%i1+0]
670
671# qhasm:     out += 1
672# asm 1: add <out=int64#2,1,>out=int64#2
673# asm 2: add <out=%i1,1,>out=%i1
674add %i1,1,%i1
675
676# qhasm:                    unsigned>? i -= 1
677# asm 1: subcc <i=int64#4,1,>i=int64#4
678# asm 2: subcc <i=%i3,1,>i=%i3
679subcc %i3,1,%i3
680
681# qhasm:   goto zeroloop if unsigned>
682bgu,pt %xcc,._zeroloop
683nop
684
685# qhasm:   out -= bytes
686# asm 1: sub <out=int64#2,<bytes=int64#5,>out=int64#2
687# asm 2: sub <out=%i1,<bytes=%i4,>out=%i1
688sub %i1,%i4,%i1
689
690# qhasm: goto bytesatleast1
691b ._bytesatleast1
692nop
693
694# qhasm: enter ECRYPT_decrypt_bytes
695.section ".text"
696.align 32
697.global ECRYPT_decrypt_bytes
698ECRYPT_decrypt_bytes:
699save %sp,-288,%sp
700
701# qhasm: bytes = arg4
702# asm 1: add %g0,<arg4=int64#4,>bytes=int64#5
703# asm 2: add %g0,<arg4=%i3,>bytes=%i4
704add %g0,%i3,%i4
705
706# qhasm: m = arg2
707# asm 1: add %g0,<arg2=int64#2,>m=int64#6
708# asm 2: add %g0,<arg2=%i1,>m=%i5
709add %g0,%i1,%i5
710
711# qhasm: out = arg3
712# asm 1: add %g0,<arg3=int64#3,>out=int64#2
713# asm 2: add %g0,<arg3=%i2,>out=%i1
714add %g0,%i2,%i1
715
716# qhasm:               unsigned>? bytes - 0
717# asm 1: subcc <bytes=int64#5,0,%g0
718# asm 2: subcc <bytes=%i4,0,%g0
719subcc %i4,0,%g0
720
721# qhasm: goto done if !unsigned>
722bleu,pt %xcc,._done
723nop
724
725# qhasm: goto bytesatleast1
726b ._bytesatleast1
727nop
728
729# qhasm: enter ECRYPT_encrypt_bytes
730.section ".text"
731.align 32
732.global ECRYPT_encrypt_bytes
733ECRYPT_encrypt_bytes:
734save %sp,-288,%sp
735
736# qhasm: bytes = arg4
737# asm 1: add %g0,<arg4=int64#4,>bytes=int64#5
738# asm 2: add %g0,<arg4=%i3,>bytes=%i4
739add %g0,%i3,%i4
740
741# qhasm: m = arg2
742# asm 1: add %g0,<arg2=int64#2,>m=int64#6
743# asm 2: add %g0,<arg2=%i1,>m=%i5
744add %g0,%i1,%i5
745
746# qhasm: out = arg3
747# asm 1: add %g0,<arg3=int64#3,>out=int64#2
748# asm 2: add %g0,<arg3=%i2,>out=%i1
749add %g0,%i2,%i1
750
751# qhasm:               unsigned>? bytes - 0
752# asm 1: subcc <bytes=int64#5,0,%g0
753# asm 2: subcc <bytes=%i4,0,%g0
754subcc %i4,0,%g0
755
756# qhasm: goto done if !unsigned>
757bleu,pt %xcc,._done
758nop
759
760# qhasm: bytesatleast1:
761._bytesatleast1:
762
763# qhasm:                           unsigned<? bytes - 64
764# asm 1: subcc <bytes=int64#5,64,%g0
765# asm 2: subcc <bytes=%i4,64,%g0
766subcc %i4,64,%g0
767
768# qhasm:   goto bytesatleast64 if !unsigned<
769bgeu,pt %xcc,._bytesatleast64
770nop
771
772# qhasm:     ctarget = out
773# asm 1: stx <out=int64#2,[%fp+2023->ctarget=stack64#1]
774# asm 2: stx <out=%i1,[%fp+2023->ctarget=0]
775stx %i1,[%fp+2023-0]
776
777# qhasm:     out = &tmp
778# asm 1: add %fp,1967->tmp=stack512#1,>out=int64#2
779# asm 2: add %fp,1967->tmp=48,>out=%i1
780add %fp,1967-48,%i1
781
782# qhasm:     i = 0
783# asm 1: add %g0,0,>i=int64#3
784# asm 2: add %g0,0,>i=%i2
785add %g0,0,%i2
786
787# qhasm:     mcopyloop:
788._mcopyloop:
789
790# qhasm:       a = *(int8 *) (m + i)
791# asm 1: ldsb [<m=int64#6+<i=int64#3],>a=int64#4
792# asm 2: ldsb [<m=%i5+<i=%i2],>a=%i3
793ldsb [%i5+%i2],%i3
794
795# qhasm:       *(int8 *) (out + i) = a
796# asm 1: stb <a=int64#4,[<out=int64#2+<i=int64#3]
797# asm 2: stb <a=%i3,[<out=%i1+<i=%i2]
798stb %i3,[%i1+%i2]
799
800# qhasm:       i += 1
801# asm 1: add <i=int64#3,1,>i=int64#3
802# asm 2: add <i=%i2,1,>i=%i2
803add %i2,1,%i2
804
805# qhasm:                       unsigned<? i - bytes
806# asm 1: subcc <i=int64#3,<bytes=int64#5,%g0
807# asm 2: subcc <i=%i2,<bytes=%i4,%g0
808subcc %i2,%i4,%g0
809
810# qhasm:     goto mcopyloop if unsigned<
811blu,pt %xcc,._mcopyloop
812nop
813
814# qhasm:     m = &tmp
815# asm 1: add %fp,1967->tmp=stack512#1,>m=int64#6
816# asm 2: add %fp,1967->tmp=48,>m=%i5
817add %fp,1967-48,%i5
818
819# qhasm:   bytesatleast64:
820._bytesatleast64:
821
822# qhasm:     x0 = *(uint32 *) (x + 0)
823# asm 1: lduw [<x=int64#1+0],>x0=int64#3
824# asm 2: lduw [<x=%i0+0],>x0=%i2
825lduw [%i0+0],%i2
826
827# qhasm:     x1 = *(uint32 *) (x + 4)
828# asm 1: lduw [<x=int64#1+4],>x1=int64#4
829# asm 2: lduw [<x=%i0+4],>x1=%i3
830lduw [%i0+4],%i3
831
832# qhasm:     x2 = *(uint32 *) (x + 8)
833# asm 1: lduw [<x=int64#1+8],>x2=int64#7
834# asm 2: lduw [<x=%i0+8],>x2=%g1
835lduw [%i0+8],%g1
836
837# qhasm:     x3 = *(uint32 *) (x + 12)
838# asm 1: lduw [<x=int64#1+12],>x3=int64#8
839# asm 2: lduw [<x=%i0+12],>x3=%g4
840lduw [%i0+12],%g4
841
842# qhasm:     x4 = *(uint32 *) (x + 16)
843# asm 1: lduw [<x=int64#1+16],>x4=int64#9
844# asm 2: lduw [<x=%i0+16],>x4=%g5
845lduw [%i0+16],%g5
846
847# qhasm:     x5 = *(uint32 *) (x + 20)
848# asm 1: lduw [<x=int64#1+20],>x5=int64#10
849# asm 2: lduw [<x=%i0+20],>x5=%o0
850lduw [%i0+20],%o0
851
852# qhasm:     x6 = *(uint32 *) (x + 24)
853# asm 1: lduw [<x=int64#1+24],>x6=int64#11
854# asm 2: lduw [<x=%i0+24],>x6=%o1
855lduw [%i0+24],%o1
856
857# qhasm:     x7 = *(uint32 *) (x + 28)
858# asm 1: lduw [<x=int64#1+28],>x7=int64#12
859# asm 2: lduw [<x=%i0+28],>x7=%o2
860lduw [%i0+28],%o2
861
862# qhasm:     x8 = *(uint32 *) (x + 32)
863# asm 1: lduw [<x=int64#1+32],>x8=int64#13
864# asm 2: lduw [<x=%i0+32],>x8=%o3
865lduw [%i0+32],%o3
866
867# qhasm:     x9 = *(uint32 *) (x + 36)
868# asm 1: lduw [<x=int64#1+36],>x9=int64#14
869# asm 2: lduw [<x=%i0+36],>x9=%o4
870lduw [%i0+36],%o4
871
872# qhasm:     x10 = *(uint32 *) (x + 40)
873# asm 1: lduw [<x=int64#1+40],>x10=int64#15
874# asm 2: lduw [<x=%i0+40],>x10=%o5
875lduw [%i0+40],%o5
876
877# qhasm:     x11 = *(uint32 *) (x + 44)
878# asm 1: lduw [<x=int64#1+44],>x11=int64#16
879# asm 2: lduw [<x=%i0+44],>x11=%o7
880lduw [%i0+44],%o7
881
882# qhasm:     x12 = *(uint32 *) (x + 48)
883# asm 1: lduw [<x=int64#1+48],>x12=int64#17
884# asm 2: lduw [<x=%i0+48],>x12=%l0
885lduw [%i0+48],%l0
886
887# qhasm:     x13 = *(uint32 *) (x + 52)
888# asm 1: lduw [<x=int64#1+52],>x13=int64#18
889# asm 2: lduw [<x=%i0+52],>x13=%l1
890lduw [%i0+52],%l1
891
892# qhasm:     x14 = *(uint32 *) (x + 56)
893# asm 1: lduw [<x=int64#1+56],>x14=int64#19
894# asm 2: lduw [<x=%i0+56],>x14=%l2
895lduw [%i0+56],%l2
896
897# qhasm:     x15 = *(uint32 *) (x + 60)
898# asm 1: lduw [<x=int64#1+60],>x15=int64#20
899# asm 2: lduw [<x=%i0+60],>x15=%l3
900lduw [%i0+60],%l3
901
902# qhasm:     i = 20
903# asm 1: add %g0,20,>i=int64#21
904# asm 2: add %g0,20,>i=%l4
905add %g0,20,%l4
906
907# qhasm:     bytes_stack = bytes
908# asm 1: stx <bytes=int64#5,[%fp+2023->bytes_stack=stack64#2]
909# asm 2: stx <bytes=%i4,[%fp+2023->bytes_stack=8]
910stx %i4,[%fp+2023-8]
911
912# qhasm:     out_stack = out
913# asm 1: stx <out=int64#2,[%fp+2023->out_stack=stack64#3]
914# asm 2: stx <out=%i1,[%fp+2023->out_stack=16]
915stx %i1,[%fp+2023-16]
916
917# qhasm:     m_stack = m
918# asm 1: stx <m=int64#6,[%fp+2023->m_stack=stack64#4]
919# asm 2: stx <m=%i5,[%fp+2023->m_stack=24]
920stx %i5,[%fp+2023-24]
921
922# qhasm:     x_stack = x
923# asm 1: stx <x=int64#1,[%fp+2023->x_stack=stack64#5]
924# asm 2: stx <x=%i0,[%fp+2023->x_stack=32]
925stx %i0,[%fp+2023-32]
926
927# qhasm:     mainloop:
928._mainloop:
929
930# qhasm: y4 = x0 + x12
931# asm 1: add <x0=int64#3,<x12=int64#17,>y4=int64#1
932# asm 2: add <x0=%i2,<x12=%l0,>y4=%i0
933add %i2,%l0,%i0
934
935# qhasm: 		y9 = x5 + x1
936# asm 1: add <x5=int64#10,<x1=int64#4,>y9=int64#2
937# asm 2: add <x5=%o0,<x1=%i3,>y9=%i1
938add %o0,%i3,%i1
939
940# qhasm: z4 = (uint32) y4 << 7
941# asm 1: sll <y4=int64#1,7,>z4=int64#5
942# asm 2: sll <y4=%i0,7,>z4=%i4
943sll %i0,7,%i4
944
945# qhasm: 				y14 = x10 + x6
946# asm 1: add <x10=int64#15,<x6=int64#11,>y14=int64#6
947# asm 2: add <x10=%o5,<x6=%o1,>y14=%i5
948add %o5,%o1,%i5
949
950# qhasm: y4 = (uint32) y4 >> 25
951# asm 1: srl <y4=int64#1,25,>y4=int64#1
952# asm 2: srl <y4=%i0,25,>y4=%i0
953srl %i0,25,%i0
954
955# qhasm: 						y3 = x15 + x11
956# asm 1: add <x15=int64#20,<x11=int64#16,>y3=int64#22
957# asm 2: add <x15=%l3,<x11=%o7,>y3=%l5
958add %l3,%o7,%l5
959
960# qhasm: 		z9 = (uint32) y9 << 7
961# asm 1: sll <y9=int64#2,7,>z9=int64#23
962# asm 2: sll <y9=%i1,7,>z9=%l6
963sll %i1,7,%l6
964
965# qhasm: y4 |= z4
966# asm 1: or  <y4=int64#1,<z4=int64#5,>y4=int64#1
967# asm 2: or  <y4=%i0,<z4=%i4,>y4=%i0
968or  %i0,%i4,%i0
969
970# qhasm: 		y9 = (uint32) y9 >> 25
971# asm 1: srl <y9=int64#2,25,>y9=int64#2
972# asm 2: srl <y9=%i1,25,>y9=%i1
973srl %i1,25,%i1
974
975# qhasm: x4 ^= y4
976# asm 1: xor <x4=int64#9,<y4=int64#1,>x4=int64#1
977# asm 2: xor <x4=%g5,<y4=%i0,>x4=%i0
978xor %g5,%i0,%i0
979
980# qhasm: 				z14 = (uint32) y14 << 7
981# asm 1: sll <y14=int64#6,7,>z14=int64#5
982# asm 2: sll <y14=%i5,7,>z14=%i4
983sll %i5,7,%i4
984
985# qhasm: 		y9 |= z9
986# asm 1: or  <y9=int64#2,<z9=int64#23,>y9=int64#2
987# asm 2: or  <y9=%i1,<z9=%l6,>y9=%i1
988or  %i1,%l6,%i1
989
990# qhasm: 				y14 = (uint32) y14 >> 25
991# asm 1: srl <y14=int64#6,25,>y14=int64#6
992# asm 2: srl <y14=%i5,25,>y14=%i5
993srl %i5,25,%i5
994
995# qhasm: y8 = x4 + x0
996# asm 1: add <x4=int64#1,<x0=int64#3,>y8=int64#9
997# asm 2: add <x4=%i0,<x0=%i2,>y8=%g5
998add %i0,%i2,%g5
999
1000# qhasm: 						z3 = (uint32) y3 << 7
1001# asm 1: sll <y3=int64#22,7,>z3=int64#23
1002# asm 2: sll <y3=%l5,7,>z3=%l6
1003sll %l5,7,%l6
1004
1005# qhasm: 		x9 ^= y9
1006# asm 1: xor <x9=int64#14,<y9=int64#2,>x9=int64#2
1007# asm 2: xor <x9=%o4,<y9=%i1,>x9=%i1
1008xor %o4,%i1,%i1
1009
1010# qhasm: 						y3 = (uint32) y3 >> 25
1011# asm 1: srl <y3=int64#22,25,>y3=int64#14
1012# asm 2: srl <y3=%l5,25,>y3=%o4
1013srl %l5,25,%o4
1014
1015# qhasm: 				y14 |= z14
1016# asm 1: or  <y14=int64#6,<z14=int64#5,>y14=int64#5
1017# asm 2: or  <y14=%i5,<z14=%i4,>y14=%i4
1018or  %i5,%i4,%i4
1019
1020# qhasm: z8 = (uint32) y8 << 9
1021# asm 1: sll <y8=int64#9,9,>z8=int64#6
1022# asm 2: sll <y8=%g5,9,>z8=%i5
1023sll %g5,9,%i5
1024
1025# qhasm: 		y13 = x9 + x5
1026# asm 1: add <x9=int64#2,<x5=int64#10,>y13=int64#22
1027# asm 2: add <x9=%i1,<x5=%o0,>y13=%l5
1028add %i1,%o0,%l5
1029
1030# qhasm: y8 = (uint32) y8 >> 23
1031# asm 1: srl <y8=int64#9,23,>y8=int64#9
1032# asm 2: srl <y8=%g5,23,>y8=%g5
1033srl %g5,23,%g5
1034
1035# qhasm: 				x14 ^= y14
1036# asm 1: xor <x14=int64#19,<y14=int64#5,>x14=int64#5
1037# asm 2: xor <x14=%l2,<y14=%i4,>x14=%i4
1038xor %l2,%i4,%i4
1039
1040# qhasm: 						y3 |= z3
1041# asm 1: or  <y3=int64#14,<z3=int64#23,>y3=int64#14
1042# asm 2: or  <y3=%o4,<z3=%l6,>y3=%o4
1043or  %o4,%l6,%o4
1044
1045# qhasm: 				y2 = x14 + x10
1046# asm 1: add <x14=int64#5,<x10=int64#15,>y2=int64#19
1047# asm 2: add <x14=%i4,<x10=%o5,>y2=%l2
1048add %i4,%o5,%l2
1049
1050# qhasm: 		z13 = (uint32) y13 << 9
1051# asm 1: sll <y13=int64#22,9,>z13=int64#23
1052# asm 2: sll <y13=%l5,9,>z13=%l6
1053sll %l5,9,%l6
1054
1055# qhasm: 						x3 ^= y3
1056# asm 1: xor <x3=int64#8,<y3=int64#14,>x3=int64#8
1057# asm 2: xor <x3=%g4,<y3=%o4,>x3=%g4
1058xor %g4,%o4,%g4
1059
1060# qhasm: 		y13 = (uint32) y13 >> 23
1061# asm 1: srl <y13=int64#22,23,>y13=int64#14
1062# asm 2: srl <y13=%l5,23,>y13=%o4
1063srl %l5,23,%o4
1064
1065# qhasm: y8 |= z8
1066# asm 1: or  <y8=int64#9,<z8=int64#6,>y8=int64#6
1067# asm 2: or  <y8=%g5,<z8=%i5,>y8=%i5
1068or  %g5,%i5,%i5
1069
1070# qhasm: 				z2 = (uint32) y2 << 9
1071# asm 1: sll <y2=int64#19,9,>z2=int64#9
1072# asm 2: sll <y2=%l2,9,>z2=%g5
1073sll %l2,9,%g5
1074
1075# qhasm: 						y7 = x3 + x15
1076# asm 1: add <x3=int64#8,<x15=int64#20,>y7=int64#22
1077# asm 2: add <x3=%g4,<x15=%l3,>y7=%l5
1078add %g4,%l3,%l5
1079
1080# qhasm: 				y2 = (uint32) y2 >> 23
1081# asm 1: srl <y2=int64#19,23,>y2=int64#19
1082# asm 2: srl <y2=%l2,23,>y2=%l2
1083srl %l2,23,%l2
1084
1085# qhasm: x8 ^= y8
1086# asm 1: xor <x8=int64#13,<y8=int64#6,>x8=int64#6
1087# asm 2: xor <x8=%o3,<y8=%i5,>x8=%i5
1088xor %o3,%i5,%i5
1089
1090# qhasm: 		y13 |= z13
1091# asm 1: or  <y13=int64#14,<z13=int64#23,>y13=int64#13
1092# asm 2: or  <y13=%o4,<z13=%l6,>y13=%o3
1093or  %o4,%l6,%o3
1094
1095# qhasm: y12 = x8 + x4
1096# asm 1: add <x8=int64#6,<x4=int64#1,>y12=int64#14
1097# asm 2: add <x8=%i5,<x4=%i0,>y12=%o4
1098add %i5,%i0,%o4
1099
1100# qhasm: 						z7 = (uint32) y7 << 9
1101# asm 1: sll <y7=int64#22,9,>z7=int64#23
1102# asm 2: sll <y7=%l5,9,>z7=%l6
1103sll %l5,9,%l6
1104
1105# qhasm: 		x13 ^= y13
1106# asm 1: xor <x13=int64#18,<y13=int64#13,>x13=int64#18
1107# asm 2: xor <x13=%l1,<y13=%o3,>x13=%l1
1108xor %l1,%o3,%l1
1109
1110# qhasm: 						y7 = (uint32) y7 >> 23
1111# asm 1: srl <y7=int64#22,23,>y7=int64#13
1112# asm 2: srl <y7=%l5,23,>y7=%o3
1113srl %l5,23,%o3
1114
1115# qhasm: 				y2 |= z2
1116# asm 1: or  <y2=int64#19,<z2=int64#9,>y2=int64#9
1117# asm 2: or  <y2=%l2,<z2=%g5,>y2=%g5
1118or  %l2,%g5,%g5
1119
1120# qhasm: z12 = (uint32) y12 << 13
1121# asm 1: sll <y12=int64#14,13,>z12=int64#19
1122# asm 2: sll <y12=%o4,13,>z12=%l2
1123sll %o4,13,%l2
1124
1125# qhasm: 		y1 = x13 + x9
1126# asm 1: add <x13=int64#18,<x9=int64#2,>y1=int64#22
1127# asm 2: add <x13=%l1,<x9=%i1,>y1=%l5
1128add %l1,%i1,%l5
1129
1130# qhasm: y12 = (uint32) y12 >> 19
1131# asm 1: srl <y12=int64#14,19,>y12=int64#14
1132# asm 2: srl <y12=%o4,19,>y12=%o4
1133srl %o4,19,%o4
1134
1135# qhasm: 				x2 ^= y2
1136# asm 1: xor <x2=int64#7,<y2=int64#9,>x2=int64#7
1137# asm 2: xor <x2=%g1,<y2=%g5,>x2=%g1
1138xor %g1,%g5,%g1
1139
1140# qhasm: 						y7 |= z7
1141# asm 1: or  <y7=int64#13,<z7=int64#23,>y7=int64#9
1142# asm 2: or  <y7=%o3,<z7=%l6,>y7=%g5
1143or  %o3,%l6,%g5
1144
1145# qhasm: 				y6 = x2 + x14
1146# asm 1: add <x2=int64#7,<x14=int64#5,>y6=int64#13
1147# asm 2: add <x2=%g1,<x14=%i4,>y6=%o3
1148add %g1,%i4,%o3
1149
1150# qhasm: 		z1 = (uint32) y1 << 13
1151# asm 1: sll <y1=int64#22,13,>z1=int64#23
1152# asm 2: sll <y1=%l5,13,>z1=%l6
1153sll %l5,13,%l6
1154
1155# qhasm: 						x7 ^= y7
1156# asm 1: xor <x7=int64#12,<y7=int64#9,>x7=int64#9
1157# asm 2: xor <x7=%o2,<y7=%g5,>x7=%g5
1158xor %o2,%g5,%g5
1159
1160# qhasm: 		y1 = (uint32) y1 >> 19
1161# asm 1: srl <y1=int64#22,19,>y1=int64#12
1162# asm 2: srl <y1=%l5,19,>y1=%o2
1163srl %l5,19,%o2
1164
1165# qhasm: y12 |= z12
1166# asm 1: or  <y12=int64#14,<z12=int64#19,>y12=int64#14
1167# asm 2: or  <y12=%o4,<z12=%l2,>y12=%o4
1168or  %o4,%l2,%o4
1169
1170# qhasm: 				z6 = (uint32) y6 << 13
1171# asm 1: sll <y6=int64#13,13,>z6=int64#19
1172# asm 2: sll <y6=%o3,13,>z6=%l2
1173sll %o3,13,%l2
1174
1175# qhasm: 						y11 = x7 + x3
1176# asm 1: add <x7=int64#9,<x3=int64#8,>y11=int64#22
1177# asm 2: add <x7=%g5,<x3=%g4,>y11=%l5
1178add %g5,%g4,%l5
1179
1180# qhasm: 				y6 = (uint32) y6 >> 19
1181# asm 1: srl <y6=int64#13,19,>y6=int64#13
1182# asm 2: srl <y6=%o3,19,>y6=%o3
1183srl %o3,19,%o3
1184
1185# qhasm: x12 ^= y12
1186# asm 1: xor <x12=int64#17,<y12=int64#14,>x12=int64#14
1187# asm 2: xor <x12=%l0,<y12=%o4,>x12=%o4
1188xor %l0,%o4,%o4
1189
1190# qhasm: 		y1 |= z1
1191# asm 1: or  <y1=int64#12,<z1=int64#23,>y1=int64#12
1192# asm 2: or  <y1=%o2,<z1=%l6,>y1=%o2
1193or  %o2,%l6,%o2
1194
1195# qhasm: y0 = x12 + x8
1196# asm 1: add <x12=int64#14,<x8=int64#6,>y0=int64#17
1197# asm 2: add <x12=%o4,<x8=%i5,>y0=%l0
1198add %o4,%i5,%l0
1199
1200# qhasm: 						z11 = (uint32) y11 << 13
1201# asm 1: sll <y11=int64#22,13,>z11=int64#23
1202# asm 2: sll <y11=%l5,13,>z11=%l6
1203sll %l5,13,%l6
1204
1205# qhasm: 		x1 ^= y1
1206# asm 1: xor <x1=int64#4,<y1=int64#12,>x1=int64#4
1207# asm 2: xor <x1=%i3,<y1=%o2,>x1=%i3
1208xor %i3,%o2,%i3
1209
1210# qhasm: 						y11 = (uint32) y11 >> 19
1211# asm 1: srl <y11=int64#22,19,>y11=int64#12
1212# asm 2: srl <y11=%l5,19,>y11=%o2
1213srl %l5,19,%o2
1214
1215# qhasm: 				y6 |= z6
1216# asm 1: or  <y6=int64#13,<z6=int64#19,>y6=int64#13
1217# asm 2: or  <y6=%o3,<z6=%l2,>y6=%o3
1218or  %o3,%l2,%o3
1219
1220# qhasm: z0 = (uint32) y0 << 18
1221# asm 1: sll <y0=int64#17,18,>z0=int64#19
1222# asm 2: sll <y0=%l0,18,>z0=%l2
1223sll %l0,18,%l2
1224
1225# qhasm: 		y5 = x1 + x13
1226# asm 1: add <x1=int64#4,<x13=int64#18,>y5=int64#22
1227# asm 2: add <x1=%i3,<x13=%l1,>y5=%l5
1228add %i3,%l1,%l5
1229
1230# qhasm: y0 = (uint32) y0 >> 14
1231# asm 1: srl <y0=int64#17,14,>y0=int64#17
1232# asm 2: srl <y0=%l0,14,>y0=%l0
1233srl %l0,14,%l0
1234
1235# qhasm: 				x6 ^= y6
1236# asm 1: xor <x6=int64#11,<y6=int64#13,>x6=int64#11
1237# asm 2: xor <x6=%o1,<y6=%o3,>x6=%o1
1238xor %o1,%o3,%o1
1239
1240# qhasm: 						y11 |= z11
1241# asm 1: or  <y11=int64#12,<z11=int64#23,>y11=int64#12
1242# asm 2: or  <y11=%o2,<z11=%l6,>y11=%o2
1243or  %o2,%l6,%o2
1244
1245# qhasm: 				y10 = x6 + x2
1246# asm 1: add <x6=int64#11,<x2=int64#7,>y10=int64#13
1247# asm 2: add <x6=%o1,<x2=%g1,>y10=%o3
1248add %o1,%g1,%o3
1249
1250# qhasm: 		z5 = (uint32) y5 << 18
1251# asm 1: sll <y5=int64#22,18,>z5=int64#23
1252# asm 2: sll <y5=%l5,18,>z5=%l6
1253sll %l5,18,%l6
1254
1255# qhasm: 						x11 ^= y11
1256# asm 1: xor <x11=int64#16,<y11=int64#12,>x11=int64#12
1257# asm 2: xor <x11=%o7,<y11=%o2,>x11=%o2
1258xor %o7,%o2,%o2
1259
1260# qhasm: 		y5 = (uint32) y5 >> 14
1261# asm 1: srl <y5=int64#22,14,>y5=int64#16
1262# asm 2: srl <y5=%l5,14,>y5=%o7
1263srl %l5,14,%o7
1264
1265# qhasm: y0 |= z0
1266# asm 1: or  <y0=int64#17,<z0=int64#19,>y0=int64#17
1267# asm 2: or  <y0=%l0,<z0=%l2,>y0=%l0
1268or  %l0,%l2,%l0
1269
1270# qhasm: 				z10 = (uint32) y10 << 18
1271# asm 1: sll <y10=int64#13,18,>z10=int64#19
1272# asm 2: sll <y10=%o3,18,>z10=%l2
1273sll %o3,18,%l2
1274
1275# qhasm: 						y15 = x11 + x7
1276# asm 1: add <x11=int64#12,<x7=int64#9,>y15=int64#22
1277# asm 2: add <x11=%o2,<x7=%g5,>y15=%l5
1278add %o2,%g5,%l5
1279
1280# qhasm: 				y10 = (uint32) y10 >> 14
1281# asm 1: srl <y10=int64#13,14,>y10=int64#13
1282# asm 2: srl <y10=%o3,14,>y10=%o3
1283srl %o3,14,%o3
1284
1285# qhasm: x0 ^= y0
1286# asm 1: xor <x0=int64#3,<y0=int64#17,>x0=int64#3
1287# asm 2: xor <x0=%i2,<y0=%l0,>x0=%i2
1288xor %i2,%l0,%i2
1289
1290# qhasm: 		y5 |= z5
1291# asm 1: or  <y5=int64#16,<z5=int64#23,>y5=int64#16
1292# asm 2: or  <y5=%o7,<z5=%l6,>y5=%o7
1293or  %o7,%l6,%o7
1294
1295# qhasm: u1 = x0 + x3
1296# asm 1: add <x0=int64#3,<x3=int64#8,>u1=int64#17
1297# asm 2: add <x0=%i2,<x3=%g4,>u1=%l0
1298add %i2,%g4,%l0
1299
1300# qhasm: 						z15 = (uint32) y15 << 18
1301# asm 1: sll <y15=int64#22,18,>z15=int64#23
1302# asm 2: sll <y15=%l5,18,>z15=%l6
1303sll %l5,18,%l6
1304
1305# qhasm: 		x5 ^= y5
1306# asm 1: xor <x5=int64#10,<y5=int64#16,>x5=int64#10
1307# asm 2: xor <x5=%o0,<y5=%o7,>x5=%o0
1308xor %o0,%o7,%o0
1309
1310# qhasm: 						y15 = (uint32) y15 >> 14
1311# asm 1: srl <y15=int64#22,14,>y15=int64#16
1312# asm 2: srl <y15=%l5,14,>y15=%o7
1313srl %l5,14,%o7
1314
1315# qhasm: 				y10 |= z10
1316# asm 1: or  <y10=int64#13,<z10=int64#19,>y10=int64#13
1317# asm 2: or  <y10=%o3,<z10=%l2,>y10=%o3
1318or  %o3,%l2,%o3
1319
1320# qhasm: z1 = (uint32) u1 << 7
1321# asm 1: sll <u1=int64#17,7,>z1=int64#19
1322# asm 2: sll <u1=%l0,7,>z1=%l2
1323sll %l0,7,%l2
1324
1325# qhasm: 		u6 = x5 + x4
1326# asm 1: add <x5=int64#10,<x4=int64#1,>u6=int64#22
1327# asm 2: add <x5=%o0,<x4=%i0,>u6=%l5
1328add %o0,%i0,%l5
1329
1330# qhasm: u1 = (uint32) u1 >> 25
1331# asm 1: srl <u1=int64#17,25,>u1=int64#17
1332# asm 2: srl <u1=%l0,25,>u1=%l0
1333srl %l0,25,%l0
1334
1335# qhasm: 				x10 ^= y10
1336# asm 1: xor <x10=int64#15,<y10=int64#13,>x10=int64#15
1337# asm 2: xor <x10=%o5,<y10=%o3,>x10=%o5
1338xor %o5,%o3,%o5
1339
1340# qhasm: 						y15 |= z15
1341# asm 1: or  <y15=int64#16,<z15=int64#23,>y15=int64#13
1342# asm 2: or  <y15=%o7,<z15=%l6,>y15=%o3
1343or  %o7,%l6,%o3
1344
1345# qhasm: 				u11 = x10 + x9
1346# asm 1: add <x10=int64#15,<x9=int64#2,>u11=int64#16
1347# asm 2: add <x10=%o5,<x9=%i1,>u11=%o7
1348add %o5,%i1,%o7
1349
1350# qhasm: 		z6 = (uint32) u6 << 7
1351# asm 1: sll <u6=int64#22,7,>z6=int64#23
1352# asm 2: sll <u6=%l5,7,>z6=%l6
1353sll %l5,7,%l6
1354
1355# qhasm: 						x15 ^= y15
1356# asm 1: xor <x15=int64#20,<y15=int64#13,>x15=int64#20
1357# asm 2: xor <x15=%l3,<y15=%o3,>x15=%l3
1358xor %l3,%o3,%l3
1359
1360# qhasm: 		u6 = (uint32) u6 >> 25
1361# asm 1: srl <u6=int64#22,25,>u6=int64#13
1362# asm 2: srl <u6=%l5,25,>u6=%o3
1363srl %l5,25,%o3
1364
1365# qhasm: u1 |= z1
1366# asm 1: or  <u1=int64#17,<z1=int64#19,>u1=int64#17
1367# asm 2: or  <u1=%l0,<z1=%l2,>u1=%l0
1368or  %l0,%l2,%l0
1369
1370# qhasm: 				z11 = (uint32) u11 << 7
1371# asm 1: sll <u11=int64#16,7,>z11=int64#19
1372# asm 2: sll <u11=%o7,7,>z11=%l2
1373sll %o7,7,%l2
1374
1375# qhasm: 						u12 = x15 + x14
1376# asm 1: add <x15=int64#20,<x14=int64#5,>u12=int64#22
1377# asm 2: add <x15=%l3,<x14=%i4,>u12=%l5
1378add %l3,%i4,%l5
1379
1380# qhasm: 				u11 = (uint32) u11 >> 25
1381# asm 1: srl <u11=int64#16,25,>u11=int64#16
1382# asm 2: srl <u11=%o7,25,>u11=%o7
1383srl %o7,25,%o7
1384
1385# qhasm: 		u6 |= z6
1386# asm 1: or  <u6=int64#13,<z6=int64#23,>u6=int64#13
1387# asm 2: or  <u6=%o3,<z6=%l6,>u6=%o3
1388or  %o3,%l6,%o3
1389
1390# qhasm: x1 ^= u1
1391# asm 1: xor <x1=int64#4,<u1=int64#17,>x1=int64#4
1392# asm 2: xor <x1=%i3,<u1=%l0,>x1=%i3
1393xor %i3,%l0,%i3
1394
1395# qhasm: 						z12 = (uint32) u12 << 7
1396# asm 1: sll <u12=int64#22,7,>z12=int64#17
1397# asm 2: sll <u12=%l5,7,>z12=%l0
1398sll %l5,7,%l0
1399
1400# qhasm: 		x6 ^= u6
1401# asm 1: xor <x6=int64#11,<u6=int64#13,>x6=int64#11
1402# asm 2: xor <x6=%o1,<u6=%o3,>x6=%o1
1403xor %o1,%o3,%o1
1404
1405# qhasm: 						u12 = (uint32) u12 >> 25
1406# asm 1: srl <u12=int64#22,25,>u12=int64#13
1407# asm 2: srl <u12=%l5,25,>u12=%o3
1408srl %l5,25,%o3
1409
1410# qhasm: 				u11 |= z11
1411# asm 1: or  <u11=int64#16,<z11=int64#19,>u11=int64#16
1412# asm 2: or  <u11=%o7,<z11=%l2,>u11=%o7
1413or  %o7,%l2,%o7
1414
1415# qhasm: u2 = x1 + x0
1416# asm 1: add <x1=int64#4,<x0=int64#3,>u2=int64#19
1417# asm 2: add <x1=%i3,<x0=%i2,>u2=%l2
1418add %i3,%i2,%l2
1419
1420# qhasm: 		u7 = x6 + x5
1421# asm 1: add <x6=int64#11,<x5=int64#10,>u7=int64#22
1422# asm 2: add <x6=%o1,<x5=%o0,>u7=%l5
1423add %o1,%o0,%l5
1424
1425# qhasm: z2 = (uint32) u2 << 9
1426# asm 1: sll <u2=int64#19,9,>z2=int64#23
1427# asm 2: sll <u2=%l2,9,>z2=%l6
1428sll %l2,9,%l6
1429
1430# qhasm: 				x11 ^= u11
1431# asm 1: xor <x11=int64#12,<u11=int64#16,>x11=int64#16
1432# asm 2: xor <x11=%o2,<u11=%o7,>x11=%o7
1433xor %o2,%o7,%o7
1434
1435# qhasm: u2 = (uint32) u2 >> 23
1436# asm 1: srl <u2=int64#19,23,>u2=int64#12
1437# asm 2: srl <u2=%l2,23,>u2=%o2
1438srl %l2,23,%o2
1439
1440# qhasm: 						u12 |= z12
1441# asm 1: or  <u12=int64#13,<z12=int64#17,>u12=int64#13
1442# asm 2: or  <u12=%o3,<z12=%l0,>u12=%o3
1443or  %o3,%l0,%o3
1444
1445# qhasm: 		z7 = (uint32) u7 << 9
1446# asm 1: sll <u7=int64#22,9,>z7=int64#19
1447# asm 2: sll <u7=%l5,9,>z7=%l2
1448sll %l5,9,%l2
1449
1450# qhasm: 				u8 = x11 + x10
1451# asm 1: add <x11=int64#16,<x10=int64#15,>u8=int64#24
1452# asm 2: add <x11=%o7,<x10=%o5,>u8=%l7
1453add %o7,%o5,%l7
1454
1455# qhasm: 		u7 = (uint32) u7 >> 23
1456# asm 1: srl <u7=int64#22,23,>u7=int64#22
1457# asm 2: srl <u7=%l5,23,>u7=%l5
1458srl %l5,23,%l5
1459
1460# qhasm: 						x12 ^= u12
1461# asm 1: xor <x12=int64#14,<u12=int64#13,>x12=int64#17
1462# asm 2: xor <x12=%o4,<u12=%o3,>x12=%l0
1463xor %o4,%o3,%l0
1464
1465# qhasm: 				z8 = (uint32) u8 << 9
1466# asm 1: sll <u8=int64#24,9,>z8=int64#13
1467# asm 2: sll <u8=%l7,9,>z8=%o3
1468sll %l7,9,%o3
1469
1470# qhasm: u2 |= z2
1471# asm 1: or  <u2=int64#12,<z2=int64#23,>u2=int64#12
1472# asm 2: or  <u2=%o2,<z2=%l6,>u2=%o2
1473or  %o2,%l6,%o2
1474
1475# qhasm: 				u8 = (uint32) u8 >> 23
1476# asm 1: srl <u8=int64#24,23,>u8=int64#14
1477# asm 2: srl <u8=%l7,23,>u8=%o4
1478srl %l7,23,%o4
1479
1480# qhasm: 						u13 = x12 + x15
1481# asm 1: add <x12=int64#17,<x15=int64#20,>u13=int64#23
1482# asm 2: add <x12=%l0,<x15=%l3,>u13=%l6
1483add %l0,%l3,%l6
1484
1485# qhasm: 		u7 |= z7
1486# asm 1: or  <u7=int64#22,<z7=int64#19,>u7=int64#19
1487# asm 2: or  <u7=%l5,<z7=%l2,>u7=%l2
1488or  %l5,%l2,%l2
1489
1490# qhasm: x2 ^= u2
1491# asm 1: xor <x2=int64#7,<u2=int64#12,>x2=int64#7
1492# asm 2: xor <x2=%g1,<u2=%o2,>x2=%g1
1493xor %g1,%o2,%g1
1494
1495# qhasm: 						z13 = (uint32) u13 << 9
1496# asm 1: sll <u13=int64#23,9,>z13=int64#22
1497# asm 2: sll <u13=%l6,9,>z13=%l5
1498sll %l6,9,%l5
1499
1500# qhasm: 		x7 ^= u7
1501# asm 1: xor <x7=int64#9,<u7=int64#19,>x7=int64#12
1502# asm 2: xor <x7=%g5,<u7=%l2,>x7=%o2
1503xor %g5,%l2,%o2
1504
1505# qhasm: 						u13 = (uint32) u13 >> 23
1506# asm 1: srl <u13=int64#23,23,>u13=int64#9
1507# asm 2: srl <u13=%l6,23,>u13=%g5
1508srl %l6,23,%g5
1509
1510# qhasm: 				u8 |= z8
1511# asm 1: or  <u8=int64#14,<z8=int64#13,>u8=int64#13
1512# asm 2: or  <u8=%o4,<z8=%o3,>u8=%o3
1513or  %o4,%o3,%o3
1514
1515# qhasm: u3 = x2 + x1
1516# asm 1: add <x2=int64#7,<x1=int64#4,>u3=int64#14
1517# asm 2: add <x2=%g1,<x1=%i3,>u3=%o4
1518add %g1,%i3,%o4
1519
1520# qhasm: 		u4 = x7 + x6
1521# asm 1: add <x7=int64#12,<x6=int64#11,>u4=int64#19
1522# asm 2: add <x7=%o2,<x6=%o1,>u4=%l2
1523add %o2,%o1,%l2
1524
1525# qhasm: z3 = (uint32) u3 << 13
1526# asm 1: sll <u3=int64#14,13,>z3=int64#23
1527# asm 2: sll <u3=%o4,13,>z3=%l6
1528sll %o4,13,%l6
1529
1530# qhasm: 				x8 ^= u8
1531# asm 1: xor <x8=int64#6,<u8=int64#13,>x8=int64#13
1532# asm 2: xor <x8=%i5,<u8=%o3,>x8=%o3
1533xor %i5,%o3,%o3
1534
1535# qhasm: 		z4 = (uint32) u4 << 13
1536# asm 1: sll <u4=int64#19,13,>z4=int64#6
1537# asm 2: sll <u4=%l2,13,>z4=%i5
1538sll %l2,13,%i5
1539
1540# qhasm: 						u13 |= z13
1541# asm 1: or  <u13=int64#9,<z13=int64#22,>u13=int64#9
1542# asm 2: or  <u13=%g5,<z13=%l5,>u13=%g5
1543or  %g5,%l5,%g5
1544
1545# qhasm: u3 = (uint32) u3 >> 19
1546# asm 1: srl <u3=int64#14,19,>u3=int64#14
1547# asm 2: srl <u3=%o4,19,>u3=%o4
1548srl %o4,19,%o4
1549
1550# qhasm: 				u9 = x8 + x11
1551# asm 1: add <x8=int64#13,<x11=int64#16,>u9=int64#22
1552# asm 2: add <x8=%o3,<x11=%o7,>u9=%l5
1553add %o3,%o7,%l5
1554
1555# qhasm: 		u4 = (uint32) u4 >> 19
1556# asm 1: srl <u4=int64#19,19,>u4=int64#19
1557# asm 2: srl <u4=%l2,19,>u4=%l2
1558srl %l2,19,%l2
1559
1560# qhasm: 						x13 ^= u13
1561# asm 1: xor <x13=int64#18,<u13=int64#9,>x13=int64#18
1562# asm 2: xor <x13=%l1,<u13=%g5,>x13=%l1
1563xor %l1,%g5,%l1
1564
1565# qhasm: 				z9 = (uint32) u9 << 13
1566# asm 1: sll <u9=int64#22,13,>z9=int64#24
1567# asm 2: sll <u9=%l5,13,>z9=%l7
1568sll %l5,13,%l7
1569
1570# qhasm: u3 |= z3
1571# asm 1: or  <u3=int64#14,<z3=int64#23,>u3=int64#9
1572# asm 2: or  <u3=%o4,<z3=%l6,>u3=%g5
1573or  %o4,%l6,%g5
1574
1575# qhasm: 				u9 = (uint32) u9 >> 19
1576# asm 1: srl <u9=int64#22,19,>u9=int64#14
1577# asm 2: srl <u9=%l5,19,>u9=%o4
1578srl %l5,19,%o4
1579
1580# qhasm: 						u14 = x13 + x12
1581# asm 1: add <x13=int64#18,<x12=int64#17,>u14=int64#22
1582# asm 2: add <x13=%l1,<x12=%l0,>u14=%l5
1583add %l1,%l0,%l5
1584
1585# qhasm: 		u4 |= z4
1586# asm 1: or  <u4=int64#19,<z4=int64#6,>u4=int64#6
1587# asm 2: or  <u4=%l2,<z4=%i5,>u4=%i5
1588or  %l2,%i5,%i5
1589
1590# qhasm: x3 ^= u3
1591# asm 1: xor <x3=int64#8,<u3=int64#9,>x3=int64#8
1592# asm 2: xor <x3=%g4,<u3=%g5,>x3=%g4
1593xor %g4,%g5,%g4
1594
1595# qhasm: 						z14 = (uint32) u14 << 13
1596# asm 1: sll <u14=int64#22,13,>z14=int64#19
1597# asm 2: sll <u14=%l5,13,>z14=%l2
1598sll %l5,13,%l2
1599
1600# qhasm: 		x4 ^= u4
1601# asm 1: xor <x4=int64#1,<u4=int64#6,>x4=int64#9
1602# asm 2: xor <x4=%i0,<u4=%i5,>x4=%g5
1603xor %i0,%i5,%g5
1604
1605# qhasm: 						u14 = (uint32) u14 >> 19
1606# asm 1: srl <u14=int64#22,19,>u14=int64#1
1607# asm 2: srl <u14=%l5,19,>u14=%i0
1608srl %l5,19,%i0
1609
1610# qhasm: 				u9 |= z9
1611# asm 1: or  <u9=int64#14,<z9=int64#24,>u9=int64#6
1612# asm 2: or  <u9=%o4,<z9=%l7,>u9=%i5
1613or  %o4,%l7,%i5
1614
1615# qhasm: u0 = x3 + x2
1616# asm 1: add <x3=int64#8,<x2=int64#7,>u0=int64#22
1617# asm 2: add <x3=%g4,<x2=%g1,>u0=%l5
1618add %g4,%g1,%l5
1619
1620# qhasm: 		u5 = x4 + x7
1621# asm 1: add <x4=int64#9,<x7=int64#12,>u5=int64#23
1622# asm 2: add <x4=%g5,<x7=%o2,>u5=%l6
1623add %g5,%o2,%l6
1624
1625# qhasm: z0 = (uint32) u0 << 18
1626# asm 1: sll <u0=int64#22,18,>z0=int64#24
1627# asm 2: sll <u0=%l5,18,>z0=%l7
1628sll %l5,18,%l7
1629
1630# qhasm: 				x9 ^= u9
1631# asm 1: xor <x9=int64#2,<u9=int64#6,>x9=int64#14
1632# asm 2: xor <x9=%i1,<u9=%i5,>x9=%o4
1633xor %i1,%i5,%o4
1634
1635# qhasm: 		z5 = (uint32) u5 << 18
1636# asm 1: sll <u5=int64#23,18,>z5=int64#2
1637# asm 2: sll <u5=%l6,18,>z5=%i1
1638sll %l6,18,%i1
1639
1640# qhasm: 						u14 |= z14
1641# asm 1: or  <u14=int64#1,<z14=int64#19,>u14=int64#1
1642# asm 2: or  <u14=%i0,<z14=%l2,>u14=%i0
1643or  %i0,%l2,%i0
1644
1645# qhasm: u0 = (uint32) u0 >> 14
1646# asm 1: srl <u0=int64#22,14,>u0=int64#6
1647# asm 2: srl <u0=%l5,14,>u0=%i5
1648srl %l5,14,%i5
1649
1650# qhasm: 				u10 = x9 + x8
1651# asm 1: add <x9=int64#14,<x8=int64#13,>u10=int64#22
1652# asm 2: add <x9=%o4,<x8=%o3,>u10=%l5
1653add %o4,%o3,%l5
1654
1655# qhasm: 		u5 = (uint32) u5 >> 14
1656# asm 1: srl <u5=int64#23,14,>u5=int64#23
1657# asm 2: srl <u5=%l6,14,>u5=%l6
1658srl %l6,14,%l6
1659
1660# qhasm: 						x14 ^= u14
1661# asm 1: xor <x14=int64#5,<u14=int64#1,>x14=int64#19
1662# asm 2: xor <x14=%i4,<u14=%i0,>x14=%l2
1663xor %i4,%i0,%l2
1664
1665# qhasm: 				z10 = (uint32) u10 << 18
1666# asm 1: sll <u10=int64#22,18,>z10=int64#1
1667# asm 2: sll <u10=%l5,18,>z10=%i0
1668sll %l5,18,%i0
1669
1670# qhasm: u0 |= z0
1671# asm 1: or  <u0=int64#6,<z0=int64#24,>u0=int64#5
1672# asm 2: or  <u0=%i5,<z0=%l7,>u0=%i4
1673or  %i5,%l7,%i4
1674
1675# qhasm: 				u10 = (uint32) u10 >> 14
1676# asm 1: srl <u10=int64#22,14,>u10=int64#6
1677# asm 2: srl <u10=%l5,14,>u10=%i5
1678srl %l5,14,%i5
1679
1680# qhasm: 						u15 = x14 + x13
1681# asm 1: add <x14=int64#19,<x13=int64#18,>u15=int64#22
1682# asm 2: add <x14=%l2,<x13=%l1,>u15=%l5
1683add %l2,%l1,%l5
1684
1685# qhasm: 		u5 |= z5
1686# asm 1: or  <u5=int64#23,<z5=int64#2,>u5=int64#2
1687# asm 2: or  <u5=%l6,<z5=%i1,>u5=%i1
1688or  %l6,%i1,%i1
1689
1690# qhasm: x0 ^= u0
1691# asm 1: xor <x0=int64#3,<u0=int64#5,>x0=int64#3
1692# asm 2: xor <x0=%i2,<u0=%i4,>x0=%i2
1693xor %i2,%i4,%i2
1694
1695# qhasm: 						z15 = (uint32) u15 << 18
1696# asm 1: sll <u15=int64#22,18,>z15=int64#5
1697# asm 2: sll <u15=%l5,18,>z15=%i4
1698sll %l5,18,%i4
1699
1700# qhasm: 		x5 ^= u5
1701# asm 1: xor <x5=int64#10,<u5=int64#2,>x5=int64#10
1702# asm 2: xor <x5=%o0,<u5=%i1,>x5=%o0
1703xor %o0,%i1,%o0
1704
1705# qhasm: 						u15 = (uint32) u15 >> 14
1706# asm 1: srl <u15=int64#22,14,>u15=int64#2
1707# asm 2: srl <u15=%l5,14,>u15=%i1
1708srl %l5,14,%i1
1709
1710# qhasm:                  unsigned>? i -= 2
1711# asm 1: subcc <i=int64#21,2,>i=int64#21
1712# asm 2: subcc <i=%l4,2,>i=%l4
1713subcc %l4,2,%l4
1714
1715# qhasm: 				u10 |= z10
1716# asm 1: or  <u10=int64#6,<z10=int64#1,>u10=int64#1
1717# asm 2: or  <u10=%i5,<z10=%i0,>u10=%i0
1718or  %i5,%i0,%i0
1719
1720# qhasm: 						u15 |= z15
1721# asm 1: or  <u15=int64#2,<z15=int64#5,>u15=int64#2
1722# asm 2: or  <u15=%i1,<z15=%i4,>u15=%i1
1723or  %i1,%i4,%i1
1724
1725# qhasm: 				x10 ^= u10
1726# asm 1: xor <x10=int64#15,<u10=int64#1,>x10=int64#15
1727# asm 2: xor <x10=%o5,<u10=%i0,>x10=%o5
1728xor %o5,%i0,%o5
1729
1730# qhasm: 						x15 ^= u15
1731# asm 1: xor <x15=int64#20,<u15=int64#2,>x15=int64#20
1732# asm 2: xor <x15=%l3,<u15=%i1,>x15=%l3
1733xor %l3,%i1,%l3
1734
1735# qhasm: goto mainloop if unsigned>
1736bgu,pt %xcc,._mainloop
1737nop
1738
1739# qhasm:   x = x_stack
1740# asm 1: ldx [%fp+2023-<x_stack=stack64#5],>x=int64#1
1741# asm 2: ldx [%fp+2023-<x_stack=32],>x=%i0
1742ldx [%fp+2023-32],%i0
1743
1744# qhasm:   q0 = *(uint32 *) (x + 0)
1745# asm 1: lduw [<x=int64#1+0],>q0=int64#2
1746# asm 2: lduw [<x=%i0+0],>q0=%i1
1747lduw [%i0+0],%i1
1748
1749# qhasm:   q1 = *(uint32 *) (x + 4)
1750# asm 1: lduw [<x=int64#1+4],>q1=int64#5
1751# asm 2: lduw [<x=%i0+4],>q1=%i4
1752lduw [%i0+4],%i4
1753
1754# qhasm:   q2 = *(uint32 *) (x + 8)
1755# asm 1: lduw [<x=int64#1+8],>q2=int64#6
1756# asm 2: lduw [<x=%i0+8],>q2=%i5
1757lduw [%i0+8],%i5
1758
1759# qhasm:   q3 = *(uint32 *) (x + 12)
1760# asm 1: lduw [<x=int64#1+12],>q3=int64#21
1761# asm 2: lduw [<x=%i0+12],>q3=%l4
1762lduw [%i0+12],%l4
1763
1764# qhasm:   x0 += q0
1765# asm 1: add <x0=int64#3,<q0=int64#2,>x0=int64#2
1766# asm 2: add <x0=%i2,<q0=%i1,>x0=%i1
1767add %i2,%i1,%i1
1768
1769# qhasm:   q4 = *(uint32 *) (x + 16)
1770# asm 1: lduw [<x=int64#1+16],>q4=int64#3
1771# asm 2: lduw [<x=%i0+16],>q4=%i2
1772lduw [%i0+16],%i2
1773
1774# qhasm:   x1 += q1
1775# asm 1: add <x1=int64#4,<q1=int64#5,>x1=int64#4
1776# asm 2: add <x1=%i3,<q1=%i4,>x1=%i3
1777add %i3,%i4,%i3
1778
1779# qhasm:   q5 = *(uint32 *) (x + 20)
1780# asm 1: lduw [<x=int64#1+20],>q5=int64#5
1781# asm 2: lduw [<x=%i0+20],>q5=%i4
1782lduw [%i0+20],%i4
1783
1784# qhasm:   x2 += q2
1785# asm 1: add <x2=int64#7,<q2=int64#6,>x2=int64#6
1786# asm 2: add <x2=%g1,<q2=%i5,>x2=%i5
1787add %g1,%i5,%i5
1788
1789# qhasm:   q6 = *(uint32 *) (x + 24)
1790# asm 1: lduw [<x=int64#1+24],>q6=int64#7
1791# asm 2: lduw [<x=%i0+24],>q6=%g1
1792lduw [%i0+24],%g1
1793
1794# qhasm:   x3 += q3
1795# asm 1: add <x3=int64#8,<q3=int64#21,>x3=int64#8
1796# asm 2: add <x3=%g4,<q3=%l4,>x3=%g4
1797add %g4,%l4,%g4
1798
1799# qhasm:   q7 = *(uint32 *) (x + 28)
1800# asm 1: lduw [<x=int64#1+28],>q7=int64#21
1801# asm 2: lduw [<x=%i0+28],>q7=%l4
1802lduw [%i0+28],%l4
1803
1804# qhasm:   x4 += q4
1805# asm 1: add <x4=int64#9,<q4=int64#3,>x4=int64#3
1806# asm 2: add <x4=%g5,<q4=%i2,>x4=%i2
1807add %g5,%i2,%i2
1808
1809# qhasm:   q8 = *(uint32 *) (x + 32)
1810# asm 1: lduw [<x=int64#1+32],>q8=int64#9
1811# asm 2: lduw [<x=%i0+32],>q8=%g5
1812lduw [%i0+32],%g5
1813
1814# qhasm:   x5 += q5
1815# asm 1: add <x5=int64#10,<q5=int64#5,>x5=int64#5
1816# asm 2: add <x5=%o0,<q5=%i4,>x5=%i4
1817add %o0,%i4,%i4
1818
1819# qhasm:   q9 = *(uint32 *) (x + 36)
1820# asm 1: lduw [<x=int64#1+36],>q9=int64#10
1821# asm 2: lduw [<x=%i0+36],>q9=%o0
1822lduw [%i0+36],%o0
1823
1824# qhasm:   x6 += q6
1825# asm 1: add <x6=int64#11,<q6=int64#7,>x6=int64#7
1826# asm 2: add <x6=%o1,<q6=%g1,>x6=%g1
1827add %o1,%g1,%g1
1828
1829# qhasm:   q10 = *(uint32 *) (x + 40)
1830# asm 1: lduw [<x=int64#1+40],>q10=int64#11
1831# asm 2: lduw [<x=%i0+40],>q10=%o1
1832lduw [%i0+40],%o1
1833
1834# qhasm:   x7 += q7
1835# asm 1: add <x7=int64#12,<q7=int64#21,>x7=int64#12
1836# asm 2: add <x7=%o2,<q7=%l4,>x7=%o2
1837add %o2,%l4,%o2
1838
1839# qhasm:   q11 = *(uint32 *) (x + 44)
1840# asm 1: lduw [<x=int64#1+44],>q11=int64#21
1841# asm 2: lduw [<x=%i0+44],>q11=%l4
1842lduw [%i0+44],%l4
1843
1844# qhasm:   x8 += q8
1845# asm 1: add <x8=int64#13,<q8=int64#9,>x8=int64#13
1846# asm 2: add <x8=%o3,<q8=%g5,>x8=%o3
1847add %o3,%g5,%o3
1848
1849# qhasm:   q8 += 1
1850# asm 1: add <q8=int64#9,1,>q8=int64#9
1851# asm 2: add <q8=%g5,1,>q8=%g5
1852add %g5,1,%g5
1853
1854# qhasm:   *(uint32 *) (x + 32) = q8
1855# asm 1: stw <q8=int64#9,[<x=int64#1+32]
1856# asm 2: stw <q8=%g5,[<x=%i0+32]
1857stw %g5,[%i0+32]
1858
1859# qhasm:   q8 = (uint64) q8 >> 32
1860# asm 1: srlx <q8=int64#9,32,>q8=int64#9
1861# asm 2: srlx <q8=%g5,32,>q8=%g5
1862srlx %g5,32,%g5
1863
1864# qhasm:   q12 = *(uint32 *) (x + 48)
1865# asm 1: lduw [<x=int64#1+48],>q12=int64#22
1866# asm 2: lduw [<x=%i0+48],>q12=%l5
1867lduw [%i0+48],%l5
1868
1869# qhasm:   x9 += q9
1870# asm 1: add <x9=int64#14,<q9=int64#10,>x9=int64#14
1871# asm 2: add <x9=%o4,<q9=%o0,>x9=%o4
1872add %o4,%o0,%o4
1873
1874# qhasm:   q9 += q8
1875# asm 1: add <q9=int64#10,<q8=int64#9,>q9=int64#9
1876# asm 2: add <q9=%o0,<q8=%g5,>q9=%g5
1877add %o0,%g5,%g5
1878
1879# qhasm:   *(uint32 *) (x + 36) = q9
1880# asm 1: stw <q9=int64#9,[<x=int64#1+36]
1881# asm 2: stw <q9=%g5,[<x=%i0+36]
1882stw %g5,[%i0+36]
1883
1884# qhasm:   q13 = *(uint32 *) (x + 52)
1885# asm 1: lduw [<x=int64#1+52],>q13=int64#9
1886# asm 2: lduw [<x=%i0+52],>q13=%g5
1887lduw [%i0+52],%g5
1888
1889# qhasm:   x10 += q10
1890# asm 1: add <x10=int64#15,<q10=int64#11,>x10=int64#10
1891# asm 2: add <x10=%o5,<q10=%o1,>x10=%o0
1892add %o5,%o1,%o0
1893
1894# qhasm:   q14 = *(uint32 *) (x + 56)
1895# asm 1: lduw [<x=int64#1+56],>q14=int64#11
1896# asm 2: lduw [<x=%i0+56],>q14=%o1
1897lduw [%i0+56],%o1
1898
1899# qhasm:   x11 += q11
1900# asm 1: add <x11=int64#16,<q11=int64#21,>x11=int64#15
1901# asm 2: add <x11=%o7,<q11=%l4,>x11=%o5
1902add %o7,%l4,%o5
1903
1904# qhasm:   q15 = *(uint32 *) (x + 60)
1905# asm 1: lduw [<x=int64#1+60],>q15=int64#16
1906# asm 2: lduw [<x=%i0+60],>q15=%o7
1907lduw [%i0+60],%o7
1908
1909# qhasm:   x12 += q12
1910# asm 1: add <x12=int64#17,<q12=int64#22,>x12=int64#17
1911# asm 2: add <x12=%l0,<q12=%l5,>x12=%l0
1912add %l0,%l5,%l0
1913
1914# qhasm:   x13 += q13
1915# asm 1: add <x13=int64#18,<q13=int64#9,>x13=int64#9
1916# asm 2: add <x13=%l1,<q13=%g5,>x13=%g5
1917add %l1,%g5,%g5
1918
1919# qhasm:   x14 += q14
1920# asm 1: add <x14=int64#19,<q14=int64#11,>x14=int64#11
1921# asm 2: add <x14=%l2,<q14=%o1,>x14=%o1
1922add %l2,%o1,%o1
1923
1924# qhasm:   x15 += q15
1925# asm 1: add <x15=int64#20,<q15=int64#16,>x15=int64#16
1926# asm 2: add <x15=%l3,<q15=%o7,>x15=%o7
1927add %l3,%o7,%o7
1928
1929# qhasm:   m = m_stack
1930# asm 1: ldx [%fp+2023-<m_stack=stack64#4],>m=int64#18
1931# asm 2: ldx [%fp+2023-<m_stack=24],>m=%l1
1932ldx [%fp+2023-24],%l1
1933
1934# qhasm:   m0 = *(swapendian uint32 *) m
1935# asm 1: lduwa [<m=int64#18] 0x88,>m0=int64#19
1936# asm 2: lduwa [<m=%l1] 0x88,>m0=%l2
1937lduwa [%l1] 0x88,%l2
1938
1939# qhasm:   m += 4
1940# asm 1: add <m=int64#18,4,>m=int64#18
1941# asm 2: add <m=%l1,4,>m=%l1
1942add %l1,4,%l1
1943
1944# qhasm:   m1 = *(swapendian uint32 *) m
1945# asm 1: lduwa [<m=int64#18] 0x88,>m1=int64#20
1946# asm 2: lduwa [<m=%l1] 0x88,>m1=%l3
1947lduwa [%l1] 0x88,%l3
1948
1949# qhasm:   m += 4
1950# asm 1: add <m=int64#18,4,>m=int64#18
1951# asm 2: add <m=%l1,4,>m=%l1
1952add %l1,4,%l1
1953
1954# qhasm:   m2 = *(swapendian uint32 *) m
1955# asm 1: lduwa [<m=int64#18] 0x88,>m2=int64#21
1956# asm 2: lduwa [<m=%l1] 0x88,>m2=%l4
1957lduwa [%l1] 0x88,%l4
1958
1959# qhasm:   m += 4
1960# asm 1: add <m=int64#18,4,>m=int64#18
1961# asm 2: add <m=%l1,4,>m=%l1
1962add %l1,4,%l1
1963
1964# qhasm:   m3 = *(swapendian uint32 *) m
1965# asm 1: lduwa [<m=int64#18] 0x88,>m3=int64#22
1966# asm 2: lduwa [<m=%l1] 0x88,>m3=%l5
1967lduwa [%l1] 0x88,%l5
1968
1969# qhasm:   m += 4
1970# asm 1: add <m=int64#18,4,>m=int64#18
1971# asm 2: add <m=%l1,4,>m=%l1
1972add %l1,4,%l1
1973
1974# qhasm:   x0 ^= m0
1975# asm 1: xor <x0=int64#2,<m0=int64#19,>x0=int64#2
1976# asm 2: xor <x0=%i1,<m0=%l2,>x0=%i1
1977xor %i1,%l2,%i1
1978
1979# qhasm:   m4 = *(swapendian uint32 *) m
1980# asm 1: lduwa [<m=int64#18] 0x88,>m4=int64#19
1981# asm 2: lduwa [<m=%l1] 0x88,>m4=%l2
1982lduwa [%l1] 0x88,%l2
1983
1984# qhasm:   m += 4
1985# asm 1: add <m=int64#18,4,>m=int64#18
1986# asm 2: add <m=%l1,4,>m=%l1
1987add %l1,4,%l1
1988
1989# qhasm:   x1 ^= m1
1990# asm 1: xor <x1=int64#4,<m1=int64#20,>x1=int64#4
# asm 2: xor <x1=%i3,<m1=%l3,>x1=%i3
xor %i3,%l3,%i3

# qhasm:   m5 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m5=int64#20
# asm 2: lduwa [<m=%l1] 0x88,>m5=%l3
lduwa [%l1] 0x88,%l3

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x2 ^= m2
# asm 1: xor <x2=int64#6,<m2=int64#21,>x2=int64#21
# asm 2: xor <x2=%i5,<m2=%l4,>x2=%l4
xor %i5,%l4,%l4

# qhasm:   m6 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m6=int64#6
# asm 2: lduwa [<m=%l1] 0x88,>m6=%i5
lduwa [%l1] 0x88,%i5

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x3 ^= m3
# asm 1: xor <x3=int64#8,<m3=int64#22,>x3=int64#8
# asm 2: xor <x3=%g4,<m3=%l5,>x3=%g4
xor %g4,%l5,%g4

# qhasm:   m7 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m7=int64#22
# asm 2: lduwa [<m=%l1] 0x88,>m7=%l5
lduwa [%l1] 0x88,%l5

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x4 ^= m4
# asm 1: xor <x4=int64#3,<m4=int64#19,>x4=int64#3
# asm 2: xor <x4=%i2,<m4=%l2,>x4=%i2
xor %i2,%l2,%i2

# qhasm:   m8 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m8=int64#19
# asm 2: lduwa [<m=%l1] 0x88,>m8=%l2
lduwa [%l1] 0x88,%l2

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x5 ^= m5
# asm 1: xor <x5=int64#5,<m5=int64#20,>x5=int64#5
# asm 2: xor <x5=%i4,<m5=%l3,>x5=%i4
xor %i4,%l3,%i4

# qhasm:   m9 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m9=int64#20
# asm 2: lduwa [<m=%l1] 0x88,>m9=%l3
lduwa [%l1] 0x88,%l3

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x6 ^= m6
# asm 1: xor <x6=int64#7,<m6=int64#6,>x6=int64#7
# asm 2: xor <x6=%g1,<m6=%i5,>x6=%g1
xor %g1,%i5,%g1

# qhasm:   m10 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m10=int64#6
# asm 2: lduwa [<m=%l1] 0x88,>m10=%i5
lduwa [%l1] 0x88,%i5

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x7 ^= m7
# asm 1: xor <x7=int64#12,<m7=int64#22,>x7=int64#12
# asm 2: xor <x7=%o2,<m7=%l5,>x7=%o2
xor %o2,%l5,%o2

# qhasm:   m11 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m11=int64#22
# asm 2: lduwa [<m=%l1] 0x88,>m11=%l5
lduwa [%l1] 0x88,%l5

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x8 ^= m8
# asm 1: xor <x8=int64#13,<m8=int64#19,>x8=int64#13
# asm 2: xor <x8=%o3,<m8=%l2,>x8=%o3
xor %o3,%l2,%o3

# qhasm:   m12 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m12=int64#19
# asm 2: lduwa [<m=%l1] 0x88,>m12=%l2
lduwa [%l1] 0x88,%l2

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x9 ^= m9
# asm 1: xor <x9=int64#14,<m9=int64#20,>x9=int64#14
# asm 2: xor <x9=%o4,<m9=%l3,>x9=%o4
xor %o4,%l3,%o4

# qhasm:   m13 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m13=int64#20
# asm 2: lduwa [<m=%l1] 0x88,>m13=%l3
lduwa [%l1] 0x88,%l3

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#18
# asm 2: add <m=%l1,4,>m=%l1
add %l1,4,%l1

# qhasm:   x10 ^= m10
# asm 1: xor <x10=int64#10,<m10=int64#6,>x10=int64#10
# asm 2: xor <x10=%o0,<m10=%i5,>x10=%o0
xor %o0,%i5,%o0

# qhasm:   m14 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#18] 0x88,>m14=int64#23
# asm 2: lduwa [<m=%l1] 0x88,>m14=%l6
lduwa [%l1] 0x88,%l6

# qhasm:   m += 4
# asm 1: add <m=int64#18,4,>m=int64#6
# asm 2: add <m=%l1,4,>m=%i5
add %l1,4,%i5

# qhasm:   x11 ^= m11
# asm 1: xor <x11=int64#15,<m11=int64#22,>x11=int64#15
# asm 2: xor <x11=%o5,<m11=%l5,>x11=%o5
xor %o5,%l5,%o5

# qhasm:   m15 = *(swapendian uint32 *) m
# asm 1: lduwa [<m=int64#6] 0x88,>m15=int64#18
# asm 2: lduwa [<m=%i5] 0x88,>m15=%l1
lduwa [%i5] 0x88,%l1

# qhasm:   m += 4
# asm 1: add <m=int64#6,4,>m=int64#6
# asm 2: add <m=%i5,4,>m=%i5
add %i5,4,%i5

# qhasm:   x12 ^= m12
# asm 1: xor <x12=int64#17,<m12=int64#19,>x12=int64#17
# asm 2: xor <x12=%l0,<m12=%l2,>x12=%l0
xor %l0,%l2,%l0

# qhasm:   x13 ^= m13
# asm 1: xor <x13=int64#9,<m13=int64#20,>x13=int64#9
# asm 2: xor <x13=%g5,<m13=%l3,>x13=%g5
xor %g5,%l3,%g5

# qhasm:   x14 ^= m14
# asm 1: xor <x14=int64#11,<m14=int64#23,>x14=int64#11
# asm 2: xor <x14=%o1,<m14=%l6,>x14=%o1
xor %o1,%l6,%o1

# qhasm:   x15 ^= m15
# asm 1: xor <x15=int64#16,<m15=int64#18,>x15=int64#16
# asm 2: xor <x15=%o7,<m15=%l1,>x15=%o7
xor %o7,%l1,%o7

# qhasm:   out = out_stack
# asm 1: ldx [%fp+2023-<out_stack=stack64#3],>out=int64#18
# asm 2: ldx [%fp+2023-<out_stack=16],>out=%l1
ldx [%fp+2023-16],%l1

# qhasm:   *(swapendian uint32 *) out = x0
# asm 1: stwa <x0=int64#2,[<out=int64#18] 0x88
# asm 2: stwa <x0=%i1,[<out=%l1] 0x88
stwa %i1,[%l1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#18,4,>out=int64#2
# asm 2: add <out=%l1,4,>out=%i1
add %l1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x1
# asm 1: stwa <x1=int64#4,[<out=int64#2] 0x88
# asm 2: stwa <x1=%i3,[<out=%i1] 0x88
stwa %i3,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x2
# asm 1: stwa <x2=int64#21,[<out=int64#2] 0x88
# asm 2: stwa <x2=%l4,[<out=%i1] 0x88
stwa %l4,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x3
# asm 1: stwa <x3=int64#8,[<out=int64#2] 0x88
# asm 2: stwa <x3=%g4,[<out=%i1] 0x88
stwa %g4,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x4
# asm 1: stwa <x4=int64#3,[<out=int64#2] 0x88
# asm 2: stwa <x4=%i2,[<out=%i1] 0x88
stwa %i2,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x5
# asm 1: stwa <x5=int64#5,[<out=int64#2] 0x88
# asm 2: stwa <x5=%i4,[<out=%i1] 0x88
stwa %i4,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x6
# asm 1: stwa <x6=int64#7,[<out=int64#2] 0x88
# asm 2: stwa <x6=%g1,[<out=%i1] 0x88
stwa %g1,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x7
# asm 1: stwa <x7=int64#12,[<out=int64#2] 0x88
# asm 2: stwa <x7=%o2,[<out=%i1] 0x88
stwa %o2,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x8
# asm 1: stwa <x8=int64#13,[<out=int64#2] 0x88
# asm 2: stwa <x8=%o3,[<out=%i1] 0x88
stwa %o3,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x9
# asm 1: stwa <x9=int64#14,[<out=int64#2] 0x88
# asm 2: stwa <x9=%o4,[<out=%i1] 0x88
stwa %o4,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x10
# asm 1: stwa <x10=int64#10,[<out=int64#2] 0x88
# asm 2: stwa <x10=%o0,[<out=%i1] 0x88
stwa %o0,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x11
# asm 1: stwa <x11=int64#15,[<out=int64#2] 0x88
# asm 2: stwa <x11=%o5,[<out=%i1] 0x88
stwa %o5,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x12
# asm 1: stwa <x12=int64#17,[<out=int64#2] 0x88
# asm 2: stwa <x12=%l0,[<out=%i1] 0x88
stwa %l0,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x13
# asm 1: stwa <x13=int64#9,[<out=int64#2] 0x88
# asm 2: stwa <x13=%g5,[<out=%i1] 0x88
stwa %g5,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x14
# asm 1: stwa <x14=int64#11,[<out=int64#2] 0x88
# asm 2: stwa <x14=%o1,[<out=%i1] 0x88
stwa %o1,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

# qhasm:   *(swapendian uint32 *) out = x15
# asm 1: stwa <x15=int64#16,[<out=int64#2] 0x88
# asm 2: stwa <x15=%o7,[<out=%i1] 0x88
stwa %o7,[%i1] 0x88

# qhasm:   out += 4
# asm 1: add <out=int64#2,4,>out=int64#2
# asm 2: add <out=%i1,4,>out=%i1
add %i1,4,%i1

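# Note: the lduwa/xor/stwa sequence above applies one 64-byte keystream block
# to the message. Loads and stores use ASI 0x88 (little-endian), so each
# 32-bit word is byte-swapped on this big-endian machine, matching the
# "swapendian" qhasm annotations. A minimal C sketch of the same step,
# assuming x[0..15] already holds this block's keystream words; the helper
# names load_littleendian/store_littleendian are illustrative only, not part
# of this code:
#
#   for (i = 0; i < 16; ++i) {
#     uint32_t mi = load_littleendian(m + 4 * i);   /* lduwa ... 0x88 */
#     store_littleendian(out + 4 * i, x[i] ^ mi);   /* stwa  ... 0x88 */
#   }
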
# qhasm:   bytes = bytes_stack
# asm 1: ldx [%fp+2023-<bytes_stack=stack64#2],>bytes=int64#3
# asm 2: ldx [%fp+2023-<bytes_stack=8],>bytes=%i2
ldx [%fp+2023-8],%i2

# qhasm:                         unsigned>? bytes -= 64
# asm 1: subcc <bytes=int64#3,64,>bytes=int64#5
# asm 2: subcc <bytes=%i2,64,>bytes=%i4
subcc %i2,64,%i4

# qhasm:   goto bytesatleast1 if unsigned>
bgu,pt %xcc,._bytesatleast1
nop

# qhasm:   goto done if !unsigned<
bgeu,pt %xcc,._done
nop

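# Note: the two branches above distinguish three cases. If more than 64 bytes
# remained, control returns to bytesatleast1 for another block; if exactly 64
# remained, control falls through to done. Otherwise the final block was
# shorter than 64 bytes and (in the usual Salsa20 arrangement, presumably set
# up earlier in this function) the block was produced into a 64-byte scratch
# area, with the caller's real output pointer saved in ctarget. The code below
# copies just the requested bytes back. A rough C sketch, with illustrative
# names:
#
#   if (bytes < 64) {                  /* partial final block           */
#     bytes += 64;                     /* undo the subtraction above    */
#     out   -= 64;                     /* rewind to start of the block  */
#     for (i = 0; i < bytes; ++i)      /* ccopyloop: ldsb / stb         */
#       ctarget[i] = out[i];           /* copy only the bytes requested */
#   }
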
# qhasm:     m = ctarget
# asm 1: ldx [%fp+2023-<ctarget=stack64#1],>m=int64#1
# asm 2: ldx [%fp+2023-<ctarget=0],>m=%i0
ldx [%fp+2023-0],%i0

# qhasm:     bytes += 64
# asm 1: add <bytes=int64#5,64,>bytes=int64#3
# asm 2: add <bytes=%i4,64,>bytes=%i2
add %i4,64,%i2

# qhasm:     out -= 64
# asm 1: sub <out=int64#2,64,>out=int64#2
# asm 2: sub <out=%i1,64,>out=%i1
sub %i1,64,%i1

# qhasm:     i = 0
# asm 1: add %g0,0,>i=int64#4
# asm 2: add %g0,0,>i=%i3
add %g0,0,%i3

# qhasm:     ccopyloop:
._ccopyloop:

# qhasm:       a = *(int8 *) (out + i)
# asm 1: ldsb [<out=int64#2+<i=int64#4],>a=int64#5
# asm 2: ldsb [<out=%i1+<i=%i3],>a=%i4
ldsb [%i1+%i3],%i4

# qhasm:       *(int8 *) (m + i) = a
# asm 1: stb <a=int64#5,[<m=int64#1+<i=int64#4]
# asm 2: stb <a=%i4,[<m=%i0+<i=%i3]
stb %i4,[%i0+%i3]

# qhasm:       i += 1
# asm 1: add <i=int64#4,1,>i=int64#4
# asm 2: add <i=%i3,1,>i=%i3
add %i3,1,%i3

# qhasm:                       unsigned<? i - bytes
# asm 1: subcc <i=int64#4,<bytes=int64#3,%g0
# asm 2: subcc <i=%i3,<bytes=%i2,%g0
subcc %i3,%i2,%g0

# qhasm:     goto ccopyloop if unsigned<
blu,pt %xcc,._ccopyloop
nop

# qhasm: done:
._done:

# qhasm: leave
ret
restore
