; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R1
; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R1
; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R2R5
; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R2R5
; RUN: llc < %s -mtriple=mips -mcpu=mips32r5 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R2R5
; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP32R6

; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R2
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R2R5
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r3 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R2R5
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r5 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R2R5
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r6 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP64R6

; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -mattr=+micromips \
; RUN:   -relocation-model=pic -mips-jalr-reloc=false | \
; RUN:   FileCheck %s -check-prefix=MMR3
; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -mattr=+micromips \
; RUN:   -relocation-model=pic -mips-jalr-reloc=false | \
; RUN:   FileCheck %s -check-prefix=MMR6

define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
; GP32-LABEL: udiv_i1:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    jr $ra
; GP32-NEXT:    move $2, $4
;
; GP32R6-LABEL: udiv_i1:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    move $2, $4
;
; GP64-LABEL: udiv_i1:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    jr $ra
; GP64-NEXT:    move $2, $4
;
; GP64R6-LABEL: udiv_i1:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    jr $ra
; GP64R6-NEXT:    move $2, $4
;
; MMR3-LABEL: udiv_i1:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    move $2, $4
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: udiv_i1:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    move $2, $4
; MMR6-NEXT:    jrc $ra
entry:
  %r = udiv i1 %a, %b
  ret i1 %r
}
define zeroext i8 @udiv_i8(i8 zeroext %a, i8 zeroext %b) {
; GP32-LABEL: udiv_i8:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    divu $zero, $4, $5
; GP32-NEXT:    teq $5, $zero, 7
; GP32-NEXT:    jr $ra
; GP32-NEXT:    mflo $2
;
; GP32R6-LABEL: udiv_i8:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    divu $2, $4, $5
; GP32R6-NEXT:    teq $5, $zero, 7
; GP32R6-NEXT:    jrc $ra
;
; GP64-LABEL: udiv_i8:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    divu $zero, $4, $5
; GP64-NEXT:    teq $5, $zero, 7
; GP64-NEXT:    jr $ra
; GP64-NEXT:    mflo $2
;
; GP64R6-LABEL: udiv_i8:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    divu $2, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jrc $ra
;
; MMR3-LABEL: udiv_i8:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    divu $zero, $4, $5
; MMR3-NEXT:    teq $5, $zero, 7
; MMR3-NEXT:    mflo16 $2
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: udiv_i8:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    divu $2, $4, $5
; MMR6-NEXT:    teq $5, $zero, 7
; MMR6-NEXT:    jrc $ra
entry:
  %r = udiv i8 %a, %b
  ret i8 %r
}
define zeroext i16 @udiv_i16(i16 zeroext %a, i16 zeroext %b) {
; GP32-LABEL: udiv_i16:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    divu $zero, $4, $5
; GP32-NEXT:    teq $5, $zero, 7
; GP32-NEXT:    jr $ra
; GP32-NEXT:    mflo $2
;
; GP32R6-LABEL: udiv_i16:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    divu $2, $4, $5
; GP32R6-NEXT:    teq $5, $zero, 7
; GP32R6-NEXT:    jrc $ra
;
; GP64-LABEL: udiv_i16:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    divu $zero, $4, $5
; GP64-NEXT:    teq $5, $zero, 7
; GP64-NEXT:    jr $ra
; GP64-NEXT:    mflo $2
;
; GP64R6-LABEL: udiv_i16:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    divu $2, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jrc $ra
;
; MMR3-LABEL: udiv_i16:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    divu $zero, $4, $5
; MMR3-NEXT:    teq $5, $zero, 7
; MMR3-NEXT:    mflo16 $2
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: udiv_i16:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    divu $2, $4, $5
; MMR6-NEXT:    teq $5, $zero, 7
; MMR6-NEXT:    jrc $ra
entry:
  %r = udiv i16 %a, %b
  ret i16 %r
}
define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
; GP32-LABEL: udiv_i32:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    divu $zero, $4, $5
; GP32-NEXT:    teq $5, $zero, 7
; GP32-NEXT:    jr $ra
; GP32-NEXT:    mflo $2
;
; GP32R6-LABEL: udiv_i32:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    divu $2, $4, $5
; GP32R6-NEXT:    teq $5, $zero, 7
; GP32R6-NEXT:    jrc $ra
;
; GP64-LABEL: udiv_i32:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    divu $zero, $4, $5
; GP64-NEXT:    teq $5, $zero, 7
; GP64-NEXT:    jr $ra
; GP64-NEXT:    mflo $2
;
; GP64R6-LABEL: udiv_i32:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    divu $2, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jrc $ra
;
; MMR3-LABEL: udiv_i32:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    divu $zero, $4, $5
; MMR3-NEXT:    teq $5, $zero, 7
; MMR3-NEXT:    mflo16 $2
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: udiv_i32:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    divu $2, $4, $5
; MMR6-NEXT:    teq $5, $zero, 7
; MMR6-NEXT:    jrc $ra
entry:
  %r = udiv i32 %a, %b
  ret i32 %r
}
define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
; GP32-LABEL: udiv_i64:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    lui $2, %hi(_gp_disp)
; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32-NEXT:    addiu $sp, $sp, -24
; GP32-NEXT:    .cfi_def_cfa_offset 24
; GP32-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; GP32-NEXT:    .cfi_offset 31, -4
; GP32-NEXT:    addu $gp, $2, $25
; GP32-NEXT:    lw $25, %call16(__udivdi3)($gp)
; GP32-NEXT:    jalr $25
; GP32-NEXT:    nop
; GP32-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; GP32-NEXT:    jr $ra
; GP32-NEXT:    addiu $sp, $sp, 24
;
; GP32R6-LABEL: udiv_i64:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32R6-NEXT:    addiu $sp, $sp, -24
; GP32R6-NEXT:    .cfi_def_cfa_offset 24
; GP32R6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; GP32R6-NEXT:    .cfi_offset 31, -4
; GP32R6-NEXT:    addu $gp, $2, $25
; GP32R6-NEXT:    lw $25, %call16(__udivdi3)($gp)
; GP32R6-NEXT:    jalrc $25
; GP32R6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    addiu $sp, $sp, 24
;
; GP64-LABEL: udiv_i64:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    ddivu $zero, $4, $5
; GP64-NEXT:    teq $5, $zero, 7
; GP64-NEXT:    jr $ra
; GP64-NEXT:    mflo $2
;
; GP64R6-LABEL: udiv_i64:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    ddivu $2, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jrc $ra
;
; MMR3-LABEL: udiv_i64:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    lui $2, %hi(_gp_disp)
; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR3-NEXT:    addiusp -24
; MMR3-NEXT:    .cfi_def_cfa_offset 24
; MMR3-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; MMR3-NEXT:    .cfi_offset 31, -4
; MMR3-NEXT:    addu $2, $2, $25
; MMR3-NEXT:    lw $25, %call16(__udivdi3)($2)
; MMR3-NEXT:    move $gp, $2
; MMR3-NEXT:    jalr $25
; MMR3-NEXT:    nop
; MMR3-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; MMR3-NEXT:    addiusp 24
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: udiv_i64:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    lui $2, %hi(_gp_disp)
; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR6-NEXT:    addiu $sp, $sp, -24
; MMR6-NEXT:    .cfi_def_cfa_offset 24
; MMR6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT:    .cfi_offset 31, -4
; MMR6-NEXT:    addu $2, $2, $25
; MMR6-NEXT:    lw $25, %call16(__udivdi3)($2)
; MMR6-NEXT:    move $gp, $2
; MMR6-NEXT:    jalr $25
; MMR6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT:    addiu $sp, $sp, 24
; MMR6-NEXT:    jrc $ra
entry:
  %r = udiv i64 %a, %b
  ret i64 %r
}
define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
; GP32-LABEL: udiv_i128:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    lui $2, %hi(_gp_disp)
; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32-NEXT:    addiu $sp, $sp, -40
; GP32-NEXT:    .cfi_def_cfa_offset 40
; GP32-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
; GP32-NEXT:    .cfi_offset 31, -4
; GP32-NEXT:    addu $gp, $2, $25
; GP32-NEXT:    lw $1, 60($sp)
; GP32-NEXT:    lw $2, 64($sp)
; GP32-NEXT:    lw $3, 68($sp)
; GP32-NEXT:    sw $3, 28($sp)
; GP32-NEXT:    sw $2, 24($sp)
; GP32-NEXT:    sw $1, 20($sp)
; GP32-NEXT:    lw $1, 56($sp)
; GP32-NEXT:    sw $1, 16($sp)
; GP32-NEXT:    lw $25, %call16(__udivti3)($gp)
; GP32-NEXT:    jalr $25
; GP32-NEXT:    nop
; GP32-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
; GP32-NEXT:    jr $ra
; GP32-NEXT:    addiu $sp, $sp, 40
;
; GP32R6-LABEL: udiv_i128:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32R6-NEXT:    addiu $sp, $sp, -40
; GP32R6-NEXT:    .cfi_def_cfa_offset 40
; GP32R6-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
; GP32R6-NEXT:    .cfi_offset 31, -4
; GP32R6-NEXT:    addu $gp, $2, $25
; GP32R6-NEXT:    lw $1, 60($sp)
; GP32R6-NEXT:    lw $2, 64($sp)
; GP32R6-NEXT:    lw $3, 68($sp)
; GP32R6-NEXT:    sw $3, 28($sp)
; GP32R6-NEXT:    sw $2, 24($sp)
; GP32R6-NEXT:    sw $1, 20($sp)
; GP32R6-NEXT:    lw $1, 56($sp)
; GP32R6-NEXT:    sw $1, 16($sp)
; GP32R6-NEXT:    lw $25, %call16(__udivti3)($gp)
; GP32R6-NEXT:    jalrc $25
; GP32R6-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    addiu $sp, $sp, 40
;
; GP64-LABEL: udiv_i128:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    daddiu $sp, $sp, -16
; GP64-NEXT:    .cfi_def_cfa_offset 16
; GP64-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; GP64-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; GP64-NEXT:    .cfi_offset 31, -8
; GP64-NEXT:    .cfi_offset 28, -16
; GP64-NEXT:    lui $1, %hi(%neg(%gp_rel(udiv_i128)))
; GP64-NEXT:    daddu $1, $1, $25
; GP64-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(udiv_i128)))
; GP64-NEXT:    ld $25, %call16(__udivti3)($gp)
; GP64-NEXT:    jalr $25
; GP64-NEXT:    nop
; GP64-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; GP64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; GP64-NEXT:    jr $ra
; GP64-NEXT:    daddiu $sp, $sp, 16
;
; GP64R6-LABEL: udiv_i128:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    daddiu $sp, $sp, -16
; GP64R6-NEXT:    .cfi_def_cfa_offset 16
; GP64R6-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; GP64R6-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; GP64R6-NEXT:    .cfi_offset 31, -8
; GP64R6-NEXT:    .cfi_offset 28, -16
; GP64R6-NEXT:    lui $1, %hi(%neg(%gp_rel(udiv_i128)))
; GP64R6-NEXT:    daddu $1, $1, $25
; GP64R6-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(udiv_i128)))
; GP64R6-NEXT:    ld $25, %call16(__udivti3)($gp)
; GP64R6-NEXT:    jalrc $25
; GP64R6-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; GP64R6-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; GP64R6-NEXT:    jr $ra
; GP64R6-NEXT:    daddiu $sp, $sp, 16
;
; MMR3-LABEL: udiv_i128:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    lui $2, %hi(_gp_disp)
; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR3-NEXT:    addiusp -48
; MMR3-NEXT:    .cfi_def_cfa_offset 48
; MMR3-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
; MMR3-NEXT:    swp $16, 36($sp)
; MMR3-NEXT:    .cfi_offset 31, -4
; MMR3-NEXT:    .cfi_offset 17, -8
; MMR3-NEXT:    .cfi_offset 16, -12
; MMR3-NEXT:    addu $16, $2, $25
; MMR3-NEXT:    move $1, $7
; MMR3-NEXT:    lw $7, 68($sp)
; MMR3-NEXT:    lw $17, 72($sp)
; MMR3-NEXT:    lw $3, 76($sp)
; MMR3-NEXT:    move $2, $sp
; MMR3-NEXT:    sw16 $3, 28($2)
; MMR3-NEXT:    sw16 $17, 24($2)
; MMR3-NEXT:    sw16 $7, 20($2)
; MMR3-NEXT:    lw $3, 64($sp)
; MMR3-NEXT:    sw16 $3, 16($2)
; MMR3-NEXT:    lw $25, %call16(__udivti3)($16)
; MMR3-NEXT:    move $7, $1
; MMR3-NEXT:    move $gp, $16
; MMR3-NEXT:    jalr $25
; MMR3-NEXT:    nop
; MMR3-NEXT:    lwp $16, 36($sp)
; MMR3-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
; MMR3-NEXT:    addiusp 48
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: udiv_i128:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    lui $2, %hi(_gp_disp)
; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR6-NEXT:    addiu $sp, $sp, -48
; MMR6-NEXT:    .cfi_def_cfa_offset 48
; MMR6-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
; MMR6-NEXT:    sw $17, 40($sp) # 4-byte Folded Spill
; MMR6-NEXT:    sw $16, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT:    .cfi_offset 31, -4
; MMR6-NEXT:    .cfi_offset 17, -8
; MMR6-NEXT:    .cfi_offset 16, -12
; MMR6-NEXT:    addu $16, $2, $25
; MMR6-NEXT:    move $1, $7
; MMR6-NEXT:    lw $7, 68($sp)
; MMR6-NEXT:    lw $17, 72($sp)
; MMR6-NEXT:    lw $3, 76($sp)
; MMR6-NEXT:    move $2, $sp
; MMR6-NEXT:    sw16 $3, 28($2)
; MMR6-NEXT:    sw16 $17, 24($2)
; MMR6-NEXT:    sw16 $7, 20($2)
; MMR6-NEXT:    lw $3, 64($sp)
; MMR6-NEXT:    sw16 $3, 16($2)
; MMR6-NEXT:    lw $25, %call16(__udivti3)($16)
; MMR6-NEXT:    move $7, $1
; MMR6-NEXT:    move $gp, $16
; MMR6-NEXT:    jalr $25
; MMR6-NEXT:    lw $16, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT:    lw $17, 40($sp) # 4-byte Folded Reload
; MMR6-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
; MMR6-NEXT:    addiu $sp, $sp, 48
; MMR6-NEXT:    jrc $ra
entry:
  %r = udiv i128 %a, %b
  ret i128 %r
}