/* newlib libgcc routines of Andes NDS32 cpu for GNU compiler
   Copyright (C) 2012-2020 Free Software Foundation, Inc.
   Contributed by Andes Technology Corporation.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* All four routines below implement 32-bit division/remainder with the
   same shift-and-subtract (restoring) algorithm:

     1. Scale the divisor ($r1) and a one-bit mask ($r2) left until the
        divisor is >= the dividend ($r0), or the next shift would lose
        the divisor's top bit.
     2. Walk the mask back down, subtracting the scaled divisor from the
        running remainder whenever it fits and OR-ing the mask into the
        quotient accumulator ($r5).

   Register usage (common to all routines):
     $r0 = dividend in / result out ($r0 also holds the remainder
           during the loop)
     $r1 = divisor (destroyed)
     $r2 = current quotient bit mask (destroyed)
     $r3 = scratch for comparisons (destroyed)
     $r4 = sign bookkeeping in the signed routines (destroyed)
     $r5 = quotient accumulator (destroyed)

   NOTE(review): as is conventional for libgcc helpers, the result is
   undefined when the divisor is 0, and (for the signed routines) when
   the negation of an operand or result overflows (e.g. INT_MIN) --
   confirm against the target's libgcc expectations.  */

	.section	.mdebug.abi_nds32
	.previous

#ifdef L_divsi3

/* int __divsi3 (int a, int b)
   Signed 32-bit divide.
   In:  $r0 = dividend, $r1 = divisor.
   Out: $r0 = quotient, truncated toward zero.
   Clobbers: $r1-$r5.  */

	.text
	.align	2
	.globl	__divsi3
	.type	__divsi3, @function
__divsi3:
	movi	$r5, 0			! res = 0
	xor	$r4, $r0, $r1		! neg: sign bit set iff operand signs differ
	bltz	$r0, .L1		! make the dividend non-negative first
	bltz	$r1, .L2		! make the divisor non-negative
.L3:
	! Both operands are now non-negative; run the unsigned core.
	movi	$r2, 1			! bit = 1
	slt	$r3, $r1, $r0		! test if dividend is smaller than or equal to divisor
	beqz	$r3, .L5		! divisor >= dividend: no scaling needed
	bltz	$r1, .L5		! divisor's top bit set: another shift would lose it

.L4:
	! Alignment loop: double (divisor, bit) while divisor < dividend.
	slli	$r2, $r2, 1
	beqz	$r2, .L6		! bit mask shifted out entirely
	slli	$r1, $r1, 1
	slt	$r3, $r1, $r0
	beqz	$r3, .L5
	bgez	$r1, .L4

.L5:
	! Subtraction loop: set the quotient bit wherever the scaled
	! divisor still fits into the remainder.
	slt	$r3, $r0, $r1
	bnez	$r3, .L8		! remainder < divisor: this bit is 0
	sub	$r0, $r0, $r1		! remainder -= divisor
	or	$r5, $r5, $r2		! res |= bit
.L8:
	srli	$r1, $r1, 1		! divisor >>= 1
	srli	$r2, $r2, 1		! bit >>= 1
	bnez	$r2, .L5
.L6:
	bgez	$r4, .L7
	subri	$r5, $r5, 0		! negate if $r4 < 0
.L7:
	move	$r0, $r5		! return quotient in $r0
	ret
.L1:
	subri	$r0, $r0, 0		! change neg to pos
	bgez	$r1, .L3
.L2:
	subri	$r1, $r1, 0		! change neg to pos
	j	.L3
	.size	__divsi3, .-__divsi3

#endif /* L_divsi3 */


#ifdef L_modsi3

/* int __modsi3 (int a, int b)
   Signed 32-bit remainder.
   In:  $r0 = dividend, $r1 = divisor.
   Out: $r0 = remainder; its sign follows the dividend ($r4 saves the
        original dividend, and the result is negated iff $r4 < 0).
   Clobbers: $r1-$r5.  */

	.text
	.align	2
	.globl	__modsi3
	.type	__modsi3, @function
__modsi3:
	movi	$r5, 0			! res = 0
	move	$r4, $r0		! neg: remember dividend's sign for the result
	bltz	$r0, .L1		! make the dividend non-negative first
	bltz	$r1, .L2		! make the divisor non-negative
.L3:
	! Both operands are now non-negative; run the unsigned core.
	! $r5 collects quotient bits but only the remainder ($r0) is
	! returned.
	movi	$r2, 1			! bit = 1
	slt	$r3, $r1, $r0		! test if dividend is smaller than or equal to divisor
	beqz	$r3, .L5		! divisor >= dividend: no scaling needed
	bltz	$r1, .L5		! divisor's top bit set: another shift would lose it

.L4:
	! Alignment loop: double (divisor, bit) while divisor < dividend.
	slli	$r2, $r2, 1
	beqz	$r2, .L6		! bit mask shifted out entirely
	slli	$r1, $r1, 1
	slt	$r3, $r1, $r0
	beqz	$r3, .L5
	bgez	$r1, .L4

.L5:
	! Subtraction loop: $r0 ends up holding the remainder.
	slt	$r3, $r0, $r1
	bnez	$r3, .L8		! remainder < divisor: this bit is 0
	sub	$r0, $r0, $r1		! remainder -= divisor
	or	$r5, $r5, $r2		! res |= bit
.L8:
	srli	$r1, $r1, 1		! divisor >>= 1
	srli	$r2, $r2, 1		! bit >>= 1
	bnez	$r2, .L5
.L6:
	bgez	$r4, .L7
	subri	$r0, $r0, 0		! negate if $r4 < 0
.L7:
	ret				! remainder already in $r0
.L1:
	subri	$r0, $r0, 0		! change neg to pos
	bgez	$r1, .L3
.L2:
	subri	$r1, $r1, 0		! change neg to pos
	j	.L3
	.size	__modsi3, .-__modsi3

#endif /* L_modsi3 */


#ifdef L_udivsi3

/* unsigned int __udivsi3 (unsigned int a, unsigned int b)
   Unsigned 32-bit divide: the unsigned core of __divsi3 with no sign
   preprocessing.
   In:  $r0 = dividend, $r1 = divisor.
   Out: $r0 = quotient.
   Clobbers: $r1-$r3, $r5.  */

	.text
	.align	2
	.globl	__udivsi3
	.type	__udivsi3, @function
__udivsi3:
	movi	$r5, 0			! res = 0
	movi	$r2, 1			! bit = 1
	slt	$r3, $r1, $r0		! test if dividend is smaller than or equal to divisor
	beqz	$r3, .L5		! divisor >= dividend: no scaling needed
	bltz	$r1, .L5		! divisor's top bit set: another shift would lose it

.L4:
	! Alignment loop: double (divisor, bit) while divisor < dividend.
	slli	$r2, $r2, 1
	beqz	$r2, .L6		! bit mask shifted out entirely
	slli	$r1, $r1, 1
	slt	$r3, $r1, $r0
	beqz	$r3, .L5
	bgez	$r1, .L4

.L5:
	! Subtraction loop: set the quotient bit wherever the scaled
	! divisor still fits into the remainder.
	slt	$r3, $r0, $r1
	bnez	$r3, .L8		! remainder < divisor: this bit is 0
	sub	$r0, $r0, $r1		! remainder -= divisor
	or	$r5, $r5, $r2		! res |= bit
.L8:
	srli	$r1, $r1, 1		! divisor >>= 1
	srli	$r2, $r2, 1		! bit >>= 1
	bnez	$r2, .L5
.L6:
	move	$r0, $r5		! return quotient in $r0
	ret
	.size	__udivsi3, .-__udivsi3

#endif /* L_udivsi3 */


#ifdef L_umodsi3

/* unsigned int __umodsi3 (unsigned int a, unsigned int b)
   Unsigned 32-bit remainder: identical core to __udivsi3, but returns
   the remainder left in $r0 instead of the quotient.
   In:  $r0 = dividend, $r1 = divisor.
   Out: $r0 = remainder.
   Clobbers: $r1-$r3, $r5.  */

	.text
	.align	2
	.globl	__umodsi3
	.type	__umodsi3, @function
__umodsi3:
	movi	$r5, 0			! res = 0 (quotient bits, unused by the caller)
	movi	$r2, 1			! bit = 1
	slt	$r3, $r1, $r0		! test if dividend is smaller than or equal to divisor
	beqz	$r3, .L5		! divisor >= dividend: no scaling needed
	bltz	$r1, .L5		! divisor's top bit set: another shift would lose it

.L4:
	! Alignment loop: double (divisor, bit) while divisor < dividend.
	slli	$r2, $r2, 1
	beqz	$r2, .L6		! bit mask shifted out entirely
	slli	$r1, $r1, 1
	slt	$r3, $r1, $r0
	beqz	$r3, .L5
	bgez	$r1, .L4

.L5:
	! Subtraction loop: $r0 ends up holding the remainder.
	slt	$r3, $r0, $r1
	bnez	$r3, .L8		! remainder < divisor: this bit is 0
	sub	$r0, $r0, $r1		! remainder -= divisor
	or	$r5, $r5, $r2		! res |= bit
.L8:
	srli	$r1, $r1, 1		! divisor >>= 1
	srli	$r2, $r2, 1		! bit >>= 1
	bnez	$r2, .L5
.L6:
	ret				! remainder already in $r0
	.size	__umodsi3, .-__umodsi3

#endif /* L_umodsi3 */

/* ----------------------------------------------------------- */