/*	$OpenBSD: fpu_explode.c,v 1.12 2024/03/29 21:02:11 miod Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
 */

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/types.h>

#include <machine/fsr.h>
#include <machine/ieee.h>
#include <machine/instr.h>

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"
#include "fpu_reg.h"

/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents -1**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */
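
/*
 * Worked example of the layout above (illustrative only, not taken from
 * the original sources): the single precision value -1.5 is packed as
 * 0xbfc00000, which splits into
 *
 *	sign     = 1		(bit 31)
 *	exponent = 0x7f = 127	(bits 30..23; true exponent 0 once the
 *				 bias of 127 is removed)
 *	fraction = 0x400000	(bits 22..0, i.e. .100... binary, so
 *				 1.fraction = 1.5)
 *
 * giving -1**1 * 1.5 * 2**0 = -1.5.
 */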

/*
 * int -> fpn.
 */
int
__fpu_itof(fp, i)
	struct fpn *fp;
	u_int i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	fp->fp_mant[0] = (fp->fp_sign && (int)i < 0) ? -i : i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

/*
 * uint -> fpn.
 */
int
__fpu_uitof(fp, i)
	struct fpn *fp;
	u_int i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.
	 * Note that this relies on fpu_norm()'s handling of
	 * `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	fp->fp_mant[0] = i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

/*
 * 64-bit int -> fpn.
 */
int
__fpu_xtof(fp, i)
	struct fpn *fp;
	u_int64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	i = (fp->fp_sign && (int64_t)i < 0) ? -i : i;
	fp->fp_mant[0] = (i >> 32) & 0xffffffff;
	fp->fp_mant[1] = (i >> 0) & 0xffffffff;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

/*
 * 64-bit uint -> fpn.
 */
int
__fpu_uxtof(fp, i)
	struct fpn *fp;
	u_int64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.
	 * Note that this relies on fpu_norm()'s handling of
	 * `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	fp->fp_mant[0] = (i >> 32) & 0xffffffff;
	fp->fp_mant[1] = (i >> 0) & 0xffffffff;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}
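
/*
 * Worked example for the integer conversions above (illustrative only):
 * __fpu_itof(fp, 5) stores fp_exp = FP_LG and fp_mant[0] = 5; since
 * FP_1 = 1 << FP_LG plays the role of 1.0, this already denotes the
 * value 5 and only needs normalizing.  __fpu_norm() then slides the
 * leading 1 into the FP_1 bit position and adjusts the exponent to
 * match, leaving fp_mant[0] = FP_1 | (FP_1 >> 2) (i.e. 1.25) and
 * fp_exp = 2, which is 1.25 * 2**2 = 5.
 */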

#define	mask(nbits)	((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
__fpu_stof(fp, i)
	struct fpn *fp;
	u_int i;
{
	int exp;
	u_int frac, f0, f1;
#define SNG_SHIFT	(SNG_FRACBITS - FP_LG)

	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
__fpu_dtof(fp, i, j)
	struct fpn *fp;
	u_int i, j;
{
	int exp;
	u_int frac, f0, f1, f2;
#define DBL_SHIFT	(DBL_FRACBITS - 32 - FP_LG)

	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}

/*
 * 128-bit extended -> fpn.
 */
int
__fpu_qtof(fp, i, j, k, l)
	struct fpn *fp;
	u_int i, j, k, l;
{
	int exp;
	u_int frac, f0, f1, f2, f3;
#define EXT_SHIFT	(-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * Note that ext and fpn `line up', hence no shifting needed.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}
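
/*
 * Worked example for the packed -> fpn conversions above (illustrative
 * only): with the caller having set fp_sign = 1, __fpu_stof(fp, 0xbfc00000)
 * (single precision -1.5, as in the example near the top of this file)
 * extracts exp = 0x7f and frac = 0x400000.  Neither the zero/denormal nor
 * the inf/NaN case of FP_TOF applies, so the result is fp_exp = 127 - 127
 * = 0 and fp_mant[0] = FP_1 | (FP_1 >> 1), i.e. 1.5 in the internal fixed
 * point format, class FPC_NUM: -1.5 once the sign is taken into account.
 */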

#if 0 /* __fpu_explode is unused */
/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
__fpu_explode(fe, fp, type, reg)
	struct fpemu *fe;
	struct fpn *fp;
	int type, reg;
{
	u_int32_t s = 0/* XXX gcc */, *sp;
	u_int64_t l[2];

	if (type == FTYPE_LNG || type == FTYPE_DBL || type == FTYPE_EXT) {
		l[0] = __fpu_getreg64(reg & ~1);
		sp = (u_int32_t *)l;
		fp->fp_sign = sp[0] >> 31;
		fp->fp_sticky = 0;
		switch (type) {
		case FTYPE_LNG:
			s = __fpu_xtof(fp, l[0]);
			break;
		case FTYPE_DBL:
			s = __fpu_dtof(fp, sp[0], sp[1]);
			break;
		case FTYPE_EXT:
			l[1] = __fpu_getreg64((reg & ~1) + 2);
			s = __fpu_qtof(fp, sp[0], sp[1], sp[2], sp[3]);
			break;
		default:
#ifdef DIAGNOSTIC
			__utrap_panic("fpu_explode");
#endif
			break;
		}
	} else {
#ifdef DIAGNOSTIC
		if (type != FTYPE_SNG)
			__utrap_panic("fpu_explode");
#endif
		s = __fpu_getreg32(reg);
		fp->fp_sign = s >> 31;
		fp->fp_sticky = 0;
		s = __fpu_stof(fp, s);
	}

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
	    ((type == FTYPE_INT) ? 'i' :
	    ((type == FTYPE_SNG) ? 's' :
	    ((type == FTYPE_DBL) ? 'd' :
	    ((type == FTYPE_EXT) ? 'q' : '?')))),
	    reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}
#endif
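
#if 0
/*
 * Illustrative sketch (not part of the original file and not compiled):
 * how a caller might explode a single precision value by hand, mirroring
 * the FTYPE_SNG path of the unused __fpu_explode() above, including the
 * quieting of signalling NaNs.  The function name is made up for the
 * example.
 */
static int
example_explode_single(struct fpn *fp, u_int32_t bits)
{
	int s;

	fp->fp_sign = bits >> 31;	/* sign is handled outside the *tof helpers */
	fp->fp_sticky = 0;
	s = __fpu_stof(fp, bits);
	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/* signalling NaN: turn on the quiet bit, as __fpu_explode() would */
		fp->fp_mant[0] |= FP_QUIETBIT;
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	return (s);
}
#endif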