1 /* $NetBSD: fpu_explode.c,v 1.1 2001/06/13 06:01:47 simonb Exp $ */ 2 3 /* 4 * Copyright (c) 1992, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This software was developed by the Computer Systems Engineering group 8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 9 * contributed to Berkeley. 10 * 11 * All advertising materials mentioning features or use of this software 12 * must display the following acknowledgement: 13 * This product includes software developed by the University of 14 * California, Lawrence Berkeley Laboratory. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. All advertising materials mentioning features or use of this software 25 * must display the following acknowledgement: 26 * This product includes software developed by the University of 27 * California, Berkeley and its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 * 44 * @(#)fpu_explode.c 8.1 (Berkeley) 6/11/93 45 */ 46 47 /* 48 * FPU subroutines: `explode' the machine's `packed binary' format numbers 49 * into our internal format. 50 */ 51 52 #include <sys/types.h> 53 #include <sys/systm.h> 54 55 #include <machine/ieee.h> 56 #include <powerpc/instr.h> 57 #include <machine/reg.h> 58 #include <machine/fpu.h> 59 60 #include <powerpc/fpu/fpu_arith.h> 61 #include <powerpc/fpu/fpu_emu.h> 62 #include <powerpc/fpu/fpu_extern.h> 63 64 /* 65 * N.B.: in all of the following, we assume the FP format is 66 * 67 * --------------------------- 68 * | s | exponent | fraction | 69 * --------------------------- 70 * 71 * (which represents -1**s * 1.fraction * 2**exponent), so that the 72 * sign bit is way at the top (bit 31), the exponent is next, and 73 * then the remaining bits mark the fraction. A zero exponent means 74 * zero or denormalized (0.fraction rather than 1.fraction), and the 75 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN. 76 * 77 * Since the sign bit is always the topmost bit---this holds even for 78 * integers---we set that outside all the *tof functions. Each function 79 * returns the class code for the new number (but note that we use 80 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate). 81 */ 82 83 /* 84 * int -> fpn. 
85 */ 86 int 87 fpu_itof(struct fpn *fp, u_int i) 88 { 89 90 if (i == 0) 91 return (FPC_ZERO); 92 /* 93 * The value FP_1 represents 2^FP_LG, so set the exponent 94 * there and let normalization fix it up. Convert negative 95 * numbers to sign-and-magnitude. Note that this relies on 96 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c. 97 */ 98 fp->fp_exp = FP_LG; 99 fp->fp_mant[0] = (int)i < 0 ? -i : i; 100 fp->fp_mant[1] = 0; 101 fp->fp_mant[2] = 0; 102 fp->fp_mant[3] = 0; 103 fpu_norm(fp); 104 return (FPC_NUM); 105 } 106 107 /* 108 * 64-bit int -> fpn. 109 */ 110 int 111 fpu_xtof(struct fpn *fp, u_int64_t i) 112 { 113 114 if (i == 0) 115 return (FPC_ZERO); 116 /* 117 * The value FP_1 represents 2^FP_LG, so set the exponent 118 * there and let normalization fix it up. Convert negative 119 * numbers to sign-and-magnitude. Note that this relies on 120 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c. 121 */ 122 fp->fp_exp = FP_LG2; 123 *((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i; 124 fp->fp_mant[2] = 0; 125 fp->fp_mant[3] = 0; 126 fpu_norm(fp); 127 return (FPC_NUM); 128 } 129 130 #define mask(nbits) ((1L << (nbits)) - 1) 131 132 /* 133 * All external floating formats convert to internal in the same manner, 134 * as defined here. Note that only normals get an implied 1.0 inserted. 
135 */ 136 #define FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \ 137 if (exp == 0) { \ 138 if (allfrac == 0) \ 139 return (FPC_ZERO); \ 140 fp->fp_exp = 1 - expbias; \ 141 fp->fp_mant[0] = f0; \ 142 fp->fp_mant[1] = f1; \ 143 fp->fp_mant[2] = f2; \ 144 fp->fp_mant[3] = f3; \ 145 fpu_norm(fp); \ 146 return (FPC_NUM); \ 147 } \ 148 if (exp == (2 * expbias + 1)) { \ 149 if (allfrac == 0) \ 150 return (FPC_INF); \ 151 fp->fp_mant[0] = f0; \ 152 fp->fp_mant[1] = f1; \ 153 fp->fp_mant[2] = f2; \ 154 fp->fp_mant[3] = f3; \ 155 return (FPC_QNAN); \ 156 } \ 157 fp->fp_exp = exp - expbias; \ 158 fp->fp_mant[0] = FP_1 | f0; \ 159 fp->fp_mant[1] = f1; \ 160 fp->fp_mant[2] = f2; \ 161 fp->fp_mant[3] = f3; \ 162 return (FPC_NUM) 163 164 /* 165 * 32-bit single precision -> fpn. 166 * We assume a single occupies at most (64-FP_LG) bits in the internal 167 * format: i.e., needs at most fp_mant[0] and fp_mant[1]. 168 */ 169 int 170 fpu_stof(struct fpn *fp, u_int i) 171 { 172 int exp; 173 u_int frac, f0, f1; 174 #define SNG_SHIFT (SNG_FRACBITS - FP_LG) 175 176 exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS); 177 frac = i & mask(SNG_FRACBITS); 178 f0 = frac >> SNG_SHIFT; 179 f1 = frac << (32 - SNG_SHIFT); 180 FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0); 181 } 182 183 /* 184 * 64-bit double -> fpn. 185 * We assume this uses at most (96-FP_LG) bits. 186 */ 187 int 188 fpu_dtof(struct fpn *fp, u_int i, u_int j) 189 { 190 int exp; 191 u_int frac, f0, f1, f2; 192 #define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG) 193 194 exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS); 195 frac = i & mask(DBL_FRACBITS - 32); 196 f0 = frac >> DBL_SHIFT; 197 f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT); 198 f2 = j << (32 - DBL_SHIFT); 199 frac |= j; 200 FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0); 201 } 202 203 /* 204 * 128-bit extended -> fpn. 
 */
int
fpu_qtof(struct fpn *fp, u_int i, u_int j, u_int k, u_int l)
{
	int exp;
	u_int frac, f0, f1, f2, f3;
#define	EXT_SHIFT	(-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * Note that ext and fpn `line up', hence no shifting needed.
	 */
	/* i holds sign/exponent/top fraction; j, k, l the lower words. */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	/* frac|j|k|l is only used as an all-fraction-bits zero test. */
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 *
 * `type' selects which of the fpu_*tof converters above unpacks
 * fpreg[reg] into *fp; fp_sign and fp_sticky are set here, fp_class
 * from the converter's return value.
 */
void
fpu_explode(struct fpemu *fe, struct fpn *fp, int type, int reg)
{
	u_int s, *space;
	u_int64_t l, *xspace;

	/*
	 * View the 64-bit register both as one 64-bit value (l, for
	 * FTYPE_LNG) and as an array of 32-bit words (space[]).
	 */
	xspace = (u_int64_t *)&fe->fe_fpstate->fpreg[reg];
	l = xspace[0];
	space = (u_int *)&fe->fe_fpstate->fpreg[reg];
	s = space[0];
	/* The sign bit is the top bit of the first word for every type. */
	fp->fp_sign = s >> 31;
	fp->fp_sticky = 0;
	switch (type) {

	case FTYPE_LNG:
		s = fpu_xtof(fp, l);
		break;

	case FTYPE_INT:
		/*
		 * NOTE(review): the 32-bit integer is taken from
		 * space[1] — presumably the low word of the 64-bit
		 * register on a big-endian layout; confirm for any
		 * other-endian port.
		 */
		s = fpu_itof(fp, space[1]);
		break;

	case FTYPE_SNG:
		s = fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = fpu_dtof(fp, s, space[1]);
		break;

	case FTYPE_EXT:
		s = fpu_qtof(fp, s, space[1], space[2], space[3]);
		break;

	default:
		panic("fpu_explode");
	}

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FPSCR_VXSNAN;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
		((type == FTYPE_SNG) ? 's' :
		((type == FTYPE_DBL) ? 'd' :
		((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}