xref: /netbsd/external/bsd/ntp/dist/include/ntp_fp.h (revision 9034ec65)
1 /*	$NetBSD: ntp_fp.h,v 1.11 2020/05/25 20:47:19 christos Exp $	*/
2 
3 /*
4  * ntp_fp.h - definitions for NTP fixed/floating-point arithmetic
5  */
6 
7 #ifndef NTP_FP_H
8 #define NTP_FP_H
9 
10 #include "ntp_types.h"
11 
12 /*
13  * NTP uses two fixed point formats.  The first (l_fp) is the "long"
14  * format and is 64 bits long with the decimal between bits 31 and 32.
15  * This is used for time stamps in the NTP packet header (in network
16  * byte order) and for internal computations of offsets (in local host
17  * byte order). We use the same structure for both signed and unsigned
18  * values, which is a big hack but saves rewriting all the operators
19  * twice. Just to confuse this, we also sometimes just carry the
20  * fractional part in calculations, in both signed and unsigned forms.
21  * Anyway, an l_fp looks like:
22  *
23  *    0			  1		      2			  3
24  *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
25  *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
26  *   |			       Integral Part			     |
27  *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
28  *   |			       Fractional Part			     |
29  *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
30  *
31  */
/*
 * 64-bit NTP timestamp: 32-bit integral part over a 32-bit fraction
 * (units of 2^-32 s, per the format diagram above).  The union lets
 * the integral part be read as unsigned (absolute timestamps) or
 * signed (offsets) without duplicating every operator macro.
 */
typedef struct {
	union {
		u_int32 Xl_ui;		/* integral part, unsigned view */
		int32 Xl_i;		/* integral part, signed view */
	} Ul_i;
	u_int32	l_uf;			/* fractional part */
} l_fp;

#define l_ui	Ul_i.Xl_ui		/* unsigned integral part */
#define	l_i	Ul_i.Xl_i		/* signed integral part */

/*
 * Fractional precision (of an l_fp) is actually the number of
 * bits in a long.
 */
#define	FRACTION_PREC	(32)
48 
49 
50 /*
51  * The second fixed point format is 32 bits, with the decimal between
52  * bits 15 and 16.  There is a signed version (s_fp) and an unsigned
53  * version (u_fp).  This is used to represent synchronizing distance
54  * and synchronizing dispersion in the NTP packet header (again, in
55  * network byte order) and internally to hold both distance and
56  * dispersion values (in local byte order).  In network byte order
57  * it looks like:
58  *
59  *    0			  1		      2			  3
60  *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
61  *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
62  *   |		  Integer Part	     |	   Fraction Part	     |
63  *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64  *
65  */
typedef int32 s_fp;		/* signed 16.16 fixed point */
typedef u_int32 u_fp;		/* unsigned 16.16 fixed point */

/*
 * A unit second in fp format.	Actually 2**(half_the_bits_in_a_long),
 * i.e. 1 << 16, since the binary point sits between bits 15 and 16.
 */
#define	FP_SECOND	(0x10000)
73 
74 /*
75  * Byte order conversions
76  */
/* An s_fp/u_fp fits one 32-bit word, so a single htonl/ntohl does. */
#define	HTONS_FP(x)	(htonl(x))
#define	NTOHS_FP(x)	(ntohl(x))

/* net-to-host for an l_fp carried as two separate words (ni, nf). */
#define	NTOHL_MFP(ni, nf, hi, hf)				\
	do {							\
		(hi) = ntohl(ni);				\
		(hf) = ntohl(nf);				\
	} while (FALSE)

/* host-to-net, two-word form. */
#define	HTONL_MFP(hi, hf, ni, nf)				\
	do {							\
		(ni) = htonl(hi);				\
		(nf) = htonl(hf);				\
	} while (FALSE)

/* Pointer-taking wrappers converting a whole l_fp at once. */
#define HTONL_FP(h, n)						\
	HTONL_MFP((h)->l_ui, (h)->l_uf, (n)->l_ui, (n)->l_uf)

#define NTOHL_FP(n, h)						\
	NTOHL_MFP((n)->l_ui, (n)->l_uf, (h)->l_ui, (h)->l_uf)

/* Convert unsigned ts fraction to net order ts */
#define	HTONL_UF(uf, nts)					\
	do {							\
		(nts)->l_ui = 0;				\
		(nts)->l_uf = htonl(uf);			\
	} while (FALSE)
104 
105 /*
106  * Conversions between the two fixed point types
107  */
/*
 * MFPTOFP: narrow a two-word l_fp (x_i.x_f) to 16.16 fixed point,
 * saturating to the most positive/negative s_fp value when the
 * integral part does not fit in 16 signed bits.
 * NB: arguments are evaluated more than once -- no side effects.
 */
#define	MFPTOFP(x_i, x_f)	(((x_i) >= 0x00010000) ? 0x7fffffff : \
				(((x_i) <= -0x00010000) ? 0x80000000 : \
				(((x_i)<<16) | (((x_f)>>16)&0xffff))))
#define	LFPTOFP(v)		MFPTOFP((v)->l_i, (v)->l_uf)

/* Widen a u_fp to an l_fp; FPTOLFP additionally sign-extends the
 * integral part (the 0x10000 correction) for negative s_fp input. */
#define UFPTOLFP(x, v) ((v)->l_ui = (u_fp)(x)>>16, (v)->l_uf = (x)<<16)
#define FPTOLFP(x, v)  (UFPTOLFP((x), (v)), (x) < 0 ? (v)->l_ui -= 0x10000 : 0)

/* Largest / smallest representable signed l_fp values. */
#define MAXLFP(v) ((v)->l_ui = 0x7fffffffu, (v)->l_uf = 0xffffffffu)
#define MINLFP(v) ((v)->l_ui = 0x80000000u, (v)->l_uf = 0u)
118 
119 /*
120  * Primitive operations on long fixed point values.  If these are
121  * reminiscent of assembler op codes it's only because some may
122  * be replaced by inline assembler for particular machines someday.
123  * These are the (kind of inefficient) run-anywhere versions.
124  */
/* Negate in place: complement both words, then propagate the two's
 * complement +1 carry from the fraction into the integral part (the
 * carry ripples up only when the negated fraction wraps to zero). */
#define	M_NEG(v_i, v_f)		/* v = -v */ \
	do { \
		(v_f) = ~(v_f) + 1u; \
		(v_i) = ~(v_i) + ((v_f) == 0); \
	} while (FALSE)

/* Same negation, but into a separate result pair. */
#define	M_NEGM(r_i, r_f, a_i, a_f)	/* r = -a */ \
	do { \
		(r_f) = ~(a_f) + 1u; \
		(r_i) = ~(a_i) + ((r_f) == 0); \
	} while (FALSE)

/* 64-bit add from two 32-bit adds: unsigned wraparound in the
 * fraction (new value smaller than the old one) signals a carry
 * into the integral word. */
#define M_ADD(r_i, r_f, a_i, a_f)	/* r += a */ \
	do { \
		u_int32 add_t = (r_f); \
		(r_f) += (a_f); \
		(r_i) += (a_i) + ((u_int32)(r_f) < add_t); \
	} while (FALSE)

/* 96-bit add (overflow word r_o over r_i.r_f); each stage detects
 * its carry with the same unsigned-wraparound test. */
#define M_ADD3(r_o, r_i, r_f, a_o, a_i, a_f) /* r += a, three word */ \
	do { \
		u_int32 add_t, add_c; \
		add_t  = (r_f); \
		(r_f) += (a_f); \
		add_c  = ((u_int32)(r_f) < add_t); \
		(r_i) += add_c; \
		add_c  = ((u_int32)(r_i) < add_c); \
		add_t  = (r_i); \
		(r_i) += (a_i); \
		add_c |= ((u_int32)(r_i) < add_t); \
		(r_o) += (a_o) + add_c; \
	} while (FALSE)

/* 64-bit subtract: a borrow occurred iff the new fraction is larger
 * than it was before the subtraction (unsigned wraparound). */
#define M_SUB(r_i, r_f, a_i, a_f)	/* r -= a */ \
	do { \
		u_int32 sub_t = (r_f); \
		(r_f) -= (a_f); \
		(r_i) -= (a_i) + ((u_int32)(r_f) > sub_t); \
	} while (FALSE)

/* Logical right shift by one: integral LSB moves into the fraction
 * MSB; zero fills the vacated sign position. */
#define	M_RSHIFTU(v_i, v_f)		/* v >>= 1, v is unsigned */ \
	do { \
		(v_f) = ((u_int32)(v_f) >> 1) | ((u_int32)(v_i) << 31);	\
		(v_i) = ((u_int32)(v_i) >> 1); \
	} while (FALSE)

/* Arithmetic right shift by one, done portably in unsigned math:
 * shift logically, then OR the original sign bit back in. */
#define	M_RSHIFT(v_i, v_f)		/* v >>= 1, v is signed */ \
	do { \
		(v_f) = ((u_int32)(v_f) >> 1) | ((u_int32)(v_i) << 31);	\
		(v_i) = ((u_int32)(v_i) >> 1) | ((u_int32)(v_i) & 0x80000000);	\
	} while (FALSE)

/* Left shift by one: the fraction MSB moves into the integral LSB. */
#define	M_LSHIFT(v_i, v_f)		/* v <<= 1 */ \
	do { \
		(v_i) = ((u_int32)(v_i) << 1) | ((u_int32)(v_f) >> 31);	\
		(v_f) = ((u_int32)(v_f) << 1); \
	} while (FALSE)

/* Three-word left shift: bits shifted out of v_i land in v_o. */
#define	M_LSHIFT3(v_o, v_i, v_f)	/* v <<= 1, with overflow */ \
	do { \
		(v_o) = ((u_int32)(v_o) << 1) | ((u_int32)(v_i) >> 31);	\
		(v_i) = ((u_int32)(v_i) << 1) | ((u_int32)(v_f) >> 31);	\
		(v_f) = ((u_int32)(v_f) << 1); \
	} while (FALSE)

#define	M_ADDUF(r_i, r_f, uf)		/* r += uf, uf is u_int32 fraction */ \
	M_ADD((r_i), (r_f), 0, (uf))	/* let optimizer worry about it */

#define	M_SUBUF(r_i, r_f, uf)		/* r -= uf, uf is u_int32 fraction */ \
	M_SUB((r_i), (r_f), 0, (uf))	/* let optimizer worry about it */
195 
/*
 * M_ADDF: add a signed 32-bit fraction to an l_fp.  The fraction is
 * split by sign so the unsigned carry logic of M_ADD/M_SUB can be
 * reused.  Uses u_int32 (the type name used by every other macro in
 * this header) rather than the stray "uint32" spelling, and the
 * file-standard while (FALSE) terminator.
 * NB: -add_f overflows for f == most negative int32; callers are
 * expected to pass small adjustments (TODO confirm at call sites).
 */
#define	M_ADDF(r_i, r_f, f)		/* r += f, f is a int32 fraction */ \
	do { \
		int32 add_f = (int32)(f); \
		if (add_f >= 0) \
			M_ADD((r_i), (r_f), 0, (u_int32)( add_f)); \
		else \
			M_SUB((r_i), (r_f), 0, (u_int32)(-add_f)); \
	} while (FALSE)
204 
/* Sign test via the high bit of the integral word. */
#define	M_ISNEG(v_i)			/* v < 0 */ \
	(((v_i) & 0x80000000) != 0)

/* Signed comparison in unsigned arithmetic: XORing the sign bit
 * biases both integral parts by 2^31, mapping signed order onto
 * unsigned order; the fractions then break ties as plain unsigned. */
#define	M_ISGT(a_i, a_f, b_i, b_f)	/* a > b signed */ \
	(((u_int32)((a_i) ^ 0x80000000) > (u_int32)((b_i) ^ 0x80000000)) || \
	  ((a_i) == (b_i) && ((u_int32)(a_f)) > ((u_int32)(b_f))))

#define	M_ISGTU(a_i, a_f, b_i, b_f)	/* a > b unsigned */ \
	(((u_int32)(a_i)) > ((u_int32)(b_i)) || \
	  ((a_i) == (b_i) && ((u_int32)(a_f)) > ((u_int32)(b_f))))

/* "a Higher or Same as b" -- unsigned >=, after the ARM condition. */
#define	M_ISHIS(a_i, a_f, b_i, b_f)	/* a >= b unsigned */ \
	(((u_int32)(a_i)) > ((u_int32)(b_i)) || \
	  ((a_i) == (b_i) && ((u_int32)(a_f)) >= ((u_int32)(b_f))))

#define	M_ISGEQ(a_i, a_f, b_i, b_f)	/* a >= b signed */ \
	(((u_int32)((a_i) ^ 0x80000000) > (u_int32)((b_i) ^ 0x80000000)) || \
	  ((a_i) == (b_i) && (u_int32)(a_f) >= (u_int32)(b_f)))

#define	M_ISEQU(a_i, a_f, b_i, b_f)	/* a == b unsigned */ \
	((u_int32)(a_i) == (u_int32)(b_i) && (u_int32)(a_f) == (u_int32)(b_f))
226 
227 /*
228  * Operations on the long fp format
229  */
/* Convenience wrappers applying the M_* primitives to l_fp pointers.
 * Note L_RSHIFT uses the signed view (l_i) so the shift is
 * arithmetic; L_RSHIFTU uses the unsigned view. */
#define	L_ADD(r, a)	M_ADD((r)->l_ui, (r)->l_uf, (a)->l_ui, (a)->l_uf)
#define	L_SUB(r, a)	M_SUB((r)->l_ui, (r)->l_uf, (a)->l_ui, (a)->l_uf)
#define	L_NEG(v)	M_NEG((v)->l_ui, (v)->l_uf)
#define L_ADDUF(r, uf)	M_ADDUF((r)->l_ui, (r)->l_uf, (uf))
#define L_SUBUF(r, uf)	M_SUBUF((r)->l_ui, (r)->l_uf, (uf))
#define	L_ADDF(r, f)	M_ADDF((r)->l_ui, (r)->l_uf, (f))
#define	L_RSHIFT(v)	M_RSHIFT((v)->l_i, (v)->l_uf)
#define	L_RSHIFTU(v)	M_RSHIFTU((v)->l_ui, (v)->l_uf)
#define	L_LSHIFT(v)	M_LSHIFT((v)->l_ui, (v)->l_uf)
#define	L_CLR(v)	((v)->l_ui = (v)->l_uf = 0)

/* Predicates; L_ISGT compares the signed view of the integral part. */
#define	L_ISNEG(v)	M_ISNEG((v)->l_ui)
#define L_ISZERO(v)	(((v)->l_ui | (v)->l_uf) == 0)
#define	L_ISGT(a, b)	M_ISGT((a)->l_i, (a)->l_uf, (b)->l_i, (b)->l_uf)
#define	L_ISGTU(a, b)	M_ISGTU((a)->l_ui, (a)->l_uf, (b)->l_ui, (b)->l_uf)
#define	L_ISHIS(a, b)	M_ISHIS((a)->l_ui, (a)->l_uf, (b)->l_ui, (b)->l_uf)
#define	L_ISGEQ(a, b)	M_ISGEQ((a)->l_ui, (a)->l_uf, (b)->l_ui, (b)->l_uf)
#define	L_ISEQU(a, b)	M_ISEQU((a)->l_ui, (a)->l_uf, (b)->l_ui, (b)->l_uf)
248 
249 /*
250  * s_fp/double and u_fp/double conversions
251  */
#define FRIC		65536.0			/* 2^16 as a double */
/* NB: the casts truncate toward zero; doubles outside the target
 * range are not clamped here (TODO confirm callers bound inputs). */
#define DTOFP(r)	((s_fp)((r) * FRIC))
#define DTOUFP(r)	((u_fp)((r) * FRIC))
#define FPTOD(r)	((double)(r) / FRIC)

/*
 * l_fp/double conversions
 */
#define FRAC		4294967296.0 		/* 2^32 as a double */
261 
262 /*
263  * Use 64 bit integers if available.  Solaris on SPARC has a problem
264  * compiling parsesolaris.c if ntp_fp.h includes math.h, due to
265  * archaic gets() and printf() prototypes used in Solaris kernel
266  * headers.  So far the problem has only been seen with gcc, but it
267  * may also affect Sun compilers, in which case the defined(__GNUC__)
268  * term should be removed.
269  * XSCALE also generates bad code for these, at least with GCC 3.3.5.
270  * This is unrelated to math.h, but the same solution applies.
271  */
#if defined(HAVE_U_INT64) && \
    !(defined(__SVR4) && defined(__sun) && \
      defined(sparc) && defined(__GNUC__) || \
      defined(__arm__) && defined(__XSCALE__) && defined(__GNUC__))

#include <math.h>	/* ldexp() */

/* Scale the double by 2^32 into a 64-bit integer (ldexp avoids any
 * literal large constant), negating the magnitude afterwards via
 * two's complement, then split it into the two l_fp words. */
#define M_DTOLFP(d, r_ui, r_uf)		/* double to l_fp */	\
	do {							\
		double	d_tmp;					\
		u_int64	q_tmp;					\
		int	M_isneg;					\
								\
		d_tmp = (d);					\
		M_isneg = (d_tmp < 0.);				\
		if (M_isneg) {					\
			d_tmp = -d_tmp;				\
		}						\
		q_tmp = (u_int64)ldexp(d_tmp, 32);		\
		if (M_isneg) {					\
			q_tmp = ~q_tmp + 1;			\
		}						\
		(r_uf) = (u_int32)q_tmp;			\
		(r_ui) = (u_int32)(q_tmp >> 32);		\
	} while (FALSE)

/* Reassemble the two words into one 64-bit value, undo the two's
 * complement negation when the integral sign bit is set, then scale
 * back down by 2^-32. */
#define M_LFPTOD(r_ui, r_uf, d) 	/* l_fp to double */	\
	do {							\
		double	d_tmp;					\
		u_int64	q_tmp;					\
		int	M_isneg;				\
								\
		q_tmp = ((u_int64)(r_ui) << 32) + (r_uf);	\
		M_isneg = M_ISNEG(r_ui);			\
		if (M_isneg) {					\
			q_tmp = ~q_tmp + 1;			\
		}						\
		d_tmp = ldexp((double)q_tmp, -32);		\
		if (M_isneg) {					\
			d_tmp = -d_tmp;				\
		}						\
		(d) = d_tmp;					\
	} while (FALSE)

#else /* use only 32 bit unsigned values */

/* 32-bit fallback: split the double into integral and fractional
 * parts by hand; a negative input is converted as a magnitude and
 * negated afterwards with M_NEG. */
#define M_DTOLFP(d, r_ui, r_uf) 		/* double to l_fp */ \
	do { \
		double d_tmp; \
		if ((d_tmp = (d)) < 0) { \
			(r_ui) = (u_int32)(-d_tmp); \
			(r_uf) = (u_int32)(-(d_tmp + (double)(r_ui)) * FRAC); \
			M_NEG((r_ui), (r_uf)); \
		} else { \
			(r_ui) = (u_int32)d_tmp; \
			(r_uf) = (u_int32)((d_tmp - (double)(r_ui)) * FRAC); \
		} \
	} while (0)
/* 32-bit fallback inverse: take the magnitude first, then apply the
 * sign to the assembled double. */
#define M_LFPTOD(r_ui, r_uf, d) 		/* l_fp to double */ \
	do { \
		u_int32 l_thi, l_tlo; \
		l_thi = (r_ui); l_tlo = (r_uf); \
		if (M_ISNEG(l_thi)) { \
			M_NEG(l_thi, l_tlo); \
			(d) = -((double)l_thi + (double)l_tlo / FRAC); \
		} else { \
			(d) = (double)l_thi + (double)l_tlo / FRAC; \
		} \
	} while (0)
#endif

/* Pointer-taking wrappers around whichever variant was selected. */
#define DTOLFP(d, v) 	M_DTOLFP((d), (v)->l_ui, (v)->l_uf)
#define LFPTOD(v, d) 	M_LFPTOD((v)->l_ui, (v)->l_uf, (d))
345 
346 /*
347  * Prototypes
348  */
/* Core decimal formatters; the *toa/*toms macros below wrap these. */
extern	char *	dofptoa		(u_fp, char, short, int);
extern	char *	dolfptoa	(u_int32, u_int32, char, short, int);

/* Parsers: decimal seconds, timeval buffer, hex, GPS week/tow, ms. */
extern	int	atolfp		(const char *, l_fp *);
extern	int	buftvtots	(const char *, l_fp *);
extern	char *	fptoa		(s_fp, short);
extern	char *	fptoms		(s_fp, short);
extern	int	hextolfp	(const char *, l_fp *);
extern  void	gpstolfp	(u_int, u_int, unsigned long, l_fp *);
extern	int	mstolfp		(const char *, l_fp *);
/* Human-readable timestamp formatters. */
extern	char *	prettydate	(l_fp *);
extern	char *	gmprettydate	(l_fp *);
extern	char *	uglydate	(l_fp *);
extern  void	mfp_mul		(int32 *, u_int32 *, int32, u_int32, int32, u_int32);

/* System clock access and discipline (implemented in libntp). */
extern	void	set_sys_fuzz	(double);
extern	void	init_systime	(void);
extern	void	get_systime	(l_fp *);
extern	int	step_systime	(double);
extern	int	adj_systime	(double);
extern	int	clamp_systime	(void);

extern	struct tm * ntp2unix_tm (u_int32 ntp, int local);

/* NOTE(review): mfptoa()/mfptoms() and socktoa()/sockporttoa()/
 * socktohost() are not declared in this header -- they are expected
 * to come from another ntp header; confirm before relying on these
 * macros in isolation. */
#define	lfptoa(fpv, ndec)	mfptoa((fpv)->l_ui, (fpv)->l_uf, (ndec))
#define	lfptoms(fpv, ndec)	mfptoms((fpv)->l_ui, (fpv)->l_uf, (ndec))

#define stoa(addr)		socktoa(addr)
#define	ntoa(addr)		stoa(addr)
#define sptoa(addr)		sockporttoa(addr)
#define stohost(addr)		socktohost(addr)

/* Unsigned formatters: last dofptoa/dolfptoa argument selects
 * seconds (0) vs. milliseconds (1) output. */
#define	ufptoa(fpv, ndec)	dofptoa((fpv), 0, (ndec), 0)
#define	ufptoms(fpv, ndec)	dofptoa((fpv), 0, (ndec), 1)
#define	ulfptoa(fpv, ndec)	dolfptoa((fpv)->l_ui, (fpv)->l_uf, 0, (ndec), 0)
#define	ulfptoms(fpv, ndec)	dolfptoa((fpv)->l_ui, (fpv)->l_uf, 0, (ndec), 1)
#define	umfptoa(fpi, fpf, ndec) dolfptoa((fpi), (fpf), 0, (ndec), 0)
386 
387 /*
388  * Optional callback from libntp step_systime() to ntpd.  Optional
 * because other libntp clients like ntpdate don't use it.
390  */
/* Hook invoked (when non-NULL) after libntp's step_systime(). */
typedef void (*time_stepped_callback)(void);
extern time_stepped_callback	step_callback;
393 
394 /*
395  * Multi-thread locking for get_systime()
396  *
397  * On most systems, get_systime() is used solely by the main ntpd
398  * thread, but on Windows it's also used by the dedicated I/O thread.
399  * The [Bug 2037] changes to get_systime() have it keep state between
400  * calls to ensure time moves in only one direction, which means its
401  * use on Windows needs to be protected against simultaneous execution
402  * to avoid falsely detecting Lamport violations by ensuring only one
403  * thread at a time is in get_systime().
404  */
#ifdef SYS_WINNT
/* Windows: serialize get_systime() with a critical section (see the
 * rationale in the comment block above). */
extern CRITICAL_SECTION get_systime_cs;
# define INIT_GET_SYSTIME_CRITSEC()				\
		InitializeCriticalSection(&get_systime_cs)
# define ENTER_GET_SYSTIME_CRITSEC()				\
		EnterCriticalSection(&get_systime_cs)
# define LEAVE_GET_SYSTIME_CRITSEC()				\
		LeaveCriticalSection(&get_systime_cs)
# define INIT_WIN_PRECISE_TIME()				\
		init_win_precise_time()
#else	/* !SYS_WINNT follows */
/* Elsewhere get_systime() is used from a single thread, so all of
 * these compile to nothing. */
# define INIT_GET_SYSTIME_CRITSEC()			\
		do {} while (FALSE)
# define ENTER_GET_SYSTIME_CRITSEC()			\
		do {} while (FALSE)
# define LEAVE_GET_SYSTIME_CRITSEC()			\
		do {} while (FALSE)
# define INIT_WIN_PRECISE_TIME()			\
		do {} while (FALSE)
#endif
425 
426 #endif /* NTP_FP_H */
427