xref: /freebsd/contrib/ntp/ntpd/refclock_shm.c (revision 9c2daa00)
1 /*
2  * refclock_shm - clock driver for utc via shared memory
3  * - under construction -
4  * To add new modes: Extend or union the shmTime-struct. Do not
5  * extend/shrink size, because otherwise existing implementations
6  * will specify wrong size of shared memory-segment
7  * PB 18.3.97
8  */
9 
10 #ifdef HAVE_CONFIG_H
11 # include <config.h>
12 #endif
13 
14 #include "ntp_types.h"
15 
16 #if defined(REFCLOCK) && defined(CLOCK_SHM)
17 
18 #include "ntpd.h"
19 #undef fileno
20 #include "ntp_io.h"
21 #undef fileno
22 #include "ntp_refclock.h"
23 #undef fileno
24 #include "timespecops.h"
25 #undef fileno
26 #include "ntp_stdlib.h"
27 #include "ntp_assert.h"
28 
29 #undef fileno
30 #include <ctype.h>
31 #undef fileno
32 
33 #ifndef SYS_WINNT
34 # include <sys/ipc.h>
35 # include <sys/shm.h>
36 # include <assert.h>
37 # include <unistd.h>
38 # include <stdio.h>
39 #endif
40 
41 #ifdef HAVE_STDATOMIC_H
42 # include <stdatomic.h>
43 #endif /* HAVE_STDATOMIC_H */
44 
45 /*
46  * This driver supports a reference clock attached thru shared memory
47  */
48 
49 /*
50  * SHM interface definitions
51  */
52 #define PRECISION       (-1)    /* precision assumed (0.5 s) */
53 #define REFID           "SHM"   /* reference ID */
54 #define DESCRIPTION     "SHM/Shared memory interface"
55 
56 #define NSAMPLES        3       /* stages of median filter */
57 
58 /*
59  * Mode flags
60  */
61 #define SHM_MODE_PRIVATE 0x0001
62 
63 /*
64  * Function prototypes
65  */
66 static  int     shm_start       (int unit, struct peer *peer);
67 static  void    shm_shutdown    (int unit, struct peer *peer);
68 static  void    shm_poll        (int unit, struct peer *peer);
69 static  void    shm_timer       (int unit, struct peer *peer);
70 static	void	shm_clockstats  (int unit, struct peer *peer);
71 static	void	shm_control	(int unit, const struct refclockstat * in_st,
72 				 struct refclockstat * out_st, struct peer *peer);
73 
74 /*
75  * Transfer vector
76  */
/* Dispatch table handed to ntpd's generic refclock machinery; the
 * entries below are the only way this driver is invoked. */
77 struct  refclock refclock_shm = {
78 	shm_start,              /* start up driver */
79 	shm_shutdown,           /* shut down driver */
80 	shm_poll,		/* transmit poll message */
81 	shm_control,		/* control settings */
82 	noentry,		/* not used: init */
83 	noentry,		/* not used: buginfo */
84 	shm_timer,              /* once per second */
85 };
86 
/* Layout of the shared memory segment. The size must never change
 * (see the header comment): producers and this driver both shmget()
 * with sizeof(struct shmTime). */
87 struct shmTime {
88 	int    mode; /* 0 - if valid is set:
89 		      *       use values,
90 		      *       clear valid
91 		      * 1 - if valid is set:
92 		      *       if count before and after read of values is equal,
93 		      *         use values
94 		      *       clear valid
95 		      */
96 	volatile int    count;	/* bumped by the producer; sampled before/after
				 * the copy for mode-1 clash detection */
97 	time_t		clockTimeStampSec;	/* external clock time, seconds */
98 	int		clockTimeStampUSec;	/* external clock time, microseconds */
99 	time_t		receiveTimeStampSec;	/* local receive time, seconds */
100 	int		receiveTimeStampUSec;	/* local receive time, microseconds */
101 	int		leap;	/* leap notification code (not an offset) */
102 	int		precision;	/* clock precision, copied to peer->precision */
103 	int		nsamples;	/* set to NSAMPLES by shm_start() */
104 	volatile int    valid;	/* set by producer when data ready;
				 * cleared by the reader after the copy */
105 	unsigned	clockTimeStampNSec;	/* Unsigned ns timestamps */
106 	unsigned	receiveTimeStampNSec;	/* Unsigned ns timestamps */
107 	int		dummy[8];	/* reserved; keeps the segment size fixed */
108 };
109 
/* Per-unit driver state: mapped segment, access mode, tally counters
 * and the sanity limits (set in shm_start, tuned in shm_control). */
110 struct shmunit {
111 	struct shmTime *shm;	/* pointer to shared memory segment */
112 	int forall;		/* access for all UIDs?	*/
113 
114 	/* debugging/monitoring counters - reset when printed */
115 	int ticks;		/* number of attempts to read data*/
116 	int good;		/* number of valid samples */
117 	int notready;		/* number of peeks without data ready */
118 	int bad;		/* number of invalid samples */
119 	int clash;		/* number of access clashes while reading */
120 
121 	time_t max_delta;	/* difference limit */
122 	time_t max_delay;	/* age/stale limit */
123 };
124 
125 
126 static struct shmTime*
getShmTime(int unit,int forall)127 getShmTime(
128 	int unit,
129 	int/*BOOL*/ forall
130 	)
131 {
132 	struct shmTime *p = NULL;
133 
134 #ifndef SYS_WINNT
135 
136 	int shmid;
137 
138 	/* 0x4e545030 is NTP0.
139 	 * Big units will give non-ascii but that's OK
140 	 * as long as everybody does it the same way.
141 	 */
142 	shmid=shmget(0x4e545030 + unit, sizeof (struct shmTime),
143 		      IPC_CREAT | (forall ? 0666 : 0600));
144 	if (shmid == -1) { /* error */
145 		msyslog(LOG_ERR, "SHM shmget (unit %d): %m", unit);
146 		return NULL;
147 	}
148 	p = (struct shmTime *)shmat (shmid, 0, 0);
149 	if (p == (struct shmTime *)-1) { /* error */
150 		msyslog(LOG_ERR, "SHM shmat (unit %d): %m", unit);
151 		return NULL;
152 	}
153 
154 	return p;
155 #else
156 
157 	static const char * nspref[2] = { "Local", "Global" };
158 	char buf[20];
159 	LPSECURITY_ATTRIBUTES psec = 0;
160 	HANDLE shmid = 0;
161 	SECURITY_DESCRIPTOR sd;
162 	SECURITY_ATTRIBUTES sa;
163 	unsigned int numch;
164 
165 	numch = snprintf(buf, sizeof(buf), "%s\\NTP%d",
166 			 nspref[forall != 0], (unit & 0xFF));
167 	if (numch >= sizeof(buf)) {
168 		msyslog(LOG_ERR, "SHM name too long (unit %d)", unit);
169 		return NULL;
170 	}
171 	if (forall) { /* world access */
172 		if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
173 			msyslog(LOG_ERR,"SHM InitializeSecurityDescriptor (unit %d): %m", unit);
174 			return NULL;
175 		}
176 		if (!SetSecurityDescriptorDacl(&sd, TRUE, NULL, FALSE)) {
177 			msyslog(LOG_ERR, "SHM SetSecurityDescriptorDacl (unit %d): %m", unit);
178 			return NULL;
179 		}
180 		sa.nLength = sizeof(SECURITY_ATTRIBUTES);
181 		sa.lpSecurityDescriptor = &sd;
182 		sa.bInheritHandle = FALSE;
183 		psec = &sa;
184 	}
185 	shmid = CreateFileMapping ((HANDLE)0xffffffff, psec, PAGE_READWRITE,
186 				   0, sizeof (struct shmTime), buf);
187 	if (shmid == NULL) { /*error*/
188 		char buf[1000];
189 		FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM,
190 			       0, GetLastError (), 0, buf, sizeof (buf), 0);
191 		msyslog(LOG_ERR, "SHM CreateFileMapping (unit %d): %s", unit, buf);
192 		return NULL;
193 	}
194 	p = (struct shmTime *)MapViewOfFile(shmid, FILE_MAP_WRITE, 0, 0,
195 					    sizeof (struct shmTime));
196 	if (p == NULL) { /*error*/
197 		char buf[1000];
198 		FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM,
199 			       0, GetLastError (), 0, buf, sizeof (buf), 0);
200 		msyslog(LOG_ERR,"SHM MapViewOfFile (unit %d): %s", unit, buf);
201 		return NULL;
202 	}
203 
204 	return p;
205 #endif
206 
207 	/* NOTREACHED */
208 	ENSURE(!"getShmTime(): Not reached.");
209 }
210 
211 
212 /*
213  * shm_start - attach to shared memory
214  */
215 static int
shm_start(int unit,struct peer * peer)216 shm_start(
217 	int unit,
218 	struct peer *peer
219 	)
220 {
221 	struct refclockproc * const pp = peer->procptr;
222 	struct shmunit *      const up = emalloc_zero(sizeof(*up));
223 
224 	pp->io.clock_recv = noentry;
225 	pp->io.srcclock = peer;
226 	pp->io.datalen = 0;
227 	pp->io.fd = -1;
228 
229 	up->forall = (unit >= 2) && !(peer->ttl & SHM_MODE_PRIVATE);
230 
231 	up->shm = getShmTime(unit, up->forall);
232 
233 	/*
234 	 * Initialize miscellaneous peer variables
235 	 */
236 	memcpy((char *)&pp->refid, REFID, 4);
237 	if (up->shm != 0) {
238 		pp->unitptr = up;
239 		up->shm->precision = PRECISION;
240 		peer->precision = up->shm->precision;
241 		up->shm->valid = 0;
242 		up->shm->nsamples = NSAMPLES;
243 		pp->clockdesc = DESCRIPTION;
244 		/* items to be changed later in 'shm_control()': */
245 		up->max_delay = 5;
246 		up->max_delta = 4*3600;
247 		return 1;
248 	} else {
249 		free(up);
250 		pp->unitptr = NULL;
251 		return 0;
252 	}
253 }
254 
255 
256 /*
257  * shm_control - configure flag1/time2 params
258  *
259  * These are not yet available during 'shm_start', so we have to do any
260  * pre-computations we want to avoid during regular poll/timer callbacks
261  * in this callback.
262  */
263 static void
shm_control(int unit,const struct refclockstat * in_st,struct refclockstat * out_st,struct peer * peer)264 shm_control(
265 	int                         unit,
266 	const struct refclockstat * in_st,
267 	struct refclockstat       * out_st,
268 	struct peer               * peer
269 	)
270 {
271 	struct refclockproc * const pp = peer->procptr;
272 	struct shmunit *      const up = pp->unitptr;
273 
274 	UNUSED_ARG(unit);
275 	UNUSED_ARG(in_st);
276 	UNUSED_ARG(out_st);
277 	if (NULL == up)
278 		return;
279 	if (pp->sloppyclockflag & CLK_FLAG1)
280 		up->max_delta = 0;
281 	else if (pp->fudgetime2 < 1. || pp->fudgetime2 > 86400.)
282 		up->max_delta = 4*3600;
283 	else
284 		up->max_delta = (time_t)floor(pp->fudgetime2 + 0.5);
285 }
286 
287 
288 /*
289  * shm_shutdown - shut down the clock
290  */
291 static void
shm_shutdown(int unit,struct peer * peer)292 shm_shutdown(
293 	int unit,
294 	struct peer *peer
295 	)
296 {
297 	struct refclockproc * const pp = peer->procptr;
298 	struct shmunit *      const up = pp->unitptr;
299 
300 	UNUSED_ARG(unit);
301 	if (NULL == up)
302 		return;
303 #ifndef SYS_WINNT
304 
305 	/* HMS: shmdt() wants char* or const void * */
306 	(void)shmdt((char *)up->shm);
307 
308 #else
309 
310 	UnmapViewOfFile(up->shm);
311 
312 #endif
313 	free(up);
314 }
315 
316 
317 /*
318  * shm_poll - called by the transmit procedure
319  */
320 static void
shm_poll(int unit,struct peer * peer)321 shm_poll(
322 	int unit,
323 	struct peer *peer
324 	)
325 {
326 	struct refclockproc * const pp = peer->procptr;
327 	struct shmunit *      const up = pp->unitptr;
328 	int major_error;
329 
330 	pp->polls++;
331 
332 	/* get dominant reason if we have no samples at all */
333 	major_error = max(up->notready, up->bad);
334 	major_error = max(major_error, up->clash);
335 
336         /*
337          * Process median filter samples. If none received, see what
338          * happened, tell the core and keep going.
339          */
340         if (pp->coderecv != pp->codeproc) {
341 		/* have some samples, everything OK */
342 		pp->lastref = pp->lastrec;
343 		refclock_report(peer, CEVNT_NOMINAL);
344 		refclock_receive(peer);
345 	} else if (NULL == up->shm) { /* is this possible at all? */
346 		/* we're out of business without SHM access */
347 		refclock_report(peer, CEVNT_FAULT);
348 	} else if (major_error == up->clash) {
349 		/* too many collisions is like a bad signal */
350                 refclock_report(peer, CEVNT_PROP);
351 	} else if (major_error == up->bad) {
352 		/* too much stale/bad/garbled data */
353                 refclock_report(peer, CEVNT_BADREPLY);
354 	} else {
355 		/* in any other case assume it's just a timeout */
356                 refclock_report(peer, CEVNT_TIMEOUT);
357         }
358 	/* shm_clockstats() clears the tallies, so it must be last... */
359 	shm_clockstats(unit, peer);
360 }
361 
362 
/* Classification of one shm_query() attempt. */
363 enum segstat_t {
364     OK, NO_SEGMENT, NOT_READY, BAD_MODE, CLASH
365 };
366 
/* Snapshot of one sample taken from the segment by shm_query(). */
367 struct shm_stat_t {
368     int status;		/* one of enum segstat_t */
369     int mode;		/* mode word copied from the segment */
370     struct timespec tvc, tvr, tvt;	/* local call time / receive time / clock time */
371     int precision;	/* precision copied from the segment */
372     int leap;		/* leap notification code copied from the segment */
373 };
374 
/*
 * memory_barrier - full fence around the segment copy in shm_query().
 *
 * Compiles to a seq_cst atomic_thread_fence() where the toolchain
 * provides one, and to a no-op otherwise (shm_query() then falls back
 * to its count-based clash detection).
 */
static inline void memory_barrier(void)
{
#ifdef HAVE_ATOMIC_THREAD_FENCE
	atomic_thread_fence(memory_order_seq_cst);
#endif /* HAVE_ATOMIC_THREAD_FENCE */
}
381 
shm_query(volatile struct shmTime * shm_in,struct shm_stat_t * shm_stat)382 static enum segstat_t shm_query(volatile struct shmTime *shm_in, struct shm_stat_t *shm_stat)
383 /* try to grab a sample from the specified SHM segment */
384 {
385     struct shmTime shmcopy;
386     volatile struct shmTime *shm = shm_in;
387     volatile int cnt;
388 
389     unsigned int cns_new, rns_new;
390 
391     /*
392      * This is the main routine. It snatches the time from the shm
393      * board and tacks on a local timestamp.
394      */
395     if (shm == NULL) {
396 	shm_stat->status = NO_SEGMENT;
397 	return NO_SEGMENT;
398     }
399 
400     /*@-type@*//* splint is confused about struct timespec */
401     shm_stat->tvc.tv_sec = shm_stat->tvc.tv_nsec = 0;
402     {
403 	time_t now;
404 
405 	time(&now);
406 	shm_stat->tvc.tv_sec = now;
407     }
408 
409     /* relying on word access to be atomic here */
410     if (shm->valid == 0) {
411 	shm_stat->status = NOT_READY;
412 	return NOT_READY;
413     }
414 
415     cnt = shm->count;
416 
417     /*
418      * This is proof against concurrency issues if either
419      * (a) the memory_barrier() call works on this host, or
420      * (b) memset compiles to an uninterruptible single-instruction bitblt.
421      */
422     memory_barrier();
423     memcpy(&shmcopy, (void*)(uintptr_t)shm, sizeof(struct shmTime));
424     shm->valid = 0;
425     memory_barrier();
426 
427     /*
428      * Clash detection in case neither (a) nor (b) was true.
429      * Not supported in mode 0, and word access to the count field
430      * must be atomic for this to work.
431      */
432     if (shmcopy.mode > 0 && cnt != shm->count) {
433 	shm_stat->status = CLASH;
434 	return shm_stat->status;
435     }
436 
437     shm_stat->status = OK;
438     shm_stat->mode = shmcopy.mode;
439 
440     switch (shmcopy.mode) {
441     case 0:
442 	shm_stat->tvr.tv_sec	= shmcopy.receiveTimeStampSec;
443 	shm_stat->tvr.tv_nsec	= shmcopy.receiveTimeStampUSec * 1000;
444 	rns_new		= shmcopy.receiveTimeStampNSec;
445 	shm_stat->tvt.tv_sec	= shmcopy.clockTimeStampSec;
446 	shm_stat->tvt.tv_nsec	= shmcopy.clockTimeStampUSec * 1000;
447 	cns_new		= shmcopy.clockTimeStampNSec;
448 
449 	/* Since the following comparisons are between unsigned
450 	** variables they are always well defined, and any
451 	** (signed) underflow will turn into very large unsigned
452 	** values, well above the 1000 cutoff.
453 	**
454 	** Note: The usecs *must* be a *truncated*
455 	** representation of the nsecs. This code will fail for
456 	** *rounded* usecs, and the logic to deal with
457 	** wrap-arounds in the presence of rounded values is
458 	** much more convoluted.
459 	*/
460 	if (   ((cns_new - (unsigned)shm_stat->tvt.tv_nsec) < 1000)
461 	       && ((rns_new - (unsigned)shm_stat->tvr.tv_nsec) < 1000)) {
462 	    shm_stat->tvt.tv_nsec = cns_new;
463 	    shm_stat->tvr.tv_nsec = rns_new;
464 	}
465 	/* At this point shm_stat->tvr and shm_stat->tvt contain valid ns-level
466 	** timestamps, possibly generated by extending the old
467 	** us-level timestamps
468 	*/
469 	break;
470 
471     case 1:
472 
473 	shm_stat->tvr.tv_sec	= shmcopy.receiveTimeStampSec;
474 	shm_stat->tvr.tv_nsec	= shmcopy.receiveTimeStampUSec * 1000;
475 	rns_new		= shmcopy.receiveTimeStampNSec;
476 	shm_stat->tvt.tv_sec	= shmcopy.clockTimeStampSec;
477 	shm_stat->tvt.tv_nsec	= shmcopy.clockTimeStampUSec * 1000;
478 	cns_new		= shmcopy.clockTimeStampNSec;
479 
480 	/* See the case above for an explanation of the
481 	** following test.
482 	*/
483 	if (   ((cns_new - (unsigned)shm_stat->tvt.tv_nsec) < 1000)
484 	       && ((rns_new - (unsigned)shm_stat->tvr.tv_nsec) < 1000)) {
485 	    shm_stat->tvt.tv_nsec = cns_new;
486 	    shm_stat->tvr.tv_nsec = rns_new;
487 	}
488 	/* At this point shm_stat->tvr and shm_stat->tvt contains valid ns-level
489 	** timestamps, possibly generated by extending the old
490 	** us-level timestamps
491 	*/
492 	break;
493 
494     default:
495 	shm_stat->status = BAD_MODE;
496 	break;
497     }
498     /*@-type@*/
499 
500     /*
501      * leap field is not a leap offset but a leap notification code.
502      * The values are magic numbers used by NTP and set by GPSD, if at all, in
503      * the subframe code.
504      */
505     shm_stat->leap = shmcopy.leap;
506     shm_stat->precision = shmcopy.precision;
507 
508     return shm_stat->status;
509 }
510 
511 /*
512  * shm_timer - called once every second.
513  *
514  * This tries to grab a sample from the SHM segment, filtering bad ones
515  */
516 static void
shm_timer(int unit,struct peer * peer)517 shm_timer(
518 	int unit,
519 	struct peer *peer
520 	)
521 {
522 	struct refclockproc * const pp = peer->procptr;
523 	struct shmunit *      const up = pp->unitptr;
524 
525 	volatile struct shmTime *shm;
526 
527 	l_fp tsrcv;
528 	l_fp tsref;
529 	int c;
530 
531 	/* for formatting 'a_lastcode': */
532 	struct calendar cd;
533 	time_t tt;
534 	vint64 ts;
535 
536 	enum segstat_t status;
537 	struct shm_stat_t shm_stat;
538 
539 	up->ticks++;
540 	if ((shm = up->shm) == NULL) {
541 		/* try to map again - this may succeed if meanwhile some-
542 		body has ipcrm'ed the old (unaccessible) shared mem segment */
543 		shm = up->shm = getShmTime(unit, up->forall);
544 		if (shm == NULL) {
545 			DPRINTF(1, ("%s: no SHM segment\n",
546 				    refnumtoa(&peer->srcadr)));
547 			return;
548 		}
549 	}
550 
551 	/* query the segment, atomically */
552 	status = shm_query(shm, &shm_stat);
553 
554 	switch (status) {
555 	case OK:
556 	    DPRINTF(2, ("%s: SHM type %d sample\n",
557 			refnumtoa(&peer->srcadr), shm_stat.mode));
558 	    break;
559 	case NO_SEGMENT:
560 	    /* should never happen, but is harmless */
561 	    return;
562 	case NOT_READY:
563 	    DPRINTF(1, ("%s: SHM not ready\n",refnumtoa(&peer->srcadr)));
564 	    up->notready++;
565 	    return;
566 	case BAD_MODE:
567 	    DPRINTF(1, ("%s: SHM type blooper, mode=%d\n",
568 			refnumtoa(&peer->srcadr), shm->mode));
569 	    up->bad++;
570 	    msyslog (LOG_ERR, "SHM: bad mode found in shared memory: %d",
571 		     shm->mode);
572 	    return;
573 	case CLASH:
574 	    DPRINTF(1, ("%s: type 1 access clash\n",
575 			refnumtoa(&peer->srcadr)));
576 	    msyslog (LOG_NOTICE, "SHM: access clash in shared memory");
577 	    up->clash++;
578 	    return;
579 	default:
580 	    DPRINTF(1, ("%s: internal error, unknown SHM fetch status\n",
581 			refnumtoa(&peer->srcadr)));
582 	    msyslog (LOG_NOTICE, "internal error, unknown SHM fetch status");
583 	    up->bad++;
584 	    return;
585 	}
586 
587 
588 	/* format the last time code in human-readable form into
589 	 * 'pp->a_lastcode'. Someone claimed: "NetBSD has incompatible
590 	 * tv_sec". I can't find a base for this claim, but we can work
591 	 * around that potential problem. BTW, simply casting a pointer
592 	 * is a receipe for disaster on some architectures.
593 	 */
594 	tt = (time_t)shm_stat.tvt.tv_sec;
595 	ts = time_to_vint64(&tt);
596 	ntpcal_time_to_date(&cd, &ts);
597 
598 	/* add ntpq -c cv timecode in ISO 8601 format */
599 	c = snprintf(pp->a_lastcode, sizeof(pp->a_lastcode),
600 		     "%04u-%02u-%02uT%02u:%02u:%02u.%09ldZ",
601 		     cd.year, cd.month, cd.monthday,
602 		     cd.hour, cd.minute, cd.second,
603 		     (long)shm_stat.tvt.tv_nsec);
604 	pp->lencode = (c > 0 && (size_t)c < sizeof(pp->a_lastcode)) ? c : 0;
605 
606 	/* check 1: age control of local time stamp */
607 	tt = shm_stat.tvc.tv_sec - shm_stat.tvr.tv_sec;
608 	if (tt < 0 || tt > up->max_delay) {
609 		DPRINTF(1, ("%s:SHM stale/bad receive time, delay=%llds\n",
610 			    refnumtoa(&peer->srcadr), (long long)tt));
611 		up->bad++;
612 		msyslog (LOG_ERR, "SHM: stale/bad receive time, delay=%llds",
613 			 (long long)tt);
614 		return;
615 	}
616 
617 	/* check 2: delta check */
618 	tt = shm_stat.tvr.tv_sec - shm_stat.tvt.tv_sec - (shm_stat.tvr.tv_nsec < shm_stat.tvt.tv_nsec);
619 	if (tt < 0)
620 		tt = -tt;
621 	if (up->max_delta > 0 && tt > up->max_delta) {
622 		DPRINTF(1, ("%s: SHM diff limit exceeded, delta=%llds\n",
623 			    refnumtoa(&peer->srcadr), (long long)tt));
624 		up->bad++;
625 		msyslog (LOG_ERR, "SHM: difference limit exceeded, delta=%llds\n",
626 			 (long long)tt);
627 		return;
628 	}
629 
630 	/* if we really made it to this point... we're winners! */
631 	DPRINTF(2, ("%s: SHM feeding data\n",
632 		    refnumtoa(&peer->srcadr)));
633 	tsrcv = tspec_stamp_to_lfp(shm_stat.tvr);
634 	tsref = tspec_stamp_to_lfp(shm_stat.tvt);
635 	pp->leap = shm_stat.leap;
636 	peer->precision = shm_stat.precision;
637 	refclock_process_offset(pp, tsref, tsrcv, pp->fudgetime1);
638 	up->good++;
639 }
640 
641 /*
642  * shm_clockstats - dump and reset counters
643  */
shm_clockstats(int unit,struct peer * peer)644 static void shm_clockstats(
645 	int unit,
646 	struct peer *peer
647 	)
648 {
649 	struct refclockproc * const pp = peer->procptr;
650 	struct shmunit *      const up = pp->unitptr;
651 
652 	UNUSED_ARG(unit);
653 	if (pp->sloppyclockflag & CLK_FLAG4) {
654 		mprintf_clock_stats(
655 			&peer->srcadr, "%3d %3d %3d %3d %3d",
656 			up->ticks, up->good, up->notready,
657 			up->bad, up->clash);
658 	}
659 	up->ticks = up->good = up->notready = up->bad = up->clash = 0;
660 }
661 
662 #else
663 NONEMPTY_TRANSLATION_UNIT
664 #endif /* REFCLOCK */
665