1 /* sim_timer.c: simulator timer library
2
3 Copyright (c) 1993-2010 Robert M Supnik
4 Copyright (c) 2021 The DPS8M Development Team
5
6 Permission is hereby granted, free of charge, to any person obtaining a
7 copy of this software and associated documentation files (the "Software"),
8 to deal in the Software without restriction, including without limitation
9 the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 and/or sell copies of the Software, and to permit persons to whom the
11 Software is furnished to do so, subject to the following conditions:
12
13 The above copyright notice and this permission notice shall be included in
14 all copies or substantial portions of the Software.
15
16 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
23 Except as contained in this notice, the name of Robert M Supnik shall not be
24 used in advertising or otherwise to promote the sale, use or other dealings
25 in this Software without prior written authorization from Robert M Supnik.
26 */
27
28 /*
29 This library includes the following routines:
30
31 sim_timer_init - initialize timing system
32 sim_os_msec - return elapsed time in msec
33 sim_os_sleep - sleep specified number of seconds
34 sim_os_ms_sleep - sleep specified number of milliseconds
35 sim_idle_ms_sleep - sleep specified number of milliseconds
36 or until awakened by an asynchronous
37 event
   sim_timespec_diff        - subtract two timespec values
   sim_timer_activate_after - schedule unit for specific time
40 */
41
42 #include "sim_defs.h"
43 #include <ctype.h>
44 #include <math.h>
45
46 #define SIM_INTERNAL_CLK (SIM_NTIMERS+(1<<30))
47 #define SIM_INTERNAL_UNIT sim_internal_timer_unit
48 #ifndef MIN
49 # define MIN(a,b) (((a) < (b)) ? (a) : (b))
50 #endif
51 #ifndef MAX
52 # define MAX(a,b) (((a) > (b)) ? (a) : (b))
53 #endif
54
55 uint32 sim_idle_ms_sleep (unsigned int msec);
56
57 static int32 sim_calb_tmr = -1; /* the system calibrated timer */
58 static int32 sim_calb_tmr_last = -1; /* shadow value when at sim> prompt */
59 static double sim_inst_per_sec_last = 0; /* shadow value when at sim> prompt */
60
61 static uint32 sim_idle_rate_ms = 0;
62 static uint32 sim_os_sleep_min_ms = 0;
63 static uint32 sim_os_sleep_inc_ms = 0;
64 static uint32 sim_os_clock_resoluton_ms = 0;
65 static uint32 sim_os_tick_hz = 0;
66 static uint32 sim_idle_calib_pct = 0;
67 static UNIT *sim_clock_unit[SIM_NTIMERS+1] = {NULL};
68 UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS+1] = {NULL};
69 static int32 sim_cosched_interval[SIM_NTIMERS+1];
70 static t_bool sim_catchup_ticks = FALSE;
71
72 #define sleep1Samples 10
73
_compute_minimum_sleep(void)74 static uint32 _compute_minimum_sleep (void)
75 {
76 uint32 i, tot, tim;
77
78 sim_os_set_thread_priority (PRIORITY_ABOVE_NORMAL);
79 sim_idle_ms_sleep (1); /* Start sampling on a tick boundary */
80 for (i = 0, tot = 0; i < sleep1Samples; i++)
81 tot += sim_idle_ms_sleep (1);
82 tim = tot / sleep1Samples; /* Truncated average */
83 sim_os_sleep_min_ms = tim;
84 sim_idle_ms_sleep (1); /* Start sampling on a tick boundary */
85 for (i = 0, tot = 0; i < sleep1Samples; i++)
86 tot += sim_idle_ms_sleep (sim_os_sleep_min_ms + 1);
87 tim = tot / sleep1Samples; /* Truncated average */
88 sim_os_sleep_inc_ms = tim - sim_os_sleep_min_ms;
89 sim_os_set_thread_priority (PRIORITY_NORMAL);
90 return sim_os_sleep_min_ms;
91 }
92
sim_idle_ms_sleep(unsigned int msec)93 uint32 sim_idle_ms_sleep (unsigned int msec)
94 {
95 return sim_os_ms_sleep (msec);
96 }
97
98 #if defined(_WIN32)
99 /* On Windows there are several potentially disjoint threading APIs */
100 /* in use (base win32 pthreads, libSDL provided threading, and direct */
101 /* calls to beginthreadex), so go directly to the Win32 threading APIs */
102 /* to manage thread priority */
/* Set the calling thread's scheduling priority.
   below_normal_above: -1 (below normal), 0 (normal), +1 (above normal).
   Returns SCPE_ARG for any other value, SCPE_OK otherwise. */
t_stat sim_os_set_thread_priority (int below_normal_above)
{
/* map -1..+1 onto the three Win32 priority constants */
const static int val[3] = {THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL};

if ((below_normal_above < -1) || (below_normal_above > 1))
    return SCPE_ARG;
SetThreadPriority (GetCurrentThread(), val[1 + below_normal_above]);
return SCPE_OK;
}
112 #else
113 /* Native pthreads priority implementation */
/* Set the calling thread's scheduling priority via pthreads.
   below_normal_above: -1 (below normal), 0 (normal), +1 (above normal).
   Returns SCPE_ARG for any other value, SCPE_OK otherwise.
   NOTE(review): the pthread_getschedparam/pthread_setschedparam results
   are not checked; under the default SCHED_OTHER policy on some hosts
   min and max priority are both 0, making this a no-op. */
t_stat sim_os_set_thread_priority (int below_normal_above)
{
int sched_policy, min_prio, max_prio;
struct sched_param sched_priority;

# ifndef __gnu_hurd__
if ((below_normal_above < -1) || (below_normal_above > 1))
    return SCPE_ARG;

pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
min_prio = sched_get_priority_min(sched_policy);
max_prio = sched_get_priority_max(sched_policy);
switch (below_normal_above) {
    case PRIORITY_BELOW_NORMAL:
        sched_priority.sched_priority = min_prio;
        break;
    case PRIORITY_NORMAL:
        /* midpoint of the policy's priority range */
        sched_priority.sched_priority = (max_prio + min_prio) / 2;
        break;
    case PRIORITY_ABOVE_NORMAL:
        sched_priority.sched_priority = max_prio;
        break;
    }
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
# endif /* ifndef __gnu_hurd__ */
return SCPE_OK;
}
141 #endif
142
143 /* OS-dependent timer and clock routines */
144
145 #if defined (_WIN32)
146
147 /* Win32 routines */
148
149 const t_bool rtc_avail = TRUE;
150
/* Return elapsed host wall-clock time in milliseconds (wraps modulo 2^32). */
uint32 sim_os_msec (void)
{
return timeGetTime ();
}
155
/* Sleep for the specified number of whole seconds. */
void sim_os_sleep (unsigned int sec)
{
Sleep (sec * 1000);                  /* Sleep() takes milliseconds */
return;
}
161
/* atexit() handler: restore the system timer resolution that was raised
   by timeBeginPeriod() in sim_os_ms_sleep_init(). */
void sim_timer_exit (void)
{
timeEndPeriod (sim_idle_rate_ms);
return;
}
167
/* Prepare the Win32 timer subsystem: raise the multimedia timer resolution
   to its minimum supported period and measure the actual minimum sleep
   time.  Returns the measured minimum sleep time in ms, or 0 on failure. */
uint32 sim_os_ms_sleep_init (void)
{
TIMECAPS timers;

if (timeGetDevCaps (&timers, sizeof (timers)) != TIMERR_NOERROR)
    return 0;
if (timers.wPeriodMin == 0)
    return 0;
if (timeBeginPeriod (timers.wPeriodMin) != TIMERR_NOERROR)
    return 0;
atexit (sim_timer_exit);            /* undo timeBeginPeriod at exit */
/* return measured actual minimum sleep time */
return _compute_minimum_sleep ();
}
182
/* Sleep for the given number of milliseconds.
   Returns the wall-clock milliseconds that actually elapsed. */
uint32 sim_os_ms_sleep (unsigned int msec)
{
uint32 stime = sim_os_msec();

Sleep (msec);
return sim_os_msec () - stime;
}
190
191 #else
192
193 /* UNIX routines */
194
195 # include <time.h>
196 # include <sys/time.h>
197 # include <unistd.h>
198 # define NANOS_PER_MILLI 1000000
199 # define MILLIS_PER_SEC 1000
200
201 const t_bool rtc_avail = TRUE;
202
sim_os_msec(void)203 uint32 sim_os_msec (void)
204 {
205 struct timeval cur;
206 struct timezone foo;
207 uint32 msec;
208
209 gettimeofday (&cur, &foo);
210 msec = (((uint32) cur.tv_sec) * 1000) + (((uint32) cur.tv_usec) / 1000);
211 return msec;
212 }
213
/* Sleep for the specified number of whole seconds. */
void sim_os_sleep (unsigned int sec)
{
sleep (sec);
}
219
sim_os_ms_sleep_init(void)220 uint32 sim_os_ms_sleep_init (void)
221 {
222 return _compute_minimum_sleep ();
223 }
224
sim_os_ms_sleep(unsigned int milliseconds)225 uint32 sim_os_ms_sleep (unsigned int milliseconds)
226 {
227 uint32 stime = sim_os_msec ();
228 struct timespec treq;
229
230 treq.tv_sec = milliseconds / MILLIS_PER_SEC;
231 treq.tv_nsec = (milliseconds % MILLIS_PER_SEC) * NANOS_PER_MILLI;
232 (void) nanosleep (&treq, NULL);
233 return sim_os_msec () - stime;
234 }
235
236 #endif
237
/* diff = min - sub
 *
 * Subtract two timespec values: *diff receives (*min - *sub).
 * Inputs are expected to be normalized (0 <= tv_nsec < 1,000,000,000);
 * the result is normalized before returning.
 */
void
sim_timespec_diff (struct timespec *diff, const struct timespec *min, struct timespec *sub)
{
/* move the minuend value to the difference and operate there. */
*diff = *min;
/* Borrow as needed for the nsec value */
while (sub->tv_nsec > diff->tv_nsec) {
    --diff->tv_sec;
    diff->tv_nsec += 1000000000;
    }
diff->tv_nsec -= sub->tv_nsec;
diff->tv_sec -= sub->tv_sec;
/* Normalize the result.  Use >= so that a tv_nsec of exactly one full
   second carries into tv_sec (the previous > test left it unnormalized). */
while (diff->tv_nsec >= 1000000000) {
    ++diff->tv_sec;
    diff->tv_nsec -= 1000000000;
    }
}
257
258 /* Forward declarations */
259
260 static double _timespec_to_double (struct timespec *time);
261 static void _double_to_timespec (struct timespec *time, double dtime);
262 static void _rtcn_configure_calibrated_clock (int32 newtmr);
263 static void _sim_coschedule_cancel(UNIT *uptr);
264
265 /* OS independent clock calibration package */
266
267 static int32 rtc_ticks[SIM_NTIMERS+1] = { 0 }; /* ticks */
268 static uint32 rtc_hz[SIM_NTIMERS+1] = { 0 }; /* tick rate */
269 static uint32 rtc_rtime[SIM_NTIMERS+1] = { 0 }; /* real time */
270 static uint32 rtc_vtime[SIM_NTIMERS+1] = { 0 }; /* virtual time */
271 static double rtc_gtime[SIM_NTIMERS+1] = { 0 }; /* instruction time */
272 static uint32 rtc_nxintv[SIM_NTIMERS+1] = { 0 }; /* next interval */
273 static int32 rtc_based[SIM_NTIMERS+1] = { 0 }; /* base delay */
274 static int32 rtc_currd[SIM_NTIMERS+1] = { 0 }; /* current delay */
275 static int32 rtc_initd[SIM_NTIMERS+1] = { 0 }; /* initial delay */
276 static uint32 rtc_elapsed[SIM_NTIMERS+1] = { 0 }; /* sec since init */
277 static uint32 rtc_calibrations[SIM_NTIMERS+1] = { 0 }; /* calibration count */
278 static double rtc_clock_skew_max[SIM_NTIMERS+1] = { 0 }; /* asynchronous max skew */
279 static double rtc_clock_start_gtime[SIM_NTIMERS+1] = { 0 };/* reference instruction time for clock */
280 static double rtc_clock_tick_size[SIM_NTIMERS+1] = { 0 }; /* 1/hz */
281 static uint32 rtc_calib_initializations[SIM_NTIMERS+1] = { 0 };/* Initialization Count */
282 static double rtc_calib_tick_time[SIM_NTIMERS+1] = { 0 }; /* ticks time */
283 static double rtc_calib_tick_time_tot[SIM_NTIMERS+1] = { 0 };/* ticks time - total*/
284 static uint32 rtc_calib_ticks_acked[SIM_NTIMERS+1] = { 0 };/* ticks Acked */
285 static uint32 rtc_calib_ticks_acked_tot[SIM_NTIMERS+1] = { 0 };/* ticks Acked - total */
286 static uint32 rtc_clock_ticks[SIM_NTIMERS+1] = { 0 };/* ticks delivered since catchup base */
287 static uint32 rtc_clock_ticks_tot[SIM_NTIMERS+1] = { 0 };/* ticks delivered since catchup base - total */
288 static double rtc_clock_catchup_base_time[SIM_NTIMERS+1] = { 0 };/* reference time for catchup ticks */
289 static uint32 rtc_clock_catchup_ticks[SIM_NTIMERS+1] = { 0 };/* Record of catchups */
290 static uint32 rtc_clock_catchup_ticks_tot[SIM_NTIMERS+1] = { 0 };/* Record of catchups - total */
291 static t_bool rtc_clock_catchup_pending[SIM_NTIMERS+1] = { 0 };/* clock tick catchup pending */
292 static t_bool rtc_clock_catchup_eligible[SIM_NTIMERS+1] = { 0 };/* clock tick catchup eligible */
293 static uint32 rtc_clock_time_idled[SIM_NTIMERS+1] = { 0 };/* total time idled */
294 static uint32 rtc_clock_calib_skip_idle[SIM_NTIMERS+1] = { 0 };/* Calibrations skipped due to idling */
295 static uint32 rtc_clock_calib_gap2big[SIM_NTIMERS+1] = { 0 };/* Calibrations skipped Gap Too Big */
296 static uint32 rtc_clock_calib_backwards[SIM_NTIMERS+1] = { 0 };/* Calibrations skipped Clock Running Backwards */
297
298 UNIT sim_timer_units[SIM_NTIMERS+1]; /* one for each timer and one for an */
299 /* internal clock if no clocks are registered */
300 UNIT sim_internal_timer_unit; /* Internal calibration timer */
301 UNIT sim_throttle_unit; /* one for throttle */
302
303 t_stat sim_throt_svc (UNIT *uptr);
304 t_stat sim_timer_tick_svc (UNIT *uptr);
305
306 #define DBG_TRC 0x008 /* tracing */
307 #define DBG_CAL 0x010 /* calibration activities */
308 #define DBG_TIM 0x020 /* timer thread activities */
309 #define DBG_ACK 0x080 /* interrupt acknowledgement activities */
310 DEBTAB sim_timer_debug[] = {
311 {"TRACE", DBG_TRC, "Trace routine calls"},
312 {"IACK", DBG_ACK, "interrupt acknowledgement activities"},
313 {"CALIB", DBG_CAL, "Calibration activities"},
314 {"TIME", DBG_TIM, "Activation and scheduling activities"},
315 {0}
316 };
317
318 /* Forward device declarations */
319 extern DEVICE sim_timer_dev;
320 extern DEVICE sim_throttle_dev;
321
sim_rtcn_init_all(void)322 void sim_rtcn_init_all (void)
323 {
324 int32 tmr;
325
326 for (tmr = 0; tmr <= SIM_NTIMERS; tmr++)
327 if (rtc_initd[tmr] != 0)
328 sim_rtcn_init (rtc_initd[tmr], tmr);
329 return;
330 }
331
/* Initialize timer tmr with an initial tick delay of time instructions.
   Convenience wrapper that binds no specific unit to the timer. */
int32 sim_rtcn_init (int32 time, int32 tmr)
{
return sim_rtcn_init_unit (NULL, time, tmr);
}
336
/* Initialize (or reinitialize) clock timer tmr on behalf of unit uptr.
 *
 * time is the initial tick delay in instructions (0 is coerced to 1);
 * a previously calibrated delay, when available, takes precedence.
 * Accumulated per-timer statistics are folded into the *_tot totals
 * before the working counters are cleared.  Returns the tick delay
 * (in instructions) the caller should use.
 */
int32 sim_rtcn_init_unit (UNIT *uptr, int32 time, int32 tmr)
{
if (time == 0)
    time = 1;
if (tmr == SIM_INTERNAL_CLK)
    tmr = SIM_NTIMERS;
else {
    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
        return time;                    /* invalid timer: hand back input unchanged */
    }
/*
 * If we'd previously succeeded in calibrating a tick value, then use that
 * delay as a better default to setup when we're re-initialized.
 * Re-initializing happens on any boot or after any breakpoint/continue.
 */
if (rtc_currd[tmr])
    time = rtc_currd[tmr];
if (!uptr)
    uptr = sim_clock_unit[tmr];
sim_debug (DBG_CAL, &sim_timer_dev, "_sim_rtcn_init_unit(unit=%s, time=%d, tmr=%d)\n", sim_uname(uptr), time, tmr);
if (uptr) {
    if (!sim_clock_unit[tmr])
        sim_register_clock_unit_tmr (uptr, tmr);
    }
/* Reset calibration state for a fresh cycle */
rtc_clock_start_gtime[tmr] = sim_gtime();
rtc_rtime[tmr] = sim_os_msec ();
rtc_vtime[tmr] = rtc_rtime[tmr];
rtc_nxintv[tmr] = 1000;
rtc_ticks[tmr] = 0;
rtc_hz[tmr] = 0;
rtc_based[tmr] = time;
rtc_currd[tmr] = time;
rtc_initd[tmr] = time;
rtc_elapsed[tmr] = 0;
rtc_calibrations[tmr] = 0;
/* Fold the working counters into the running totals, then clear them */
rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr];
rtc_clock_ticks[tmr] = 0;
rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr];
rtc_calib_tick_time[tmr] = 0;
rtc_clock_catchup_pending[tmr] = FALSE;
rtc_clock_catchup_eligible[tmr] = FALSE;
rtc_clock_catchup_ticks_tot[tmr] += rtc_clock_catchup_ticks[tmr];
rtc_clock_catchup_ticks[tmr] = 0;
rtc_calib_ticks_acked_tot[tmr] += rtc_calib_ticks_acked[tmr];
rtc_calib_ticks_acked[tmr] = 0;
++rtc_calib_initializations[tmr];
_rtcn_configure_calibrated_clock (tmr);
return time;
}
386
/* Calibrate clock timer tmr at ticksper ticks per second.
 *
 * Returns the current tick delay in instructions (or 10000 for an
 * invalid timer number).  When the tick rate changes, the calibrated
 * clock selection is re-evaluated and the delay is re-derived from the
 * current execution rate.
 * NOTE(review): a ticksper of 0 would make 1.0/ticksper infinite and the
 * rtc_currd division meaningless -- callers appear to pass non-zero
 * rates; confirm before relying on this with a 0 rate.
 */
int32 sim_rtcn_calb (int32 ticksper, int32 tmr)
{

if (tmr == SIM_INTERNAL_CLK)
    tmr = SIM_NTIMERS;
else {
    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
        return 10000;
    }
if (rtc_hz[tmr] != ticksper) {                  /* changing tick rate? */
    rtc_hz[tmr] = ticksper;
    rtc_clock_tick_size[tmr] = 1.0/ticksper;
    _rtcn_configure_calibrated_clock (tmr);
    rtc_currd[tmr] = (int32)(sim_timer_inst_per_sec()/ticksper);
    }
if (sim_clock_unit[tmr] == NULL) {              /* Not using TIMER units? */
    /* no tick service routine will count these, so count them here */
    rtc_clock_ticks[tmr] += 1;
    rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr];
    }
if (rtc_clock_catchup_pending[tmr]) {           /* catchup tick? */
    ++rtc_clock_catchup_ticks[tmr];             /* accumulating which were catchups */
    rtc_clock_catchup_pending[tmr] = FALSE;
    }
return rtc_currd[tmr];                          /* return now avoiding counting catchup tick in calibration */
}
412
413 /* sim_timer_init - get minimum sleep time available on this host */
414
sim_timer_init(void)415 t_bool sim_timer_init (void)
416 {
417 int tmr;
418 uint32 clock_start, clock_last, clock_now;
419
420 sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_init()\n");
421 for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
422 sim_timer_units[tmr].action = &sim_timer_tick_svc;
423 sim_timer_units[tmr].flags = UNIT_DIS | UNIT_IDLE;
424 }
425 SIM_INTERNAL_UNIT.flags = UNIT_DIS | UNIT_IDLE;
426 sim_register_internal_device (&sim_timer_dev);
427 sim_register_clock_unit_tmr (&SIM_INTERNAL_UNIT, SIM_INTERNAL_CLK);
428 sim_idle_rate_ms = sim_os_ms_sleep_init (); /* get OS timer rate */
429
430 clock_last = clock_start = sim_os_msec ();
431 sim_os_clock_resoluton_ms = 1000;
432 do {
433 uint32 clock_diff;
434
435 clock_now = sim_os_msec ();
436 clock_diff = clock_now - clock_last;
437 if ((clock_diff > 0) && (clock_diff < sim_os_clock_resoluton_ms))
438 sim_os_clock_resoluton_ms = clock_diff;
439 clock_last = clock_now;
440 } while (clock_now < clock_start + 100);
441 sim_os_tick_hz = 1000/(sim_os_clock_resoluton_ms * (sim_idle_rate_ms/sim_os_clock_resoluton_ms));
442 return (sim_idle_rate_ms != 0);
443 }
444
/* sim_show_timers - show running timer information
 *
 * SCP "SHOW" handler: prints, for every initialized timer, its rate,
 * tick size, elapsed time, calibration statistics, catchup state, and
 * the current wall-clock time.  The dptr/uptr/val/desc parameters are
 * required by the SCP callback signature but are unused here. */
t_stat sim_show_timers (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, CONST char* desc)
{
int tmr, clocks;
struct timespec now;
time_t time_t_now;
/* while stopped at the sim> prompt, show the last active timer */
int32 calb_tmr = (sim_calb_tmr == -1) ? sim_calb_tmr_last : sim_calb_tmr;

for (tmr=clocks=0; tmr<=SIM_NTIMERS; ++tmr) {
    if (0 == rtc_initd[tmr])
        continue;                               /* never initialized: skip */

    if (sim_clock_unit[tmr]) {
        ++clocks;
        fprintf (st, "%s clock device is %s%s%s\n", sim_name,
                 (tmr == SIM_NTIMERS) ? "Internal Calibrated Timer(" : "",
                 sim_uname(sim_clock_unit[tmr]),
                 (tmr == SIM_NTIMERS) ? ")" : "");
        }

    fprintf (st, "%s%sTimer %d:\n", "", rtc_hz[tmr] ? "Calibrated " : "Uncalibrated ", tmr);
    if (rtc_hz[tmr]) {
        fprintf (st, "  Running at:                %lu Hz\n", (unsigned long)rtc_hz[tmr]);
        fprintf (st, "  Tick Size:                 %s\n", sim_fmt_secs (rtc_clock_tick_size[tmr]));
        fprintf (st, "  Ticks in current second:   %lu\n", (unsigned long)rtc_ticks[tmr]);
        }
    fprintf (st, "  Seconds Running:           %lu (%s)\n", (unsigned long)rtc_elapsed[tmr], sim_fmt_secs ((double)rtc_elapsed[tmr]));
    if (tmr == calb_tmr) {
        /* calibration details apply only to the calibrated timer */
        fprintf (st, "  Calibration Opportunities: %lu\n", (unsigned long)rtc_calibrations[tmr]);
        if (sim_idle_calib_pct)
            fprintf (st, "  Calib Skip Idle Thresh %%:  %lu\n", (unsigned long)sim_idle_calib_pct);
        if (rtc_clock_calib_skip_idle[tmr])
            fprintf (st, "  Calibs Skip While Idle:    %lu\n", (unsigned long)rtc_clock_calib_skip_idle[tmr]);
        if (rtc_clock_calib_backwards[tmr])
            fprintf (st, "  Calibs Skip Backwards:     %lu\n", (unsigned long)rtc_clock_calib_backwards[tmr]);
        if (rtc_clock_calib_gap2big[tmr])
            fprintf (st, "  Calibs Skip Gap Too Big:   %lu\n", (unsigned long)rtc_clock_calib_gap2big[tmr]);
        }
    if (rtc_gtime[tmr])
        fprintf (st, "  Instruction Time:          %.0f\n", rtc_gtime[tmr]);
    fprintf (st, "  Current Insts Per Tick:    %lu\n", (unsigned long)rtc_currd[tmr]);
    fprintf (st, "  Initializations:           %lu\n", (unsigned long)rtc_calib_initializations[tmr]);
    fprintf (st, "  Total Ticks:               %lu\n", (unsigned long)rtc_clock_ticks_tot[tmr]+(unsigned long)rtc_clock_ticks[tmr]);
    if (rtc_clock_skew_max[tmr] != 0.0)
        fprintf (st, "  Peak Clock Skew:           %s%s\n", sim_fmt_secs (fabs(rtc_clock_skew_max[tmr])), (rtc_clock_skew_max[tmr] < 0) ? " fast" : " slow");
    if (rtc_calib_ticks_acked[tmr])
        fprintf (st, "  Ticks Acked:               %lu\n", (unsigned long)rtc_calib_ticks_acked[tmr]);
    if (rtc_calib_ticks_acked_tot[tmr]+rtc_calib_ticks_acked[tmr] != rtc_calib_ticks_acked[tmr])
        fprintf (st, "  Total Ticks Acked:         %lu\n", (unsigned long)rtc_calib_ticks_acked_tot[tmr]+(unsigned long)rtc_calib_ticks_acked[tmr]);
    if (rtc_calib_tick_time[tmr])
        fprintf (st, "  Tick Time:                 %s\n", sim_fmt_secs (rtc_calib_tick_time[tmr]));
    if (rtc_calib_tick_time_tot[tmr]+rtc_calib_tick_time[tmr] != rtc_calib_tick_time[tmr])
        fprintf (st, "  Total Tick Time:           %s\n", sim_fmt_secs (rtc_calib_tick_time_tot[tmr]+rtc_calib_tick_time[tmr]));
    if (rtc_clock_catchup_ticks[tmr])
        fprintf (st, "  Catchup Ticks Sched:       %lu\n", (unsigned long)rtc_clock_catchup_ticks[tmr]);
    if (rtc_clock_catchup_ticks_tot[tmr]+rtc_clock_catchup_ticks[tmr] != rtc_clock_catchup_ticks[tmr])
        fprintf (st, "  Total Catchup Ticks Sched: %lu\n", (unsigned long)rtc_clock_catchup_ticks_tot[tmr]+(unsigned long)rtc_clock_catchup_ticks[tmr]);
    clock_gettime (CLOCK_REALTIME, &now);
    time_t_now = (time_t)now.tv_sec;
    /* ctime()+11 skips "Www Mmm dd " so only HH:MM:SS prints */
    fprintf (st, "  Wall Clock Time Now:       %8.8s.%03d\n", 11+ctime(&time_t_now), (int)(now.tv_nsec/1000000));
    if (rtc_clock_catchup_eligible[tmr]) {
        _double_to_timespec (&now, rtc_clock_catchup_base_time[tmr]+rtc_calib_tick_time[tmr]);
        time_t_now = (time_t)now.tv_sec;
        fprintf (st, "  Catchup Tick Time:         %8.8s.%03d\n", 11+ctime(&time_t_now), (int)(now.tv_nsec/1000000));
        _double_to_timespec (&now, rtc_clock_catchup_base_time[tmr]);
        time_t_now = (time_t)now.tv_sec;
        fprintf (st, "  Catchup Base Time:         %8.8s.%03d\n", 11+ctime(&time_t_now), (int)(now.tv_nsec/1000000));
        }
    if (rtc_clock_time_idled[tmr])
        fprintf (st, "  Total Time Idled:          %s\n", sim_fmt_secs (rtc_clock_time_idled[tmr]/1000.0));
    }
if (clocks == 0)
    fprintf (st, "%s clock device is not specified, co-scheduling is unavailable\n", sim_name);
return SCPE_OK;
}
520
/* SCP "SHOW" handler: display the co-schedule event queue of every
   registered clock, listing each queued unit and its accumulated tick
   offset.  dptr and uptr are reused as scratch iteration variables. */
t_stat sim_show_clock_queues (FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, CONST char *cptr)
{
int tmr;

for (tmr=0; tmr<=SIM_NTIMERS; ++tmr) {
    if (sim_clock_unit[tmr] == NULL)
        continue;                       /* no clock registered on this timer */
    if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
        int32 accum;                    /* running sum of tick offsets */

        fprintf (st, "%s clock (%s) co-schedule event queue status\n",
                 sim_name, sim_uname(sim_clock_unit[tmr]));
        accum = 0;
        for (uptr = sim_clock_cosched_queue[tmr]; uptr != QUEUE_LIST_END; uptr = uptr->next) {
            if ((dptr = find_dev_from_unit (uptr)) != NULL) {
                fprintf (st, "  %s", sim_dname (dptr));
                if (dptr->numunits > 1)
                    fprintf (st, " unit %d", (int32) (uptr - dptr->units));
                }
            else
                fprintf (st, "  Unknown");
            if (accum > 0)
                fprintf (st, " after %d ticks", accum);
            fprintf (st, "\n");
            accum = accum + uptr->time;
            }
        }
    }
return SCPE_OK;
}
551
552 REG sim_timer_reg[] = {
553 { NULL }
554 };
555
556 /* Clear catchup */
557
sim_timer_clr_catchup(UNIT * uptr,int32 val,CONST char * cptr,void * desc)558 t_stat sim_timer_clr_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc)
559 {
560 if (sim_catchup_ticks)
561 sim_catchup_ticks = FALSE;
562 return SCPE_OK;
563 }
564
sim_timer_set_catchup(UNIT * uptr,int32 val,CONST char * cptr,void * desc)565 t_stat sim_timer_set_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc)
566 {
567 if (!sim_catchup_ticks)
568 sim_catchup_ticks = TRUE;
569 return SCPE_OK;
570 }
571
sim_timer_show_catchup(FILE * st,UNIT * uptr,int32 val,CONST void * desc)572 t_stat sim_timer_show_catchup (FILE *st, UNIT *uptr, int32 val, CONST void *desc)
573 {
574 fprintf (st, "Calibrated Ticks%s", sim_catchup_ticks ? " with Catchup Ticks" : "");
575 return SCPE_OK;
576 }
577
578 MTAB sim_timer_mod[] = {
579 { MTAB_VDV, MTAB_VDV, "CATCHUP", "CATCHUP", &sim_timer_set_catchup, &sim_timer_show_catchup, NULL, "Enables/Displays Clock Tick catchup mode" },
580 { MTAB_VDV, 0, NULL, "NOCATCHUP", &sim_timer_clr_catchup, NULL, NULL, "Disables Clock Tick catchup mode" },
581 { 0 },
582 };
583
584 static t_stat sim_timer_clock_reset (DEVICE *dptr);
585
586 DEVICE sim_timer_dev = {
587 "TIMER", sim_timer_units, sim_timer_reg, sim_timer_mod,
588 SIM_NTIMERS+1, 0, 0, 0, 0, 0,
589 NULL, NULL, &sim_timer_clock_reset, NULL, NULL, NULL,
590 NULL, DEV_DEBUG | DEV_NOSAVE, 0, sim_timer_debug};
591
/* Clock assist activities */

/* Service routine for a timer-assist unit: deliver one clock tick.
 *
 * Runs the registered clock unit's own action directly, tracks clock
 * skew once catchup calibration has started, releases any coscheduled
 * units whose interval has expired, and reschedules itself for the
 * next tick.  Returns the clock unit's action status.
 * NOTE(review): the reschedule divides by rtc_hz[tmr]; this service
 * should only run after a non-zero rate was set via sim_rtcn_calb --
 * confirm no path reaches here with rtc_hz[tmr] == 0. */
t_stat sim_timer_tick_svc (UNIT *uptr)
{
int tmr = (int)(uptr-sim_timer_units);      /* timer index from unit address */
t_stat stat;

rtc_clock_ticks[tmr] += 1;
rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr];
/*
 * Some devices may depend on executing during the same instruction or
 * immediately after the clock tick event.  To satisfy this, we directly
 * run the clock event here and if it completes successfully, schedule any
 * currently coschedule units to run now.  Ticks should never return a
 * non-success status, while co-schedule activities might, so they are
 * queued to run from sim_process_event
 */
if (sim_clock_unit[tmr]->action == NULL)
    return SCPE_IERR;
stat = sim_clock_unit[tmr]->action (sim_clock_unit[tmr]);
--sim_cosched_interval[tmr];                /* Countdown ticks */
if (stat == SCPE_OK) {
    if (rtc_clock_catchup_eligible[tmr]) {  /* calibration started? */
        struct timespec now;
        double skew;

        /* skew = real time minus simulated tick time */
        clock_gettime(CLOCK_REALTIME, &now);
        skew = (_timespec_to_double(&now) - (rtc_calib_tick_time[tmr]+rtc_clock_catchup_base_time[tmr]));

        if (fabs(skew) > fabs(rtc_clock_skew_max[tmr]))
            rtc_clock_skew_max[tmr] = skew;
        }
    /* Release coscheduled units whose wait has expired */
    while ((sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) &&
           (sim_cosched_interval[tmr] < sim_clock_cosched_queue[tmr]->time)) {
        UNIT *cptr = sim_clock_cosched_queue[tmr];
        sim_clock_cosched_queue[tmr] = cptr->next;
        cptr->next = NULL;
        cptr->cancel = NULL;
        _sim_activate (cptr, 0);            /* queue for sim_process_event */
        }
    if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END)
        sim_cosched_interval[tmr] = sim_clock_cosched_queue[tmr]->time;
    else
        sim_cosched_interval[tmr] = 0;
    }
sim_timer_activate_after (uptr, 1000000/rtc_hz[tmr]);   /* next tick */
return stat;
}
639
640 int
sim_usleep(useconds_t tusleep)641 sim_usleep(useconds_t tusleep)
642 {
643 #if ( !defined(__APPLE__) && !defined(__OpenBSD__) )
644 struct timespec rqt;
645
646 rqt.tv_sec = tusleep / 1000000;
647 rqt.tv_nsec = (tusleep % 1000000) * 1000;
648 return clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL);
649 #else
650 return usleep(tusleep);
651 #endif /* if ( !defined(__APPLE__) && !defined(__OpenBSD__) ) */
652 }
653
/* Convert a timespec to seconds expressed as a double. */
static double _timespec_to_double (struct timespec *time)
{
double whole_secs = (double)time->tv_sec;
double frac_secs  = ((double)time->tv_nsec) / 1000000000.0;

return whole_secs + frac_secs;
}
658
_double_to_timespec(struct timespec * time,double dtime)659 static void _double_to_timespec (struct timespec *time, double dtime)
660 {
661 time->tv_sec = (time_t)floor(dtime);
662 time->tv_nsec = (long)((dtime-floor(dtime))*1000000000.0);
663 }
664
665 #define CLK_TPS 10
666 #define CLK_INIT (SIM_INITIAL_IPS/CLK_TPS)
667 static int32 sim_int_clk_tps;
668
sim_timer_clock_tick_svc(UNIT * uptr)669 static t_stat sim_timer_clock_tick_svc (UNIT *uptr)
670 {
671 sim_rtcn_calb (sim_int_clk_tps, SIM_INTERNAL_CLK);
672 sim_activate_after (uptr, 1000000/sim_int_clk_tps); /* reactivate unit */
673 return SCPE_OK;
674 }
675
/* Select which timer drives clock calibration.
 *
 * Picks the first registered timer ticking no faster than the host
 * clock; when none exists, the internal calibrated timer is started.
 * When the selection moves off the internal timer, that timer is
 * stopped and its coscheduled units are migrated to the standard event
 * queue so they can requeue themselves.
 *
 * Fixes vs. prior version: the redundant duplicate store to
 * sim_calb_tmr (once in the else branch and once after it) is
 * collapsed to a single assignment, and the "Changing" debug trace no
 * longer indexes rtc_hz[-1] when no timer was previously selected.
 */
static void _rtcn_configure_calibrated_clock (int32 newtmr)
{
int32 tmr;

/* Look for a timer running slower than the host system clock */
sim_int_clk_tps = MIN(CLK_TPS, sim_os_tick_hz);
for (tmr=0; tmr<SIM_NTIMERS; tmr++) {
    if ((rtc_hz[tmr]) &&
        (rtc_hz[tmr] <= (uint32)sim_os_tick_hz))
        break;
    }
if (tmr == SIM_NTIMERS) {                   /* None found? */
    if ((tmr != newtmr) && (!sim_is_active (&SIM_INTERNAL_UNIT))) {
        /* Start the internal timer */
        sim_calb_tmr = SIM_NTIMERS;
        sim_debug (DBG_CAL, &sim_timer_dev, "_rtcn_configure_calibrated_clock() - Starting Internal Calibrated Timer at %dHz\n", sim_int_clk_tps);
        SIM_INTERNAL_UNIT.action = &sim_timer_clock_tick_svc;
        SIM_INTERNAL_UNIT.flags = UNIT_DIS | UNIT_IDLE;
        sim_activate_abs (&SIM_INTERNAL_UNIT, 0);
        sim_rtcn_init_unit (&SIM_INTERNAL_UNIT, (CLK_INIT*CLK_TPS)/sim_int_clk_tps, SIM_INTERNAL_CLK);
        }
    return;
    }
if ((tmr == newtmr) &&
    (sim_calb_tmr == newtmr))               /* already set? */
    return;
if (sim_calb_tmr == SIM_NTIMERS) {          /* was old the internal timer? */
    sim_debug (DBG_CAL, &sim_timer_dev, "_rtcn_configure_calibrated_clock() - Stopping Internal Calibrated Timer, New Timer = %d (%dHz)\n", tmr, rtc_hz[tmr]);
    rtc_initd[SIM_NTIMERS] = 0;
    rtc_hz[SIM_NTIMERS] = 0;
    sim_cancel (&SIM_INTERNAL_UNIT);
    /* Migrate any coscheduled devices to the standard queue and they will requeue themselves */
    while (sim_clock_cosched_queue[SIM_NTIMERS] != QUEUE_LIST_END) {
        UNIT *uptr = sim_clock_cosched_queue[SIM_NTIMERS];
        _sim_coschedule_cancel (uptr);
        _sim_activate (uptr, 1);
        }
    }
else
    /* guard the rtc_hz index: sim_calb_tmr is -1 before any selection */
    sim_debug (DBG_CAL, &sim_timer_dev, "_rtcn_configure_calibrated_clock() - Changing Calibrated Timer from %d (%dHz) to %d (%dHz)\n",
               sim_calb_tmr, (sim_calb_tmr >= 0) ? rtc_hz[sim_calb_tmr] : 0, tmr, rtc_hz[tmr]);
sim_calb_tmr = tmr;
}
720
sim_timer_clock_reset(DEVICE * dptr)721 static t_stat sim_timer_clock_reset (DEVICE *dptr)
722 {
723 sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_clock_reset()\n");
724 _rtcn_configure_calibrated_clock (sim_calb_tmr);
725 if (sim_switches & SWMASK ('P')) {
726 sim_cancel (&SIM_INTERNAL_UNIT);
727 sim_calb_tmr = -1;
728 }
729 return SCPE_OK;
730 }
731
sim_start_timer_services(void)732 void sim_start_timer_services (void)
733 {
734 sim_debug (DBG_TRC, &sim_timer_dev, "sim_start_timer_services()\n");
735 _rtcn_configure_calibrated_clock (sim_calb_tmr);
736 }
737
/* Shut timer services down when instruction execution stops.
 *
 * For every registered clock: cancels its assist unit, makes sure the
 * clock unit itself still has a tick queued, and moves all coscheduled
 * units onto the standard event queue at their accumulated tick offsets.
 * Finally stops the internal timer and shadows the calibrated-timer
 * state so SHOW commands still have values at the sim> prompt.
 */
void sim_stop_timer_services (void)
{
int tmr;

sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services()\n");

for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
    int32 accum;

    if (sim_clock_unit[tmr]) {
        /* Stop clock assist unit and make sure the clock unit has a tick queued */
        sim_cancel (&sim_timer_units[tmr]);
        if (rtc_hz[tmr])
            sim_activate (sim_clock_unit[tmr], rtc_currd[tmr]);
        /* Move coscheduled units to the standard event queue */
        accum = 1;
        while (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
            UNIT *cptr = sim_clock_cosched_queue[tmr];

            sim_clock_cosched_queue[tmr] = cptr->next;
            cptr->next = NULL;
            cptr->cancel = NULL;

            /* accumulate tick offsets so relative ordering is preserved */
            accum += cptr->time;
            _sim_activate (cptr, accum*rtc_currd[tmr]);
            }
        }
    }
sim_cancel (&SIM_INTERNAL_UNIT);                    /* Make sure Internal Timer is stopped */
sim_calb_tmr_last = sim_calb_tmr;                   /* Save calibrated timer value for display */
sim_inst_per_sec_last = sim_timer_inst_per_sec ();  /* Save execution rate for display */
sim_calb_tmr = -1;
}
771
772 /* Instruction Execution rate. */
773
sim_timer_inst_per_sec(void)774 double sim_timer_inst_per_sec (void)
775 {
776 double inst_per_sec = SIM_INITIAL_IPS;
777
778 if (sim_calb_tmr == -1)
779 return inst_per_sec;
780 inst_per_sec = ((double)rtc_currd[sim_calb_tmr])*rtc_hz[sim_calb_tmr];
781 if (0 == inst_per_sec)
782 inst_per_sec = ((double)rtc_currd[sim_calb_tmr])*sim_int_clk_tps;
783 return inst_per_sec;
784 }
785
sim_timer_activate(UNIT * uptr,int32 interval)786 t_stat sim_timer_activate (UNIT *uptr, int32 interval)
787 {
788 return sim_timer_activate_after (uptr, (uint32)((interval * 1000000.0) / sim_timer_inst_per_sec ()));
789 }
790
/* Schedule uptr to run usec_delay wall-clock microseconds from now.
 *
 * A clock unit is transparently replaced by its timer-assist unit.
 * The delay is converted to instructions at the current execution rate,
 * clamped to INT32_MAX, and floored at one instruction for any non-zero
 * request.  A unit that is already active is left untouched (SCPE_OK).
 */
t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay)
{
int inst_delay, tmr;
double inst_delay_d, inst_per_sec;

/* If this is a clock unit, we need to schedule the related timer unit instead */
for (tmr=0; tmr<=SIM_NTIMERS; tmr++)
    if (sim_clock_unit[tmr] == uptr) {
        uptr = &sim_timer_units[tmr];
        break;
        }
if (sim_is_active (uptr))                   /* already active? */
    return SCPE_OK;
inst_per_sec = sim_timer_inst_per_sec ();
inst_delay_d = ((inst_per_sec*usec_delay)/1000000.0);
/* Bound delay to avoid overflow. */
/* Long delays are usually canceled before they expire */
if (inst_delay_d > (double)0x7fffffff)
    inst_delay_d = (double)0x7fffffff;
inst_delay = (int32)inst_delay_d;
if ((inst_delay == 0) && (usec_delay != 0))
    inst_delay = 1;                         /* Minimum non-zero delay is 1 instruction */
sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue addition %s at %d (%d usecs)\n",
           sim_uname(uptr), inst_delay, usec_delay);
return _sim_activate (uptr, inst_delay);    /* queue it now */
}
817
/* Register (uptr != NULL) or deregister (uptr == NULL) the clock unit
 * for timer tmr.
 *
 * Deregistration drains the timer's coschedule queue, re-activating
 * each queued unit on the standard event queue.  Registration marks the
 * unit as a timer unit and initializes the coschedule queue on first
 * use.  Returns SCPE_IERR for an invalid timer number, else SCPE_OK.
 */
t_stat sim_register_clock_unit_tmr (UNIT *uptr, int32 tmr)
{
if (tmr == SIM_INTERNAL_CLK)
    tmr = SIM_NTIMERS;
else {
    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
        return SCPE_IERR;
    }
if (NULL == uptr) {                         /* deregistering? */
    while (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
        UNIT *uptr = sim_clock_cosched_queue[tmr];

        _sim_coschedule_cancel (uptr);
        _sim_activate (uptr, 1);
        }
    sim_clock_unit[tmr] = NULL;
    return SCPE_OK;
    }
if (NULL == sim_clock_unit[tmr])
    sim_clock_cosched_queue[tmr] = QUEUE_LIST_END;  /* first registration: init queue */
sim_clock_unit[tmr] = uptr;
uptr->dynflags |= UNIT_TMR_UNIT;
sim_timer_units[tmr].flags = UNIT_DIS | (sim_clock_unit[tmr] ? UNIT_IDLE : 0);
return SCPE_OK;
}
843
844 /* Cancel a unit on the coschedule queue */
_sim_coschedule_cancel(UNIT * uptr)845 static void _sim_coschedule_cancel (UNIT *uptr)
846 {
847 if (uptr->next) { /* On a queue? */
848 int tmr;
849
850 for (tmr=0; tmr<SIM_NTIMERS; tmr++) {
851 if (uptr == sim_clock_cosched_queue[tmr]) {
852 sim_clock_cosched_queue[tmr] = uptr->next;
853 uptr->next = NULL;
854 }
855 else {
856 UNIT *cptr;
857 for (cptr = sim_clock_cosched_queue[tmr];
858 (cptr != QUEUE_LIST_END);
859 cptr = cptr->next)
860 if (cptr->next == (uptr)) {
861 cptr->next = (uptr)->next;
862 uptr->next = NULL;
863 break;
864 }
865 }
866 if (uptr->next == NULL) { /* found? */
867 uptr->cancel = NULL;
868 sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceled Clock Coscheduled Event for %s\n", sim_uname(uptr));
869 return;
870 }
871 }
872 }
873 }
874
875