1 /* Copyright (C) 2015  Fabian Vogt
2  * Modified for the CE calculator by CEmu developers
3  *
4  * This program is free software: you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation, either version 3 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13 */
14 
15 #include "schedule.h"
16 #include "cpu.h"
17 #include "emu.h"
18 #include "debug/debug.h"
19 
20 #include <assert.h>
21 #include <string.h>
22 #include <stdio.h>
23 
/* Global scheduler state: per-item schedule entries, clock rate table,
 * and next-event / next-DMA bookkeeping. */
sched_state_t sched;
25 
/* Rescale a from a clock of rate c to a clock of rate b, rounding down.
 * The equal-rate case is expected to dominate and skips the 64-bit math. */
static uint32_t muldiv_floor(uint32_t a, uint32_t b, uint32_t c) {
    uint64_t scaled;
    if (likely(b == c)) {
        return a;
    }
    scaled = (uint64_t)a * b / c;
    return (uint32_t)scaled;
}
/* Rescale a from a clock of rate c to a clock of rate b, rounding up.
 * The equal-rate case is expected to dominate and skips the 64-bit math. */
static uint32_t muldiv_ceil(uint32_t a, uint32_t b, uint32_t c) {
    uint64_t numer;
    if (likely(b == c)) {
        return a;
    }
    numer = (uint64_t)a * b + (c - 1);
    return (uint32_t)(numer / c);
}
34 
sched_run_event(enum sched_item_id id)35 void sched_run_event(enum sched_item_id id) {
36     (void)id;
37     sched.run_event_triggered = true;
38 }
39 
sched_event_next_cycle(void)40 uint32_t sched_event_next_cycle(void) {
41     enum sched_item_id next = sched.event.next;
42     assert(next >= SCHED_SECOND && next <= SCHED_LAST_EVENT);
43     assert(!sched.items[next].second || next == SCHED_SECOND);
44     return sched.event.cycle ? sched.event.cycle : sched.items[next].cycle;
45 }
46 
/* Record id as the next pending event, then let the CPU core recompute
 * how many cycles it may run before stopping for that event. */
static void sched_update_next(enum sched_item_id id) {
    sched.event.next = id;
    cpu_restore_next();
}
51 
sched_before(enum sched_item_id a_id,enum sched_item_id b_id)52 static bool sched_before(enum sched_item_id a_id, enum sched_item_id b_id) {
53     struct sched_item *a = &sched.items[a_id], *b = &sched.items[b_id];
54     return a->second <= b->second && a->cycle < b->cycle;
55 }
56 
/* Recompute the relevant "next" pointer after item id changed.
 * SCHED_SECOND and SCHED_PREV_MA act as full-rescan requests for the
 * event and DMA groups respectively; a concrete item id only checks
 * whether that item now fires before the current "next". */
static void sched_update(enum sched_item_id id) {
    struct sched_item *item = &sched.items[id];
    if (id == SCHED_SECOND) {
        /* The second boundary is always a candidate; start from it and
         * let every event item compete for the "next" slot. */
        sched_update_next(id);
        for (id = SCHED_FIRST_EVENT; id <= SCHED_LAST_EVENT; id++) {
            sched_update(id);
        }
    } else if (id >= SCHED_FIRST_EVENT && id <= SCHED_LAST_EVENT) {
        /* An event preempts the current next event only if it has a
         * callback, fires within the current second, and is earlier. */
        if (item->callback.event && !item->second &&
            item->cycle < sched_event_next_cycle()) {
            sched_update_next(id);
        }
    } else if (id == SCHED_PREV_MA) {
        /* SCHED_PREV_MA doubles as the "no DMA pending" sentinel. */
        sched.dma.next = id;
        for (id = SCHED_FIRST_DMA; id <= SCHED_LAST_DMA; id++) {
            sched_update(id);
        }
    } else if (id >= SCHED_FIRST_DMA && id <= SCHED_LAST_DMA) {
        /* A DMA item wins the slot if it is active, has a callback, and
         * either nothing is pending or it fires before the current next. */
        if (item->callback.dma && sched_active(id) &&
            (sched.dma.next == SCHED_PREV_MA || sched_before(id, sched.dma.next))) {
            sched.dma.next = id;
        }
    }
}
81 
sched_schedule(enum sched_item_id id,int32_t seconds,uint64_t ticks)82 static void sched_schedule(enum sched_item_id id, int32_t seconds, uint64_t ticks) {
83     struct sched_item *item = &sched.items[id];
84     item->second = seconds + ticks / sched.clockRates[item->clock];
85     item->tick = ticks % sched.clockRates[item->clock];
86     item->cycle = muldiv_ceil(item->tick, sched.clockRates[CLOCK_CPU], sched.clockRates[item->clock]);
87     if (id == sched.event.next) {
88         sched_update(SCHED_SECOND);
89     } else if (id == sched.dma.next) {
90         sched_update(SCHED_PREV_MA);
91     } else {
92         sched_update(id);
93     }
94 }
95 
/* Schedule item id relative to item base: at base's time plus offset
 * ticks of base's clock (converted into id's clock, rounding up) plus
 * ticks of id's own clock. base may be a cleared item: the expression
 * second ^ second >> 31 undoes the bitwise complement sched_clear applies,
 * recovering the original non-negative second (assumes an arithmetic
 * right shift on a signed 32-bit field — NOTE(review): confirm the
 * declared type of the second member). */
void sched_repeat_relative(enum sched_item_id id, enum sched_item_id base, uint32_t offset, uint64_t ticks) {
    struct sched_item *item = &sched.items[base];
    sched_schedule(id, item->second ^ item->second >> 31, muldiv_ceil(item->tick + offset, sched.clockRates[sched.items[id].clock], sched.clockRates[item->clock]) + ticks);
}
100 
/* Reschedule item id ticks (in its own clock) after its currently
 * scheduled time; works even if the item has been cleared. */
void sched_repeat(enum sched_item_id id, uint64_t ticks) {
    sched_repeat_relative(id, id, 0, ticks);
}
104 
/* Schedule item id ticks (in its own clock) from the current CPU time:
 * the CPU's cycle position is converted into the item's clock domain,
 * rounding down, before adding ticks. */
void sched_set(enum sched_item_id id, uint64_t ticks) {
    sched_schedule(id, 0, muldiv_floor(cpu.cycles, sched.clockRates[sched.items[id].clock], sched.clockRates[CLOCK_CPU]) + ticks);
}
108 
sched_clear(enum sched_item_id id)109 void sched_clear(enum sched_item_id id) {
110     struct sched_item *item = &sched.items[id];
111     if (sched_active(id)) {
112         item->second = ~item->second;
113         if (id == sched.event.next) {
114             sched_update(SCHED_SECOND);
115         } else if (id == sched.dma.next) {
116             sched_update(SCHED_PREV_MA);
117         }
118     }
119 }
120 
sched_active(enum sched_item_id id)121 bool sched_active(enum sched_item_id id) {
122     return sched.items[id].second >= 0;
123 }
124 
sched_cycle(enum sched_item_id id)125 uint64_t sched_cycle(enum sched_item_id id) {
126     struct sched_item *item = &sched.items[id];
127     assert(sched_active(id));
128     return (uint64_t)item->second * sched.clockRates[CLOCK_CPU] + item->cycle;
129 }
130 
/* CPU cycles from now until active item id fires. */
uint64_t sched_cycles_remaining(enum sched_item_id id) {
    return sched_cycle(id) - cpu.cycles;
}
134 
sched_tick(enum sched_item_id id)135 uint64_t sched_tick(enum sched_item_id id) {
136     struct sched_item *item = &sched.items[id];
137     assert(item->second >= 0);
138     return (uint64_t)item->second * sched.clockRates[item->clock] + item->tick;
139 }
140 
/* Ticks, in item id's own clock, from now until the item fires; the
 * CPU's current cycle position is converted into that clock first. */
uint64_t sched_ticks_remaining(enum sched_item_id id) {
    return sched_tick(id) - muldiv_floor(cpu.cycles, sched.clockRates[sched.items[id].clock], sched.clockRates[CLOCK_CPU]);
}
144 
/* Fire every event whose cycle has been reached. Each item is cleared
 * before its callback runs so the callback may freely reschedule it. */
void sched_process_pending_events(void) {
    while (sched_event_next_cycle() <= cpu.cycles) {
        enum sched_item_id id = sched.event.next;
        sched_clear(id);
        sched.items[id].callback.event(id);
    }
}
152 
/* Run all DMA transfers that are due, then charge the CPU `duration`
 * cycles for its own memory access, stalling it if the bus is busy. */
void sched_process_pending_dma(uint8_t duration) {
    /* While an event is being forced (event.cycle nonzero), DMA is
     * suppressed and the access just consumes cycles. */
    if (sched.event.cycle) {
        cpu.cycles += duration;
        return;
    }
    /* SCHED_PREV_MA tracks when the bus last became free and doubles as
     * the "no DMA pending" sentinel in sched.dma.next. */
    while (true) {
        enum sched_item_id id = sched.dma.next;
        if (id == SCHED_PREV_MA) {
            break;
        }
        if (sched_before(SCHED_PREV_MA, id)) {
            /* Bus frees up before the DMA is due: stop if the DMA is
             * still in the future, otherwise advance the bus-free mark
             * to the DMA's due time. */
            if (sched_cycle(id) > cpu.cycles) {
                break;
            }
            sched_repeat_relative(SCHED_PREV_MA, id, 0, 0);
        } else if (sched_cycle(SCHED_PREV_MA) > cpu.cycles) {
            break;
        }
        sched_clear(id);
        /* The DMA callback returns how long the transfer holds the bus. */
        sched_repeat(SCHED_PREV_MA, sched.items[id].callback.dma(id));
    }
    if (duration) {
        /* NOTE(review): sched_cycle returns uint64_t; the uint32_t local
         * assumes the bus-busy time never spans a full second — confirm. */
        uint32_t prev_cycle = sched_cycle(SCHED_PREV_MA);
        if (prev_cycle > cpu.cycles) {
            /* Stall the CPU until the bus is free, accounting DMA time. */
            cpu.dmaCycles += prev_cycle - cpu.cycles;
            cpu.cycles = prev_cycle;
        }
        cpu.cycles += duration;
    }
    /* Mark the bus busy up to the current time. */
    sched_set(SCHED_PREV_MA, 0);
}
184 
/* Event callback for the once-per-second item: shifts every active item
 * and the CPU counters down by one second so cycle counts stay small. */
static void sched_second(enum sched_item_id id) {
    /* Settle outstanding DMA before moving the epoch. */
    sched_process_pending_dma(0);
    for (id = SCHED_SECOND; id < SCHED_NUM_ITEMS; id++) {
        if (sched_active(id)) {
            sched.items[id].second--;
        }
    }
    cpu.seconds++;
    cpu.cycles -= sched.clockRates[CLOCK_CPU];
    cpu.eiDelay -= sched.clockRates[CLOCK_CPU];
    cpu.baseCycles += sched.clockRates[CLOCK_CPU];
    sched.items[SCHED_SECOND].second = 0; /* Don't use sched_repeat! */
    sched_update(SCHED_SECOND);
}
199 
/* Change clock's rate to new_rate, rescaling in-flight CPU state so each
 * scheduled item keeps the same absolute time. Ignored for a zero rate
 * or while an event is being forced (sched.event.cycle nonzero). */
void sched_set_clock(enum clock_id clock, uint32_t new_rate) {
    enum sched_item_id id;
    struct sched_item *item;
    uint64_t ticks;

    if (new_rate == 0 || sched.event.cycle) {
        return;
    }
    if (clock == CLOCK_CPU) {
        /* Rescale the cycle position within the current second while
         * keeping baseCycles + cycles unchanged across the rate switch. */
        cpu.baseCycles += cpu.cycles;
        cpu.cycles = muldiv_floor(cpu.cycles, new_rate, sched.clockRates[CLOCK_CPU]);
        cpu.baseCycles -= cpu.cycles;
        for (id = 0; id < SCHED_NUM_ITEMS; id++) {
            if (sched_active(id)) {
                item = &sched.items[id];
                if (item->clock == clock) {
                    /* Item runs on the CPU clock: renormalize its
                     * (second, tick) pair against the new rate. */
                    ticks = (uint64_t)item->second * sched.clockRates[item->clock] + item->tick;
                    item->second = ticks / new_rate;
                    item->tick = ticks % new_rate;
                    item->cycle = item->tick;
                } else {
                    /* Other clocks: only the CPU-cycle image of the tick
                     * position changes. */
                    item->cycle = muldiv_floor(item->tick, new_rate, sched.clockRates[item->clock]);
                }
            }
        }
    }
    sched_update(SCHED_SECOND);
    /* Updated last on purpose: the conversions above use the old rate. */
    sched.clockRates[clock] = new_rate;
}
229 
sched_get_clock_rate(enum clock_id clock)230 uint32_t sched_get_clock_rate(enum clock_id clock) {
231     return sched.clockRates[clock];
232 }
233 
/* Reset the scheduler: restore default clock rates and initialize the
 * built-in items (per-second rollover, run-loop event, DMA bus tracker). */
void sched_reset(void) {
    /* Default rates indexed by enum clock_id — presumably CPU first and
     * the 1 Hz clock last; verify against the enum declaration. */
    const uint32_t def_rates[CLOCK_NUM_ITEMS] = { 48000000, 60, 48000000, 24000000, 12000000, 6000000, 32768, 1 };

    memset(&sched, 0, sizeof sched);
    memcpy(sched.clockRates, def_rates, sizeof(def_rates));

    sched_update_next(sched.dma.next = SCHED_SECOND);

    /* The second item fires exactly once per second of the CLOCK_1 clock. */
    sched.items[SCHED_SECOND].callback.event = sched_second;
    sched.items[SCHED_SECOND].clock = CLOCK_1;
    sched.items[SCHED_SECOND].second = 0;
    sched.items[SCHED_SECOND].tick = 1;
    sched.items[SCHED_SECOND].cycle = sched.clockRates[CLOCK_CPU];

    sched.items[SCHED_RUN].clock = CLOCK_RUN;
    sched.items[SCHED_RUN].callback.event = sched_run_event;
    sched_set(SCHED_RUN, 0);

    /* DMA bus tracker runs on the 48 MHz clock. */
    sched.items[SCHED_PREV_MA].clock = CLOCK_48M;
    sched_set(SCHED_PREV_MA, 0);
}
255 
sched_total_cycles(void)256 uint64_t sched_total_cycles(void) {
257     return cpu.baseCycles + cpu.cycles;
258 }
259 
/* Total elapsed time since reset, expressed in ticks of the given clock:
 * whole seconds at that clock's rate plus the current within-second
 * position converted from CPU cycles (rounding down). */
uint64_t sched_total_time(enum clock_id clock) {
    return (uint64_t)cpu.seconds * sched.clockRates[clock] + muldiv_floor(cpu.cycles, sched.clockRates[clock], sched.clockRates[CLOCK_CPU]);
}
263 
/* Item id's CPU-cycle timestamp rebased against the next pending event
 * and offset by the CPU's accumulated base cycles — presumably used to
 * express a deadline in total-cycle terms; confirm against callers. */
uint64_t event_next_cycle(enum sched_item_id id) {
    struct sched_item *item = &sched.items[id];
    return (uint64_t)item->second * sched.clockRates[CLOCK_CPU] + item->cycle - sched_event_next_cycle() + cpu.baseCycles;
}
268 
sched_save(FILE * image)269 bool sched_save(FILE *image) {
270     return fwrite(&sched, sizeof(sched), 1, image) == 1;
271 }
272 
sched_restore(FILE * image)273 bool sched_restore(FILE *image) {
274     bool ret;
275     enum sched_item_id id;
276     union sched_callback callbacks[SCHED_NUM_ITEMS];
277 
278     for (id = 0; id < SCHED_NUM_ITEMS; id++) {
279         callbacks[id] = sched.items[id].callback;
280     }
281 
282     ret = fread(&sched, sizeof(sched), 1, image) == 1;
283 
284     for (id = 0; id < SCHED_NUM_ITEMS; id++) {
285         sched.items[id].callback = callbacks[id];
286     }
287 
288     return ret;
289 }
290