#ifdef SCPU_CPP
2
dma_add_clocks(unsigned clocks)3 void sCPU::dma_add_clocks(unsigned clocks) {
4 status.dma_clocks += clocks;
5 add_clocks(clocks);
6 scheduler.sync_cpucop();
7 scheduler.sync_cpuppu();
8 }
9
dma_addr_valid(uint32 abus)10 bool sCPU::dma_addr_valid(uint32 abus) {
11 //reads from B-bus or S-CPU registers are invalid
12 if((abus & 0x40ff00) == 0x2100) return false; //$[00-3f|80-bf]:[2100-21ff]
13 if((abus & 0x40fe00) == 0x4000) return false; //$[00-3f|80-bf]:[4000-41ff]
14 if((abus & 0x40ffe0) == 0x4200) return false; //$[00-3f|80-bf]:[4200-421f]
15 if((abus & 0x40ff80) == 0x4300) return false; //$[00-3f|80-bf]:[4300-437f]
16 return true;
17 }
18
dma_read(uint32 abus)19 uint8 sCPU::dma_read(uint32 abus) {
20 if(dma_addr_valid(abus) == false) return 0x00; //does not return S-CPU MDR
21 return bus.read(abus);
22 }
23
dma_transfer(bool direction,uint8 bbus,uint32 abus)24 void sCPU::dma_transfer(bool direction, uint8 bbus, uint32 abus) {
25 if(direction == 0) {
26 //a->b transfer (to $21xx)
27 if(bbus == 0x80 && ((abus & 0xfe0000) == 0x7e0000 || (abus & 0x40e000) == 0x0000)) {
28 //illegal WRAM->WRAM transfer (bus conflict)
29 //read most likely occurs; no write occurs
30 //read is irrelevent, as it cannot be observed by software
31 dma_add_clocks(8);
32 } else {
33 dma_add_clocks(4);
34 uint8 data = dma_read(abus);
35 dma_add_clocks(4);
36 bus.write(0x2100 | bbus, data);
37 }
38 } else {
39 //b->a transfer (from $21xx)
40 if(bbus == 0x80 && ((abus & 0xfe0000) == 0x7e0000 || (abus & 0x40e000) == 0x0000)) {
41 //illegal WRAM->WRAM transfer (bus conflict)
42 //no read occurs; write does occur
43 dma_add_clocks(8);
44 bus.write(abus, 0x00); //does not write S-CPU MDR
45 } else {
46 dma_add_clocks(4);
47 uint8 data = bus.read(0x2100 | bbus);
48 dma_add_clocks(4);
49 if(dma_addr_valid(abus) == true) {
50 bus.write(abus, data);
51 }
52 }
53 }
54 }
55
/*****
 * address calculation functions
 *****/
59
dma_bbus(uint8 i,uint8 index)60 uint8 sCPU::dma_bbus(uint8 i, uint8 index) {
61 switch(channel[i].xfermode) { default:
62 case 0: return (channel[i].destaddr); //0
63 case 1: return (channel[i].destaddr + (index & 1)); //0,1
64 case 2: return (channel[i].destaddr); //0,0
65 case 3: return (channel[i].destaddr + ((index >> 1) & 1)); //0,0,1,1
66 case 4: return (channel[i].destaddr + (index & 3)); //0,1,2,3
67 case 5: return (channel[i].destaddr + (index & 1)); //0,1,0,1
68 case 6: return (channel[i].destaddr); //0,0 [2]
69 case 7: return (channel[i].destaddr + ((index >> 1) & 1)); //0,0,1,1 [3]
70 }
71 }
72
dma_addr(uint8 i)73 inline uint32 sCPU::dma_addr(uint8 i) {
74 uint32 r = (channel[i].srcbank << 16) | (channel[i].srcaddr);
75
76 if(channel[i].fixedxfer == false) {
77 if(channel[i].reversexfer == false) {
78 channel[i].srcaddr++;
79 } else {
80 channel[i].srcaddr--;
81 }
82 }
83
84 return r;
85 }
86
hdma_addr(uint8 i)87 inline uint32 sCPU::hdma_addr(uint8 i) {
88 return (channel[i].srcbank << 16) | (channel[i].hdma_addr++);
89 }
90
hdma_iaddr(uint8 i)91 inline uint32 sCPU::hdma_iaddr(uint8 i) {
92 return (channel[i].hdma_ibank << 16) | (channel[i].hdma_iaddr++);
93 }
94
/*****
 * DMA functions
 *****/
98
dma_enabled_channels()99 uint8 sCPU::dma_enabled_channels() {
100 uint8 r = 0;
101 for(unsigned i = 0; i < 8; i++) {
102 if(channel[i].dma_enabled) r++;
103 }
104 return r;
105 }
106
dma_run()107 void sCPU::dma_run() {
108 dma_add_clocks(8);
109 cycle_edge();
110
111 for(unsigned i = 0; i < 8; i++) {
112 if(channel[i].dma_enabled == false) continue;
113 dma_add_clocks(8);
114 cycle_edge();
115
116 unsigned index = 0;
117 do {
118 dma_transfer(channel[i].direction, dma_bbus(i, index++), dma_addr(i));
119 cycle_edge();
120 } while(channel[i].dma_enabled && --channel[i].xfersize);
121
122 channel[i].dma_enabled = false;
123 }
124
125 status.irq_lock = true;
126 event.enqueue(2, EventIrqLockRelease);
127 }
128
/*****
 * HDMA functions
 *****/
132
hdma_active(uint8 i)133 inline bool sCPU::hdma_active(uint8 i) {
134 return (channel[i].hdma_enabled && !channel[i].hdma_completed);
135 }
136
hdma_active_after(uint8 i)137 inline bool sCPU::hdma_active_after(uint8 i) {
138 for(unsigned n = i + 1; n < 8; n++) {
139 if(hdma_active(n) == true) return true;
140 }
141 return false;
142 }
143
hdma_enabled_channels()144 inline uint8 sCPU::hdma_enabled_channels() {
145 uint8 r = 0;
146 for(unsigned i = 0; i < 8; i++) {
147 if(channel[i].hdma_enabled) r++;
148 }
149 return r;
150 }
151
hdma_active_channels()152 inline uint8 sCPU::hdma_active_channels() {
153 uint8 r = 0;
154 for(unsigned i = 0; i < 8; i++) {
155 if(hdma_active(i) == true) r++;
156 }
157 return r;
158 }
159
hdma_update(uint8 i)160 void sCPU::hdma_update(uint8 i) {
161 channel[i].hdma_line_counter = dma_read(hdma_addr(i));
162 dma_add_clocks(8);
163
164 channel[i].hdma_completed = (channel[i].hdma_line_counter == 0);
165 channel[i].hdma_do_transfer = !channel[i].hdma_completed;
166
167 if(channel[i].hdma_indirect) {
168 channel[i].hdma_iaddr = dma_read(hdma_addr(i)) << 8;
169 dma_add_clocks(8);
170
171 if(!channel[i].hdma_completed || hdma_active_after(i)) {
172 channel[i].hdma_iaddr >>= 8;
173 channel[i].hdma_iaddr |= dma_read(hdma_addr(i)) << 8;
174 dma_add_clocks(8);
175 }
176 }
177 }
178
hdma_run()179 void sCPU::hdma_run() {
180 dma_add_clocks(8);
181
182 for(unsigned i = 0; i < 8; i++) {
183 if(hdma_active(i) == false) continue;
184 channel[i].dma_enabled = false; //HDMA run during DMA will stop DMA mid-transfer
185
186 if(channel[i].hdma_do_transfer) {
187 static const unsigned transfer_length[8] = { 1, 2, 2, 4, 4, 4, 2, 4 };
188 unsigned length = transfer_length[channel[i].xfermode];
189 for(unsigned index = 0; index < length; index++) {
190 unsigned addr = !channel[i].hdma_indirect ? hdma_addr(i) : hdma_iaddr(i);
191 dma_transfer(channel[i].direction, dma_bbus(i, index), addr);
192 }
193 }
194 }
195
196 for(unsigned i = 0; i < 8; i++) {
197 if(hdma_active(i) == false) continue;
198
199 channel[i].hdma_line_counter--;
200 channel[i].hdma_do_transfer = bool(channel[i].hdma_line_counter & 0x80);
201 if((channel[i].hdma_line_counter & 0x7f) == 0) {
202 hdma_update(i);
203 } else {
204 dma_add_clocks(8);
205 }
206 }
207
208 status.irq_lock = true;
209 event.enqueue(2, EventIrqLockRelease);
210 }
211
hdma_init_reset()212 void sCPU::hdma_init_reset() {
213 for(unsigned i = 0; i < 8; i++) {
214 channel[i].hdma_completed = false;
215 channel[i].hdma_do_transfer = false;
216 }
217 }
218
hdma_init()219 void sCPU::hdma_init() {
220 dma_add_clocks(8);
221
222 for(unsigned i = 0; i < 8; i++) {
223 if(!channel[i].hdma_enabled) continue;
224 channel[i].dma_enabled = false; //HDMA init during DMA will stop DMA mid-transfer
225
226 channel[i].hdma_addr = channel[i].srcaddr;
227 hdma_update(i);
228 }
229
230 status.irq_lock = true;
231 event.enqueue(2, EventIrqLockRelease);
232 }
233
/*****
 * power / reset functions
 *****/
237
dma_power()238 void sCPU::dma_power() {
239 for(unsigned i = 0; i < 8; i++) {
240 channel[i].dmap = 0xff;
241 channel[i].direction = 1;
242 channel[i].hdma_indirect = true;
243 channel[i].reversexfer = true;
244 channel[i].fixedxfer = true;
245 channel[i].xfermode = 7;
246
247 channel[i].destaddr = 0xff;
248
249 channel[i].srcaddr = 0xffff;
250 channel[i].srcbank = 0xff;
251
252 channel[i].xfersize = 0xffff;
253 //channel[i].hdma_iaddr = 0xffff; //union with xfersize
254 channel[i].hdma_ibank = 0xff;
255
256 channel[i].hdma_addr = 0xffff;
257 channel[i].hdma_line_counter = 0xff;
258 channel[i].unknown = 0xff;
259 }
260 }
261
dma_reset()262 void sCPU::dma_reset() {
263 for(unsigned i = 0; i < 8; i++) {
264 channel[i].dma_enabled = false;
265 channel[i].hdma_enabled = false;
266
267 channel[i].hdma_completed = false;
268 channel[i].hdma_do_transfer = false;
269 }
270 }
271
#endif
273