1 // license:BSD-3-Clause
2 // copyright-holders:Patrick Mackinlay
3
4 /*
5 * An implementation of the Fairchild/Intergraph Cache and Memory Management
6 * Unit (CAMMU) designed for use with the CLIPPER CPU family.
7 *
8 * The C100 and C300 designs used a pair of identical CAMMU devices, each
9 * containing a cache, TLB and dynamic translation unit. One device was
10 * configured and used for instruction memory, the other for data. It is
11 * possible to write to multiple CAMMU devices sharing a common system bus by
12 * using "global" register addresses.
13 *
14 * C400 designs initially implemented the memory management and cache functions
15 * using discrete logic, later using a more highly integrated memory management
16 * implementation, but still using discrete cache memory. In these systems, the
17 * mmu is consolidated into a single logical unit handling both instruction and
18 * data memory, with distinctly different program-visible architectures on the
19 * C4I and C4E/T devices. Almost no documentation for these has been located.
20 *
21 * Primary reference: http://bitsavers.org/pdf/fairchild/clipper/CLIPPER%20C300%2032-Bit%20Compute%20Engine.pdf
22 * Another reference: http://www.eecs.berkeley.edu/Pubs/TechRpts/1986/CSD-86-289.pdf
23 *
24 * TODO
25 * - c4 variants
26 * - fault register values
27 * - cache
28 * - bus errors
29 */
30
31 #include "emu.h"
32 #include "cammu.h"
33
34 #include <algorithm>
35
36 #define LOG_GENERAL (1U << 0)
37 #define LOG_ACCESS (1U << 1)
38 #define LOG_DTU (1U << 2)
39 #define LOG_TLB (1U << 3)
40
41 //#define VERBOSE (LOG_GENERAL | LOG_ACCESS | LOG_DTU)
42 #include "logmacro.h"
43
44 // each variant of the cammu has different registers and a different addressing map
// C4E/C4T CAMMU register map: dtu, fault and control registers in the
// low block, cache/tlb diagnostic access registers from 0x100
void cammu_c4t_device::map(address_map &map)
{
	// dtu, fault and control registers
	map(0x008, 0x00b).rw(FUNC(cammu_c4t_device::ram_line_r), FUNC(cammu_c4t_device::ram_line_w));
	map(0x010, 0x013).rw(FUNC(cammu_c4t_device::s_pdo_r), FUNC(cammu_c4t_device::s_pdo_w));
	map(0x018, 0x01b).rw(FUNC(cammu_c4t_device::u_pdo_r), FUNC(cammu_c4t_device::u_pdo_w));
	map(0x020, 0x023).rw(FUNC(cammu_c4t_device::htlb_offset_r), FUNC(cammu_c4t_device::htlb_offset_w));
	map(0x028, 0x02b).rw(FUNC(cammu_c4t_device::i_fault_r), FUNC(cammu_c4t_device::i_fault_w));
	map(0x030, 0x033).rw(FUNC(cammu_c4t_device::fault_address_1_r), FUNC(cammu_c4t_device::fault_address_1_w));
	map(0x038, 0x03b).rw(FUNC(cammu_c4t_device::fault_address_2_r), FUNC(cammu_c4t_device::fault_address_2_w));
	map(0x040, 0x043).rw(FUNC(cammu_c4t_device::fault_data_1_lo_r), FUNC(cammu_c4t_device::fault_data_1_lo_w));
	map(0x048, 0x04b).rw(FUNC(cammu_c4t_device::fault_data_1_hi_r), FUNC(cammu_c4t_device::fault_data_1_hi_w));
	map(0x050, 0x053).rw(FUNC(cammu_c4t_device::fault_data_2_lo_r), FUNC(cammu_c4t_device::fault_data_2_lo_w));
	map(0x058, 0x05b).rw(FUNC(cammu_c4t_device::fault_data_2_hi_r), FUNC(cammu_c4t_device::fault_data_2_hi_w));
	map(0x060, 0x063).rw(FUNC(cammu_c4t_device::c4_bus_poll_r), FUNC(cammu_c4t_device::c4_bus_poll_w));
	map(0x068, 0x06b).rw(FUNC(cammu_c4t_device::control_r), FUNC(cammu_c4t_device::control_w));
	map(0x070, 0x073).rw(FUNC(cammu_c4t_device::bio_control_r), FUNC(cammu_c4t_device::bio_control_w));
	map(0x078, 0x07b).rw(FUNC(cammu_c4t_device::bio_address_tag_r), FUNC(cammu_c4t_device::bio_address_tag_w));

	// cache and tlb diagnostic access registers
	map(0x100, 0x103).rw(FUNC(cammu_c4t_device::cache_data_lo_r), FUNC(cammu_c4t_device::cache_data_lo_w));
	map(0x104, 0x107).rw(FUNC(cammu_c4t_device::cache_data_hi_r), FUNC(cammu_c4t_device::cache_data_hi_w));
	map(0x108, 0x10b).rw(FUNC(cammu_c4t_device::cache_cpu_tag_r), FUNC(cammu_c4t_device::cache_cpu_tag_w));
	map(0x10c, 0x10f).rw(FUNC(cammu_c4t_device::cache_system_tag_valid_r), FUNC(cammu_c4t_device::cache_system_tag_valid_w));
	map(0x110, 0x113).rw(FUNC(cammu_c4t_device::cache_system_tag_r), FUNC(cammu_c4t_device::cache_system_tag_w));
	map(0x118, 0x11b).rw(FUNC(cammu_c4t_device::tlb_va_line_r), FUNC(cammu_c4t_device::tlb_va_line_w));
	map(0x11c, 0x11f).rw(FUNC(cammu_c4t_device::tlb_ra_line_r), FUNC(cammu_c4t_device::tlb_ra_line_w));
}
71
// C4I CAMMU register map: reset/tlb-clear and control registers in the
// low block, test and fault registers from 0x080
void cammu_c4i_device::map(address_map &map)
{
	// reset and tlb clear registers
	map(0x000, 0x003).rw(FUNC(cammu_c4i_device::reset_r), FUNC(cammu_c4i_device::reset_w));
	map(0x010, 0x013).rw(FUNC(cammu_c4i_device::s_pdo_r), FUNC(cammu_c4i_device::s_pdo_w));
	map(0x018, 0x01b).rw(FUNC(cammu_c4i_device::u_pdo_r), FUNC(cammu_c4i_device::u_pdo_w));
	map(0x020, 0x023).rw(FUNC(cammu_c4i_device::clr_s_data_tlb_r), FUNC(cammu_c4i_device::clr_s_data_tlb_w));
	map(0x028, 0x02b).rw(FUNC(cammu_c4i_device::clr_u_data_tlb_r), FUNC(cammu_c4i_device::clr_u_data_tlb_w));
	map(0x030, 0x033).rw(FUNC(cammu_c4i_device::clr_s_insn_tlb_r), FUNC(cammu_c4i_device::clr_s_insn_tlb_w));
	map(0x038, 0x03b).rw(FUNC(cammu_c4i_device::clr_u_insn_tlb_r), FUNC(cammu_c4i_device::clr_u_insn_tlb_w));

	map(0x068, 0x06b).rw(FUNC(cammu_c4i_device::control_r), FUNC(cammu_c4i_device::control_w));

	// test and fault registers
	map(0x080, 0x083).rw(FUNC(cammu_c4i_device::test_data_r), FUNC(cammu_c4i_device::test_data_w));
	map(0x088, 0x08b).rw(FUNC(cammu_c4i_device::i_fault_r), FUNC(cammu_c4i_device::i_fault_w));
	map(0x090, 0x093).rw(FUNC(cammu_c4i_device::fault_address_1_r), FUNC(cammu_c4i_device::fault_address_1_w));
	map(0x098, 0x09b).rw(FUNC(cammu_c4i_device::fault_address_2_r), FUNC(cammu_c4i_device::fault_address_2_w));
	map(0x0a0, 0x0a3).rw(FUNC(cammu_c4i_device::fault_data_1_lo_r), FUNC(cammu_c4i_device::fault_data_1_lo_w));
	map(0x0a8, 0x0ab).rw(FUNC(cammu_c4i_device::fault_data_1_hi_r), FUNC(cammu_c4i_device::fault_data_1_hi_w));
	map(0x0b0, 0x0b3).rw(FUNC(cammu_c4i_device::fault_data_2_lo_r), FUNC(cammu_c4i_device::fault_data_2_lo_w));
	map(0x0b8, 0x0bb).rw(FUNC(cammu_c4i_device::fault_data_2_hi_r), FUNC(cammu_c4i_device::fault_data_2_hi_w));
	map(0x0c0, 0x0c3).rw(FUNC(cammu_c4i_device::test_address_r), FUNC(cammu_c4i_device::test_address_w));
}
94
// device type definitions for the three emulated cammu variants
DEFINE_DEVICE_TYPE(CAMMU_C4T, cammu_c4t_device, "c4t", "C4E/C4T CAMMU")
DEFINE_DEVICE_TYPE(CAMMU_C4I, cammu_c4i_device, "c4i", "C4I CAMMU")
DEFINE_DEVICE_TYPE(CAMMU_C3, cammu_c3_device, "c3", "C1/C3 CAMMU")
98
// C4E/C4T constructor: zero-initialize the variant-specific registers
cammu_c4t_device::cammu_c4t_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock)
	: cammu_c4_device(mconfig, CAMMU_C4T, tag, owner, clock)
	, m_ram_line(0)
	, m_htlb_offset(0)
	, m_c4_bus_poll(0)
	, m_bio_control(0)
	, m_bio_address_tag(0)
	, m_cache_data_lo(0)
	, m_cache_data_hi(0)
	, m_cache_cpu_tag(0)
	, m_cache_system_tag_valid(0)
	, m_cache_system_tag(0)
	, m_tlb_va_line(0)
	, m_tlb_ra_line(0)
{
}
115
// C4I constructor: zero-initialize the variant-specific registers
cammu_c4i_device::cammu_c4i_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock)
	: cammu_c4_device(mconfig, CAMMU_C4I, tag, owner, clock)
	, m_reset(0)
	, m_clr_s_data_tlb(0)
	, m_clr_u_data_tlb(0)
	, m_clr_s_insn_tlb(0)
	, m_clr_u_insn_tlb(0)
	, m_test_data(0)
	, m_test_address(0)
{
}
127
// common C4 base constructor: zero-initialize the registers shared by
// the C4T and C4I variants
cammu_c4_device::cammu_c4_device(const machine_config &mconfig, device_type type, const char *tag, device_t *owner, uint32_t clock)
	: cammu_device(mconfig, type, tag, owner, clock)
	, m_s_pdo(0)
	, m_u_pdo(0)
	, m_control(0)
	, m_i_fault(0)
	, m_fault_address_1(0)
	, m_fault_address_2(0)
	, m_fault_data_1_lo(0)
	, m_fault_data_1_hi(0)
	, m_fault_data_2_lo(0)
	, m_fault_data_2_hi(0)
{
}
142
// C1/C3 constructor: the first entry of the linked-device list is this
// device itself; the control register starts with the C3 id field set
cammu_c3_device::cammu_c3_device(const machine_config &mconfig, const char *tag, device_t *owner, uint32_t clock)
	: cammu_device(mconfig, CAMMU_C3, tag, owner, clock)
	, m_linked{ this }
	, m_s_pdo(0)
	, m_u_pdo(0)
	, m_fault(0)
	, m_control(CID_C3)
{
}
152
// abstract base constructor: binds the exception callback used to
// deliver faults to the cpu
cammu_device::cammu_device(const machine_config &mconfig, device_type type, const char *tag, device_t *owner, uint32_t clock)
	: device_t(mconfig, type, tag, owner, clock)
	, m_exception_func(*this)
{
}
158
void cammu_device::device_start()
{
	// resolve the exception callback so faults can be raised at runtime
	m_exception_func.resolve();
}
163
void cammu_device::device_reset()
{
	// no base-class reset state
}
167
void cammu_c4_device::device_start()
{
	cammu_device::device_start();

	// register common C4 state for save states
	save_item(NAME(m_s_pdo));
	save_item(NAME(m_u_pdo));
	save_item(NAME(m_control));

	save_item(NAME(m_i_fault));
	save_item(NAME(m_fault_address_1));
	save_item(NAME(m_fault_address_2));
	save_item(NAME(m_fault_data_1_lo));
	save_item(NAME(m_fault_data_1_hi));
	save_item(NAME(m_fault_data_2_lo));
	save_item(NAME(m_fault_data_2_hi));
}
184
void cammu_c4i_device::device_start()
{
	cammu_c4_device::device_start();

	// register C4I-specific state for save states
	save_item(NAME(m_reset));
	save_item(NAME(m_clr_s_data_tlb));
	save_item(NAME(m_clr_u_data_tlb));
	save_item(NAME(m_clr_s_insn_tlb));
	save_item(NAME(m_clr_u_insn_tlb));
	save_item(NAME(m_test_data));
	save_item(NAME(m_test_address));
}
197
void cammu_c4t_device::device_start()
{
	cammu_c4_device::device_start();

	// register C4T-specific state for save states
	save_item(NAME(m_ram_line));
	save_item(NAME(m_htlb_offset));
	save_item(NAME(m_c4_bus_poll));
	save_item(NAME(m_bio_control));
	save_item(NAME(m_bio_address_tag));

	save_item(NAME(m_cache_data_lo));
	save_item(NAME(m_cache_data_hi));
	save_item(NAME(m_cache_cpu_tag));
	save_item(NAME(m_cache_system_tag_valid));
	save_item(NAME(m_cache_system_tag));
	save_item(NAME(m_tlb_va_line));
	save_item(NAME(m_tlb_ra_line));
}
216
void cammu_c3_device::device_start()
{
	cammu_device::device_start();

	// register C3 state for save states
	save_item(NAME(m_s_pdo));
	save_item(NAME(m_u_pdo));
	save_item(NAME(m_fault));
	save_item(NAME(m_control));

	// clear both compartments of every tlb set and attach a main-space
	// (ST0) cache handle to each line
	// NOTE(review): this dereferences m_memory[ST0].space, so set_spaces()
	// must have been called before device start -- confirm caller ordering
	for (tlb_set_t &tlb_set : m_tlb)
	{
		tlb_set.u = false;

		tlb_set.w.ra = tlb_set.w.va = 0;
		m_memory[ST0].space->cache(tlb_set.w.cache);

		tlb_set.x.ra = tlb_set.x.va = 0;
		m_memory[ST0].space->cache(tlb_set.x.cache);
	}
}
237
void cammu_c3_device::device_reset()
{
	cammu_device::device_reset();

	// preserve the cammu id field of the control register and force the
	// remaining fields to their fixed reset values
	m_control = (m_control & CNTL_CID) | (CNTL_ATE | UST_3 | CNTL_EWIR | CNTL_EWIW | CNTL_EWCW | CNTL_EP);
}
244
// install the address spaces backing each of the eight system tags:
// tags 0-3 and 6 use the main space, tag 4 the i/o space and tag 5 the
// boot space; a cache handle is then created for every tag
void cammu_device::set_spaces(address_space &main_space, address_space &io_space, address_space &boot_space)
{
	m_memory[ST0].space = &main_space;
	m_memory[ST1].space = &main_space;
	m_memory[ST2].space = &main_space;
	m_memory[ST3].space = &main_space;

	m_memory[ST4].space = &io_space;
	m_memory[ST5].space = &boot_space;
	m_memory[ST6].space = &main_space;

	// FIXME: this tag is probably not used, but if it is, need to figure
	// out how to implement it properly.
	m_memory[ST7].space = &main_space;

	// create a cache access handle for each system tag
	for (memory_t &memory : m_memory)
		memory.space->cache(memory.cache);
}
263
memory_translate(const u32 ssw,const int spacenum,const int intention,offs_t & address)264 bool cammu_device::memory_translate(const u32 ssw, const int spacenum, const int intention, offs_t &address)
265 {
266 // translate the address
267 translated_t translated = translate_address(ssw, address, BYTE,
268 (intention & TRANSLATE_TYPE_MASK) == TRANSLATE_READ ? READ :
269 (intention & TRANSLATE_TYPE_MASK) == TRANSLATE_WRITE ? WRITE :
270 EXECUTE);
271
272 // check that the requested space number matches the mapped space
273 if (translated.cache && translated.cache->space().spacenum() == spacenum)
274 {
275 address = translated.address;
276
277 return true;
278 }
279
280 return false;
281 }
282
// translate a virtual address into a memory-space cache handle and a
// physical address, raising alignment, page-fault and protection
// exceptions as needed; returns { nullptr, 0 } when translation fails
cammu_device::translated_t cammu_device::translate_address(const u32 ssw, const u32 virtual_address, const access_size size, const access_type mode)
{
	// get effective user/supervisor mode (data accesses also honour the
	// "user data" flag in the ssw)
	const bool user = (mode == EXECUTE) ? (ssw & SSW_U) : (ssw & (SSW_U | SSW_UU));

	// check for alignment faults (instruction fetches must be halfword
	// aligned; data accesses must be aligned to their access size)
	if (!machine().side_effects_disabled() && get_alignment())
	{
		if ((mode == EXECUTE && (virtual_address & 0x1)) || (mode != EXECUTE && virtual_address & (size - 1)))
		{
			set_fault(virtual_address, mode == EXECUTE ? EXCEPTION_I_ALIGNMENT_FAULT : EXCEPTION_D_ALIGNMENT_FAULT);

			return { nullptr, 0 };
		}
	}

	// in supervisor mode, the first 8 pages are always mapped via the hard-wired tlb
	if (!user && (virtual_address & ~0x7fff) == 0)
	{
		switch (virtual_address & 0x7000)
		{
		// pages 0-3: main space pages 0-3 (system tags 1, 2, 3, 3)
		case 0x0000: return { &m_memory[ST1].cache, virtual_address & 0x3fff };
		case 0x1000: return { &m_memory[ST2].cache, virtual_address & 0x3fff };
		case 0x2000: return { &m_memory[ST3].cache, virtual_address & 0x3fff };
		case 0x3000: return { &m_memory[ST3].cache, virtual_address & 0x3fff };

		// pages 4-5: i/o space pages 0-1
		case 0x4000: return { &m_memory[ST4].cache, virtual_address & 0x1fff };
		case 0x5000: return { &m_memory[ST4].cache, virtual_address & 0x1fff };

		// pages 6-7: boot space pages 0-1
		case 0x6000: return { &m_memory[ST5].cache, virtual_address & 0x1fff };
		case 0x7000: return { &m_memory[ST5].cache, virtual_address & 0x1fff };
		}
	}

	// if not in mapped mode, use unmapped system tag
	if ((ssw & SSW_M) == 0)
		return { &m_memory[get_ust_space()].cache, virtual_address };

	// get the page table entry
	pte_t pte = get_pte(virtual_address, user);

	// check for page faults
	if (pte.entry & PTE_F)
	{
		if (!machine().side_effects_disabled())
		{
			LOG("%s page fault address 0x%08x ssw 0x%08x pte 0x%08x (%s)\n",
				mode == EXECUTE ? "instruction" : "data",
				virtual_address, ssw, pte.entry, machine().describe_context());

			set_fault(virtual_address, mode == EXECUTE ? EXCEPTION_I_PAGE_FAULT : EXCEPTION_D_PAGE_FAULT);
		}

		return { nullptr, 0 };
	}

	// check for protection level faults (skipped for side-effect-free
	// debugger accesses)
	if (!machine().side_effects_disabled())
	{
		// here mode == EXECUTE, so passing mode is equivalent to EXECUTE
		if ((mode == EXECUTE) && !get_access(mode, pte.entry, ssw))
		{
			LOGMASKED(LOG_ACCESS, "execute protection fault address 0x%08x ssw 0x%08x pte 0x%08x (%s)\n",
				virtual_address, ssw, pte.entry, machine().describe_context());

			set_fault(virtual_address, EXCEPTION_I_EXECUTE_PROTECT_FAULT);

			return { nullptr, 0 };
		}

		if ((mode & READ) && !get_access(READ, pte.entry, ssw))
		{
			LOGMASKED(LOG_ACCESS, "read protection fault address 0x%08x ssw 0x%08x pte 0x%08x (%s)\n",
				virtual_address, ssw, pte.entry, machine().describe_context());

			set_fault(virtual_address, EXCEPTION_D_READ_PROTECT_FAULT);

			return { nullptr, 0 };
		}

		if ((mode & WRITE) && !get_access(WRITE, pte.entry, ssw))
		{
			LOGMASKED(LOG_ACCESS, "write protection fault address 0x%08x ssw 0x%08x pte 0x%08x (%s)\n",
				virtual_address, ssw, pte.entry, machine().describe_context());

			set_fault(virtual_address, EXCEPTION_D_WRITE_PROTECT_FAULT);

			return { nullptr, 0 };
		}
	}

	// set pte referenced and dirty flags
	// NOTE(review): this write-back is not gated on side_effects_disabled(),
	// so debugger accesses may update R/D bits -- confirm this is intended
	if ((mode & WRITE) && !(pte.entry & PTE_D))
		m_memory[ST0].cache.write_dword(pte.address, pte.entry | PTE_D | PTE_R);
	else if (!(pte.entry & PTE_R))
		m_memory[ST0].cache.write_dword(pte.address, pte.entry | PTE_R);

	// translate the address
	LOGMASKED(LOG_DTU, "%s address translated 0x%08x\n", mode == EXECUTE ? "instruction" : "data",
		(pte.entry & ~CAMMU_PAGE_MASK) | (virtual_address & CAMMU_PAGE_MASK));

	// return the system tag and translated address
	return { &m_memory[(pte.entry & PTE_ST) >> ST_SHIFT].cache, (pte.entry & ~CAMMU_PAGE_MASK) | (virtual_address & CAMMU_PAGE_MASK) };
}
389
// look up a virtual address in the two-way set-associative tlb; on a
// hit, return the matching line and update the set's lru flag (u set
// means the X line is least recently used); on a miss, return the
// least-recently-used line with its referenced bit cleared so the
// caller refills it from the page tables
cammu_c3_device::tlb_line_t &cammu_c3_device::tlb_lookup(const bool user, const u32 virtual_address, const access_type mode)
{
	// bits 17:12 of the virtual address select one of 64 sets
	const u8 set = (virtual_address >> 12) & 0x3f;
	tlb_set_t &tlb_set = m_tlb[set];

	// check w compartment: tag match plus the valid bit for the
	// effective user/supervisor mode
	if ((tlb_set.w.va & TLB_VA_VA) == (virtual_address & TLB_VA_VA) && (((user && (tlb_set.w.va & TLB_VA_UV)) || (!user && (tlb_set.w.va & TLB_VA_SV)))))
	{
		LOGMASKED(LOG_TLB, "tlb_lookup 0x%08x set %2d line W hit 0x%08x\n", virtual_address, set, tlb_set.w.ra);

		// mark x line least recently used
		tlb_set.u = true;

		return tlb_set.w;
	}

	// check x compartment
	if ((tlb_set.x.va & TLB_VA_VA) == (virtual_address & TLB_VA_VA) && (((user && (tlb_set.x.va & TLB_VA_UV))) || (!user && (tlb_set.x.va & TLB_VA_SV))))
	{
		LOGMASKED(LOG_TLB, "tlb_lookup 0x%08x set %2d line X hit 0x%08x\n", virtual_address, set, tlb_set.x.ra);

		// mark w line least recently used
		tlb_set.u = false;

		return tlb_set.x;
	}

	// miss: return the least recently used line for replacement,
	// clearing its referenced bit to flag it as invalid to the caller
	if (tlb_set.u)
	{
		LOGMASKED(LOG_TLB, "tlb_lookup 0x%08x set %2d line X miss\n", virtual_address, set);

		tlb_set.u = false;
		tlb_set.x.ra &= ~TLB_RA_R;

		return tlb_set.x;
	}
	else
	{
		LOGMASKED(LOG_TLB, "tlb_lookup 0x%08x set %2d line W miss\n", virtual_address, set);

		tlb_set.u = true;
		tlb_set.w.ra &= ~TLB_RA_R;

		return tlb_set.w;
	}
}
437
// C1/C3 address translation: like the generic version but backed by the
// translation lookaside buffer, falling back to a page table walk on a
// tlb miss; returns { nullptr, 0 } when translation fails
cammu_device::translated_t cammu_c3_device::translate_address(const u32 ssw, const u32 virtual_address, const access_size size, const access_type mode)
{
	// get effective user/supervisor mode (data accesses also honour the
	// "user data" flag in the ssw)
	const bool user = (mode == EXECUTE) ? (ssw & SSW_U) : (ssw & (SSW_U | SSW_UU));

	// check for alignment faults
	if (!machine().side_effects_disabled() && get_alignment())
	{
		if ((mode == EXECUTE && (virtual_address & 0x1)) || (mode != EXECUTE && virtual_address & (size - 1)))
		{
			set_fault(virtual_address, mode == EXECUTE ? EXCEPTION_I_ALIGNMENT_FAULT : EXCEPTION_D_ALIGNMENT_FAULT);

			return { nullptr, 0 };
		}
	}

	// in supervisor mode, the first 8 pages are always mapped via the hard-wired tlb
	if (!user && (virtual_address & ~0x7fff) == 0)
	{
		switch (virtual_address & 0x7000)
		{
		// pages 0-3: main space pages 0-3 (system tags 1, 2, 3, 3)
		case 0x0000: return { &m_memory[ST1].cache, virtual_address & 0x3fff };
		case 0x1000: return { &m_memory[ST2].cache, virtual_address & 0x3fff };
		case 0x2000: return { &m_memory[ST3].cache, virtual_address & 0x3fff };
		case 0x3000: return { &m_memory[ST3].cache, virtual_address & 0x3fff };

		// pages 4-5: i/o space pages 0-1
		case 0x4000: return { &m_memory[ST4].cache, virtual_address & 0x1fff };
		case 0x5000: return { &m_memory[ST4].cache, virtual_address & 0x1fff };

		// pages 6-7: boot space pages 0-1
		case 0x6000: return { &m_memory[ST5].cache, virtual_address & 0x1fff };
		case 0x7000: return { &m_memory[ST5].cache, virtual_address & 0x1fff };
		}
	}

	// if not in mapped mode, use unmapped system tag
	if ((ssw & SSW_M) == 0)
		return { &m_memory[get_ust_space()].cache, virtual_address };

	// check translation lookaside buffer
	tlb_line_t &tlbl = tlb_lookup(user, virtual_address, mode);

	// sentinel value: PTE_F set means the page table entry has not been
	// fetched yet (it is only fetched on a tlb miss or when flags must
	// be written back)
	pte_t pte = { PTE_F, 0 };

	// handle translation lookaside buffer miss (tlb_lookup clears the
	// referenced bit of the line selected for replacement)
	if (!(tlbl.ra & TLB_RA_R))
	{
		// get the page table entry
		pte = get_pte(virtual_address, user);

		// check for page faults
		if (pte.entry & PTE_F)
		{
			if (!machine().side_effects_disabled())
			{
				LOG("%s page fault address 0x%08x ssw 0x%08x pte 0x%08x (%s)\n",
					mode == EXECUTE ? "instruction" : "data",
					virtual_address, ssw, pte.entry, machine().describe_context());

				set_fault(virtual_address, mode == EXECUTE ? EXCEPTION_I_PAGE_FAULT : EXCEPTION_D_PAGE_FAULT);
			}

			return { nullptr, 0 };
		}

		// update tlb line from page table entry
		// FIXME: not sure if user/supervisor valid follow actual or effective mode?
		tlbl.va = (virtual_address & TLB_VA_VA) | (user ? TLB_VA_UV : TLB_VA_SV);
		tlbl.ra = pte.entry;
	}

	// check protection level (skipped for side-effect-free debugger accesses)
	if (!machine().side_effects_disabled())
	{
		if ((mode == EXECUTE) && !get_access(EXECUTE, tlbl.ra, ssw))
		{
			LOGMASKED(LOG_ACCESS, "execute protection fault address 0x%08x ssw 0x%08x (%s)\n",
				virtual_address, ssw, machine().describe_context());

			set_fault(virtual_address, EXCEPTION_I_EXECUTE_PROTECT_FAULT);

			return { nullptr, 0 };
		}
		if ((mode & READ) && !get_access(READ, tlbl.ra, ssw))
		{
			LOGMASKED(LOG_ACCESS, "read protection fault address 0x%08x ssw 0x%08x (%s)\n",
				virtual_address, ssw, machine().describe_context());

			set_fault(virtual_address, EXCEPTION_D_READ_PROTECT_FAULT);

			return { nullptr, 0 };
		}
		if ((mode & WRITE) && !get_access(WRITE, tlbl.ra, ssw))
		{
			LOGMASKED(LOG_ACCESS, "write protection fault address 0x%08x ssw 0x%08x (%s)\n",
				virtual_address, ssw, machine().describe_context());

			set_fault(virtual_address, EXCEPTION_D_WRITE_PROTECT_FAULT);

			return { nullptr, 0 };
		}
	}

	// update dirty flag (in both the tlb line and the in-memory pte)
	if ((mode & WRITE) && !(tlbl.ra & TLB_RA_D))
	{
		// fetch the page table entry if needed
		if (pte.entry & PTE_F)
			pte = get_pte(virtual_address, user);

		// set page table entry dirty flag
		if (!(pte.entry & PTE_D))
		{
			pte.entry |= PTE_D | PTE_R;
			m_memory[ST0].cache.write_dword(pte.address, pte.entry);
		}

		tlbl.ra |= TLB_RA_D | TLB_RA_R;
	}

	// update referenced flag (in both the tlb line and the in-memory pte)
	if (!(tlbl.ra & TLB_RA_R))
	{
		// fetch the page table entry if needed
		if (pte.entry & PTE_F)
			pte = get_pte(virtual_address, user);

		// set page table entry referenced flag
		if (!(pte.entry & PTE_R))
		{
			pte.entry |= PTE_R;
			m_memory[ST0].cache.write_dword(pte.address, pte.entry);
		}

		tlbl.ra |= TLB_RA_R;
	}

	// return the system tag and translated address
	LOGMASKED(LOG_DTU, "%s address translated 0x%08x\n", mode == EXECUTE ? "instruction" : "data",
		(tlbl.ra & TLB_RA_RA) | (virtual_address & CAMMU_PAGE_MASK));

	// NOTE(review): the meaning of ra bit 0x800 is not defined in this
	// file; when clear, the line's pre-assigned main-space cache handle
	// is used instead of decoding the system tag -- confirm against
	// CAMMU documentation
	if (tlbl.ra & 0x800)
		return { &m_memory[(tlbl.ra & TLB_RA_ST) >> ST_SHIFT].cache, (tlbl.ra & TLB_RA_RA) | (virtual_address & CAMMU_PAGE_MASK) };
	else
		return { &tlbl.cache, (tlbl.ra & TLB_RA_RA) | (virtual_address & CAMMU_PAGE_MASK) };
}
586
587 // return the page table entry for a given virtual address
get_pte(const u32 va,const bool user)588 cammu_device::pte_t cammu_device::get_pte(const u32 va, const bool user)
589 {
590 // get page table directory origin from user or supervisor pdo register
591 const u32 pdo = get_pdo(user);
592
593 // get page table directory index from top 12 bits of virtual address
594 const u32 ptdi = (va & VA_PTDI) >> 20;
595
596 // fetch page table directory entry
597 const u32 ptde = m_memory[ST0].cache.read_dword(pdo | ptdi);
598
599 LOGMASKED(LOG_DTU, "get_pte pdo 0x%08x ptdi 0x%08x ptde 0x%08x\n", pdo, ptdi, ptde);
600
601 // check for page table directory entry fault
602 if (ptde & PTDE_F)
603 return { PTE_F, pdo | ptdi };
604
605 // get the page table origin from the page table directory entry
606 const u32 pto = ptde & PTDE_PTO;
607
608 // get the page table index from the middle 12 bits of the virtual address
609 const u32 pti = (va & VA_PTI) >> 10;
610
611 // fetch page table entry
612 pte_t pte = { m_memory[ST0].cache.read_dword(pto | pti), pto | pti };
613
614 LOGMASKED(LOG_DTU, "get_pte pto 0x%08x pti 0x%08x pte 0x%08x\n", pto, pti, pte.entry);
615
616 // check for page table entry fault
617 if (!(pte.entry & PTE_F))
618 LOGMASKED(LOG_DTU, "get_pte address 0x%08x pte 0x%08x (%s)\n", va, pte.entry, machine().describe_context());
619
620 return pte;
621 }
622
get_access(const access_type mode,const u32 pte,const u32 ssw) const623 bool cammu_c4_device::get_access(const access_type mode, const u32 pte, const u32 ssw) const
624 {
625 switch (mode)
626 {
627 case READ: return pte & 0x20;
628 case WRITE: return pte & 0x10;
629 case EXECUTE: return pte & 0x08;
630
631 default: return false;
632 }
633 }
634
// check an access against the C1/C3 protection level matrix
bool cammu_c3_device::get_access(const access_type mode, const u32 pte, const u32 ssw) const
{
	// extract the 4-bit protection level from the pte
	const u8 pl = (pte & PTE_PL) >> 3;

	// special case for user data mode: data accesses with the ssw
	// "user data" flag set use a user row of the matrix even when not
	// in user state, selected by the KU flag
	if ((mode != EXECUTE) && !(ssw & SSW_U) && (ssw & SSW_UU))
		return protection_matrix[(ssw & SSW_KU) ? 2 : 3][pl] & mode;
	else
		// row selected from the ssw U and K flags shifted down to 0-3
		// (NOTE(review): assumes SSW_U/SSW_K occupy bits 30:29 -- confirm
		// against the ssw bit definitions)
		return protection_matrix[((ssw ^ SSW_K) & (SSW_U | SSW_K)) >> 29][pl] & mode;
}
645
// C100/C300 CAMMU protection level matrix: rows are selected from the
// ssw mode flags in get_access(), columns by the 4-bit pte protection
// level; each entry holds the permitted access types (N = no access)
const u8 cammu_c3_device::protection_matrix[4][16] =
{
	{ RW, RW, RW, RW, RW, RW, RW, RWE, RE, R, R, R, N, N, N, N },
	{ N, RW, RW, RW, RW, RW, R, RWE, N, RE, R, R, RE, N, N, N },
	{ N, N, RW, RW, RW, R, R, RWE, N, N, RE, RE, N, RE, N, N },
	{ N, N, N, RW, R, R, R, RWE, N, N, N, RE, RE, N, RE, N }
};
654
reset_w(const u32 data)655 void cammu_c3_device::reset_w(const u32 data)
656 {
657 // translation lookaside buffer reset operations
658 if (data & (RESET_RSV | RESET_RUV | RESET_RD | RESET_RR))
659 {
660 LOGMASKED(LOG_TLB, "reset_w%s%s%s%s (%s)\n",
661 (data & RESET_RSV) ? " RSV" : "",
662 (data & RESET_RUV) ? " RUV" : "",
663 (data & RESET_RD) ? " RD" : "",
664 (data & RESET_RR) ? " RR" : "",
665 machine().describe_context());
666
667 const u32 va_mask = ((data & RESET_RSV) ? TLB_VA_SV : 0) | ((data & RESET_RUV) ? TLB_VA_UV : 0);
668 const u32 ra_mask = ((data & RESET_RD) ? TLB_RA_D : 0) | ((data & RESET_RR) ? TLB_RA_R : 0);
669
670 for (tlb_set_t &tlb_set : m_tlb)
671 {
672 tlb_set.w.va &= ~va_mask;
673 tlb_set.w.ra &= ~ra_mask;
674 tlb_set.x.va &= ~va_mask;
675 tlb_set.x.ra &= ~ra_mask;
676 }
677 }
678 }
679
tlb_r(const u8 address) const680 u32 cammu_c3_device::tlb_r(const u8 address) const
681 {
682 const u8 set = address >> 2;
683 u32 result = 0;
684
685 switch (address & 0x3)
686 {
687 case 0x0: result = m_tlb[set].w.ra | (m_tlb[set].u ? TLB_RA_U : 0); break;
688 case 0x1: result = m_tlb[set].w.va; break;
689 case 0x2: result = m_tlb[set].x.ra | (m_tlb[set].u ? TLB_RA_U : 0); break;
690 case 0x3: result = m_tlb[set].x.va; break;
691 }
692
693 LOGMASKED(LOG_TLB, "tlb_r set %2d line %c %s 0x%08x (%s)\n",
694 set, (address & 0x2) ? 'X' : 'W', (address & 0x1) ? "va" : "ra",
695 result, machine().describe_context());
696
697 return result;
698 }
699
tlb_w(const u8 address,const u32 data)700 void cammu_c3_device::tlb_w(const u8 address, const u32 data)
701 {
702 const u32 mem_mask = ~TLB_RA_U;
703 const u8 set = address >> 2;
704
705 LOGMASKED(LOG_TLB, "tlb_w set %2d line %c %s 0x%08x (%s)\n",
706 set, (address & 0x2) ? 'X' : 'W', (address & 0x1) ? "va" : "ra",
707 data, machine().describe_context());
708
709 switch (address & 0x3)
710 {
711 case 0x0: COMBINE_DATA(&m_tlb[set].w.ra); break;
712 case 0x1: COMBINE_DATA(&m_tlb[set].w.va); break;
713 case 0x2: COMBINE_DATA(&m_tlb[set].x.ra); break;
714 case 0x3: COMBINE_DATA(&m_tlb[set].x.va); break;
715 }
716 }
717
// handle a cammu read: data (D) accesses are serviced by this device,
// instruction (I) accesses by the linked instruction cammu; unknown
// addresses are logged and read as zero
u32 cammu_c3_device::cammu_r(const u32 address)
{
	const u32 select = address & CAMMU_SELECT;

	// tlb diagnostic reads
	if (select == CAMMU_D_TLB)
		return tlb_r(address);
	if (select == CAMMU_I_TLB)
		return m_linked[1]->tlb_r(address);

	// register reads, decoded identically for either unit
	if (select == CAMMU_D_REG || select == CAMMU_I_REG)
	{
		cammu_c3_device &unit = (select == CAMMU_D_REG) ? *this : *m_linked[1];

		switch (address & 0xff)
		{
		case CAMMU_REG_SPDO: return unit.s_pdo_r();
		case CAMMU_REG_UPDO: return unit.u_pdo_r();
		case CAMMU_REG_FAULT: return unit.fault_r();
		case CAMMU_REG_CONTROL: return unit.control_r();
		}
	}

	LOG("cammu_r unknown address 0x%08x\n", address);
	return 0;
}
752
cammu_w(const u32 address,const u32 data)753 void cammu_c3_device::cammu_w(const u32 address, const u32 data)
754 {
755 switch (address & CAMMU_SELECT)
756 {
757 case CAMMU_D_TLB:
758 tlb_w(address, data);
759 break;
760
761 case CAMMU_D_REG:
762 switch (address & 0xff)
763 {
764 case CAMMU_REG_SPDO: s_pdo_w(data); break;
765 case CAMMU_REG_UPDO: u_pdo_w(data); break;
766 case CAMMU_REG_FAULT: fault_w(data); break;
767 case CAMMU_REG_CONTROL: control_w(data); break;
768 case CAMMU_REG_RESET: reset_w(data); break;
769 default:
770 break;
771 }
772 break;
773
774 case CAMMU_I_TLB:
775 m_linked[1]->tlb_w(address, data);
776 break;
777
778 case CAMMU_I_REG:
779 switch (address & 0xff)
780 {
781 case CAMMU_REG_SPDO: m_linked[1]->s_pdo_w(data); break;
782 case CAMMU_REG_UPDO: m_linked[1]->u_pdo_w(data); break;
783 case CAMMU_REG_FAULT: m_linked[1]->fault_w(data); break;
784 case CAMMU_REG_CONTROL: m_linked[1]->control_w(data); break;
785 case CAMMU_REG_RESET: m_linked[1]->reset_w(data); break;
786 default:
787 break;
788 }
789 break;
790
791 case CAMMU_G_TLB:
792 for (cammu_c3_device *cammu : m_linked)
793 cammu->tlb_w(address, data);
794 break;
795
796 case CAMMU_G_REG:
797 for (cammu_c3_device *cammu : m_linked)
798 switch (address & 0xff)
799 {
800 case CAMMU_REG_SPDO: cammu->s_pdo_w(data); break;
801 case CAMMU_REG_UPDO: cammu->u_pdo_w(data); break;
802 case CAMMU_REG_FAULT: cammu->fault_w(data); break;
803 case CAMMU_REG_CONTROL: cammu->control_w(data); break;
804 case CAMMU_REG_RESET: cammu->reset_w(data); break;
805 default:
806 break;
807 }
808 break;
809
810 default:
811 LOG("cammu_w unknown address 0x%08x data 0x%08x\n", address, data);
812 break;
813 }
814 }
815