1 /*
2 * Cisco router simulation platform.
3 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
4 */
5
6 #define _GNU_SOURCE
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <unistd.h>
10 #include <string.h>
11 #include <sys/types.h>
12 #include <sys/stat.h>
13 #include <fcntl.h>
14 #include <assert.h>
15
16 #include "cpu.h"
17 #include "vm.h"
18 #include "dynamips.h"
19 #include "memory.h"
20 #include "device.h"
21
22 #define DEBUG_DEV_ACCESS 0
23
24 /* Get device by ID */
/* Retrieve a device from a VM's device array by its numeric ID */
struct vdevice *dev_get_by_id(vm_instance_t *vm,u_int dev_id)
{
   if ((vm == NULL) || (dev_id >= VM_DEVICE_MAX))
      return NULL;

   return vm->dev_array[dev_id];
}
32
33 /* Get device by name */
/* Find a device in a VM's device list by exact name match */
struct vdevice *dev_get_by_name(vm_instance_t *vm,char *name)
{
   struct vdevice *cur;

   if (vm == NULL)
      return NULL;

   cur = vm->dev_list;
   while(cur != NULL) {
      if (strcmp(cur->name,name) == 0)
         return cur;
      cur = cur->next;
   }

   return NULL;
}
47
48 /* Device lookup by physical address */
/* Device lookup by physical address. When 'cached' is non-zero, the
 * search is restricted to devices with the CACHING flag set. */
struct vdevice *dev_lookup(vm_instance_t *vm,m_uint64_t phys_addr,int cached)
{
   struct vdevice *cur;

   if (vm == NULL)
      return NULL;

   for(cur=vm->dev_list;cur!=NULL;cur=cur->next) {
      if (cached && !(cur->flags & VDEVICE_FLAG_CACHING))
         continue;

      /* range test phrased as a subtraction to stay safe near the
         top of the 64-bit address space */
      if ((phys_addr >= cur->phys_addr) &&
          ((phys_addr - cur->phys_addr) < cur->phys_len))
         return cur;
   }

   return NULL;
}
67
68 /* Find the next device after the specified address */
/* Find the first device whose base address lies strictly beyond the
 * specified physical address. The scan starts at 'dev_start' when it is
 * non-NULL, otherwise at the head of the VM device list. */
struct vdevice *dev_lookup_next(vm_instance_t *vm,m_uint64_t phys_addr,
                                struct vdevice *dev_start,int cached)
{
   struct vdevice *cur;

   if (vm == NULL)
      return NULL;

   cur = dev_start ? dev_start : vm->dev_list;

   while(cur != NULL) {
      /* skip non-cacheable devices when a cached lookup is requested */
      if (!cached || (cur->flags & VDEVICE_FLAG_CACHING)) {
         if (cur->phys_addr > phys_addr)
            return cur;
      }
      cur = cur->next;
   }

   return NULL;
}
88
89 /* Initialize a device */
dev_init(struct vdevice * dev)90 void dev_init(struct vdevice *dev)
91 {
92 memset(dev,0,sizeof(*dev));
93 dev->fd = -1;
94 }
95
96 /* Allocate a device */
dev_create(char * name)97 struct vdevice *dev_create(char *name)
98 {
99 struct vdevice *dev;
100
101 if (!(dev = malloc(sizeof(*dev)))) {
102 fprintf(stderr,"dev_create: insufficient memory to "
103 "create device '%s'.\n",name);
104 return NULL;
105 }
106
107 dev_init(dev);
108 dev->name = name;
109 return dev;
110 }
111
112 /* Remove a device */
/* Remove a device from a VM: unbind it and release whatever host
 * resources back it (mmap'ed file, file descriptor, malloc'ed memory or
 * sparse map), according to the device flags. The structure itself is
 * reinitialized to a clean state, not freed — the caller owns it. */
void dev_remove(vm_instance_t *vm,struct vdevice *dev)
{
   if (dev == NULL)
      return;

   vm_unbind_device(vm,dev);

   vm_log(vm,"DEVICE",
          "Removal of device %s, fd=%d, host_addr=0x%llx, flags=%d\n",
          dev->name,dev->fd,(m_uint64_t)dev->host_addr,dev->flags);

   /* A remapped device shares its fd/host memory with the original
      device (see dev_remap), so it owns nothing: just reset it. */
   if (dev->flags & VDEVICE_FLAG_REMAP) {
      dev_init(dev);
      return;
   }

   if (dev->flags & VDEVICE_FLAG_SPARSE) {
      dev_sparse_shutdown(dev);

      /* Ghost images are shared: drop our reference through
         vm_ghost_image_release() instead of closing the fd directly. */
      if (dev->flags & VDEVICE_FLAG_GHOST) {
         vm_ghost_image_release(dev->fd);
         dev_init(dev);
         return;
      }
   }

   if (dev->fd != -1) {
      /* Unmap memory mapped file */
      if (dev->host_addr) {
         /* flush dirty pages to the backing file before unmapping */
         if (dev->flags & VDEVICE_FLAG_SYNC) {
            memzone_sync_all((void *)dev->host_addr,dev->phys_len);
         }

         vm_log(vm,"MMAP","unmapping of device '%s', "
                "fd=%d, host_addr=0x%llx, len=0x%x\n",
                dev->name,dev->fd,(m_uint64_t)dev->host_addr,dev->phys_len);
         memzone_unmap((void *)dev->host_addr,dev->phys_len);
      }

      if (dev->flags & VDEVICE_FLAG_SYNC)
         fsync(dev->fd);

      close(dev->fd);
   } else {
      /* Use of malloc'ed host memory: free it */
      if (dev->host_addr)
         free((void *)dev->host_addr);
   }

   /* reinitialize the device to a clean state */
   dev_init(dev);
}
165
166 /* Show properties of a device */
dev_show(struct vdevice * dev)167 void dev_show(struct vdevice *dev)
168 {
169 if (!dev)
170 return;
171
172 printf(" %-18s: 0x%12.12llx (0x%8.8x)\n",
173 dev->name,dev->phys_addr,dev->phys_len);
174 }
175
176 /* Show the device list */
/* Show the device list of a VM instance.
 *
 * FIX: guard against a NULL vm pointer — every other helper in this file
 * taking a vm_instance_t* checks it, but this function dereferenced
 * vm->name unconditionally. */
void dev_show_list(vm_instance_t *vm)
{
   struct vdevice *dev;

   if (!vm)
      return;

   printf("\nVM \"%s\" (%u) Device list:\n",vm->name,vm->instance_id);

   for(dev=vm->dev_list;dev;dev=dev->next)
      dev_show(dev);

   printf("\n");
}
188
189 /* device access function */
/* Device access function: dispatch a memory access targeting a device to
 * its registered handler.
 *
 * cpu     : CPU performing the access
 * dev_id  : index into the VM device array — assumed valid; no bounds or
 *           NULL check is done here as this is a hot path
 * offset  : offset of the access from the device base address
 * op_size : size of the access in bytes
 * op_type : access type (MTS_READ / MTS_WRITE — see handlers below)
 * data    : value to write, or storage for the value read
 *
 * Returns whatever the device handler returns. */
void *dev_access(cpu_gen_t *cpu,u_int dev_id,m_uint32_t offset,
                 u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct vdevice *dev = cpu->vm->dev_array[dev_id];

#if DEBUG_DEV_ACCESS
   cpu_log(cpu,"DEV_ACCESS","%s: dev_id=%u, offset=0x%8.8x, op_size=%u, "
           "op_type=%u, data=%p\n",dev->name,dev_id,offset,op_size,op_type,data);
#endif

   return(dev->handler(cpu,dev,offset,op_size,op_type,data));
}
202
203 /* Synchronize memory for a memory-mapped (mmap) device */
dev_sync(struct vdevice * dev)204 int dev_sync(struct vdevice *dev)
205 {
206 if (!dev || !dev->host_addr)
207 return(-1);
208
209 return(memzone_sync((void *)dev->host_addr,dev->phys_len));
210 }
211
212 /* Remap a device at specified physical address */
/* Remap a device at a specified physical address: build a new device
 * covering a different physical range but sharing the original device's
 * resources (fd, host memory, handler, sparse map). The REMAP flag marks
 * it as a non-owning view. */
struct vdevice *dev_remap(char *name,struct vdevice *orig,
                          m_uint64_t paddr,m_uint32_t len)
{
   struct vdevice *alias;

   alias = dev_create(name);
   if (alias == NULL)
      return NULL;

   alias->phys_addr  = paddr;
   alias->phys_len   = len;
   alias->flags      = orig->flags | VDEVICE_FLAG_REMAP;
   alias->fd         = orig->fd;
   alias->host_addr  = orig->host_addr;
   alias->handler    = orig->handler;
   alias->sparse_map = orig->sparse_map;
   return alias;
}
230
231 /* Create a RAM device */
/* Create a RAM device.
 *
 * Non-sparse mode: the RAM is backed either by a mmap'ed file when
 * 'filename' is non-NULL, or by an anonymous page-aligned allocation.
 * Sparse mode: host pages are allocated lazily through a sparse map.
 *
 * Returns the device bound to the VM, or NULL on failure. */
struct vdevice *dev_create_ram(vm_instance_t *vm,char *name,
                               int sparse,char *filename,
                               m_uint64_t paddr,m_uint32_t len)
{
   struct vdevice *dev;
   u_char *ram_ptr;

   if (!(dev = dev_create(name)))
      return NULL;

   dev->phys_addr = paddr;
   dev->phys_len = len;
   dev->flags = VDEVICE_FLAG_CACHING;

   if (!sparse) {
      if (filename) {
         /* file-backed RAM: map the file into host memory */
         dev->fd = memzone_create_file(filename,dev->phys_len,&ram_ptr);

         if (dev->fd == -1) {
            perror("dev_create_ram: mmap");
            free(dev);
            return NULL;
         }

         dev->host_addr = (m_iptr_t)ram_ptr;
      } else {
         /* anonymous RAM: page-aligned host allocation */
         dev->host_addr = (m_iptr_t)m_memalign(4096,dev->phys_len);
      }

      if (!dev->host_addr) {
         free(dev);
         return NULL;
      }
   } else {
      /* FIX: dev_sparse_init() returns -1 when the sparse map cannot be
         allocated; the result was previously ignored, which would bind a
         device with a NULL sparse_map to the VM. */
      if (dev_sparse_init(dev) == -1) {
         fprintf(stderr,"dev_create_ram: unable to create sparse map "
                 "for device '%s'.\n",name);
         free(dev);
         return NULL;
      }
   }

   vm_bind_device(vm,dev);
   return dev;
}
272
273 /* Create a ghosted RAM device */
274 struct vdevice *
dev_create_ghost_ram(vm_instance_t * vm,char * name,int sparse,char * filename,m_uint64_t paddr,m_uint32_t len)275 dev_create_ghost_ram(vm_instance_t *vm,char *name,int sparse,char *filename,
276 m_uint64_t paddr,m_uint32_t len)
277 {
278 struct vdevice *dev;
279 u_char *ram_ptr;
280
281 if (!(dev = dev_create(name)))
282 return NULL;
283
284 dev->phys_addr = paddr;
285 dev->phys_len = len;
286 dev->flags = VDEVICE_FLAG_CACHING|VDEVICE_FLAG_GHOST;
287
288 if (!sparse) {
289 dev->fd = memzone_open_cow_file(filename,dev->phys_len,&ram_ptr);
290 if (dev->fd == -1) {
291 perror("dev_create_ghost_ram: mmap");
292 free(dev);
293 return NULL;
294 }
295
296 if (!(dev->host_addr = (m_iptr_t)ram_ptr)) {
297 free(dev);
298 return NULL;
299 }
300 } else {
301 if (vm_ghost_image_get(filename,&ram_ptr,&dev->fd) == -1) {
302 free(dev);
303 return NULL;
304 }
305
306 dev->host_addr = (m_iptr_t)ram_ptr;
307 dev_sparse_init(dev);
308 }
309
310 vm_bind_device(vm,dev);
311 return dev;
312 }
313
314 /* Create a memory alias */
/* Create a memory alias: a remapped (non-owning) view of an existing
 * device, registered under a new name at a new physical range. */
struct vdevice *dev_create_ram_alias(vm_instance_t *vm,char *name,char *orig,
                                     m_uint64_t paddr,m_uint32_t len)
{
   struct vdevice *base,*alias;

   /* locate the device to alias */
   base = dev_get_by_name(vm,orig);
   if (base == NULL) {
      fprintf(stderr,"VM%u: dev_create_ram_alias: unknown device '%s'.\n",
              vm->instance_id,orig);
      return NULL;
   }

   alias = dev_remap(name,base,paddr,len);
   if (alias == NULL) {
      fprintf(stderr,"VM%u: dev_create_ram_alias: unable to create "
              "new device %s.\n",vm->instance_id,name);
      return NULL;
   }

   vm_bind_device(vm,alias);
   return alias;
}
336
337 /* Initialize a sparse device */
dev_sparse_init(struct vdevice * dev)338 int dev_sparse_init(struct vdevice *dev)
339 {
340 u_int i,nr_pages;
341 size_t len;
342
343 /* create the sparse mapping */
344 nr_pages = normalize_size(dev->phys_len,VM_PAGE_SIZE,VM_PAGE_SHIFT);
345 len = nr_pages * sizeof(m_iptr_t);
346
347 if (!(dev->sparse_map = malloc(len)))
348 return(-1);
349
350 if (!dev->host_addr) {
351 memset(dev->sparse_map,0,len);
352 } else {
353 for(i=0;i<nr_pages;i++)
354 dev->sparse_map[i] = dev->host_addr + (i << VM_PAGE_SHIFT);
355 }
356
357 dev->flags |= VDEVICE_FLAG_SPARSE;
358 return(0);
359 }
360
361 /* Shutdown sparse device structures */
dev_sparse_shutdown(struct vdevice * dev)362 int dev_sparse_shutdown(struct vdevice *dev)
363 {
364 if (!(dev->flags & VDEVICE_FLAG_SPARSE))
365 return(-1);
366
367 free(dev->sparse_map);
368 dev->sparse_map = NULL;
369 return(0);
370 }
371
372 /* Show info about a sparse device */
dev_sparse_show_info(struct vdevice * dev)373 int dev_sparse_show_info(struct vdevice *dev)
374 {
375 u_int i,nr_pages,dirty_pages;
376
377 printf("Sparse information for device '%s':\n",dev->name);
378
379 if (!(dev->flags & VDEVICE_FLAG_SPARSE)) {
380 printf("This is not a sparse device.\n");
381 return(-1);
382 }
383
384 if (!dev->sparse_map) {
385 printf("No sparse map.\n");
386 return(-1);
387 }
388
389 nr_pages = normalize_size(dev->phys_len,VM_PAGE_SIZE,VM_PAGE_SHIFT);
390 dirty_pages = 0;
391
392 for(i=0;i<nr_pages;i++)
393 if (dev->sparse_map[i] & VDEVICE_PTE_DIRTY)
394 dirty_pages++;
395
396 printf("%u dirty pages on a total of %u pages.\n",dirty_pages,nr_pages);
397 return(0);
398 }
399
400 /* Get an host address for a sparse device */
/* Get a host address for a sparse device page.
 *
 * Looks up the sparse map entry for the VM page containing 'paddr' and
 * returns the matching host page address. '*cow' is set to 1 only when
 * the returned page is still a shared "ghost" page being read (a write
 * to it must go through the duplication path below).
 *
 * Two modes, selected by dev->host_addr:
 *  - no base mapping: host pages are allocated on first touch;
 *  - "ghost" base mapping: pages are shared until the first write, at
 *    which point they are duplicated (manual copy-on-write). */
m_iptr_t dev_sparse_get_host_addr(vm_instance_t *vm,struct vdevice *dev,
                                  m_uint64_t paddr,u_int op_type,int *cow)
{
   m_iptr_t ptr,ptr_new;
   u_int offset;

   /* index of the page inside the device's sparse map */
   offset = (paddr - dev->phys_addr) >> VM_PAGE_SHIFT;
   ptr = dev->sparse_map[offset];
   *cow = 0;

   /*
    * If the device is not in COW mode, allocate a host page if the physical
    * page is requested for the first time.
    */
   if (!dev->host_addr) {
      /* DIRTY doubles as an "allocated" marker in this mode */
      if (!(ptr & VDEVICE_PTE_DIRTY)) {
         ptr = (m_iptr_t)vm_alloc_host_page(vm);
         assert(ptr);

         dev->sparse_map[offset] = ptr | VDEVICE_PTE_DIRTY;
         return(ptr);
      }

      return(ptr & VM_PAGE_MASK);
   }

   /*
    * We have a "ghost" base. We apply the copy-on-write (COW) mechanism
    * ourselves.
    */

   /* page already duplicated by a previous write: use it directly */
   if (ptr & VDEVICE_PTE_DIRTY)
      return(ptr & VM_PAGE_MASK);

   if (op_type == MTS_READ) {
      /* reads may share the ghost page; tell the caller it is COW */
      *cow = 1;
      return(ptr & VM_PAGE_MASK);
   }

   /* Write attempt on a "ghost" page. Duplicate it */
   ptr_new = (m_iptr_t)vm_alloc_host_page(vm);
   assert(ptr_new);

   memcpy((void *)ptr_new,(void *)(ptr & VM_PAGE_MASK),VM_PAGE_SIZE);
   dev->sparse_map[offset] = ptr_new | VDEVICE_PTE_DIRTY;
   return(ptr_new);
}
447
448 /* Get virtual address space used on host for the specified device */
dev_get_vspace_size(struct vdevice * dev)449 size_t dev_get_vspace_size(struct vdevice *dev)
450 {
451 /* if the device is simply remapped, don't count it */
452 if (dev->flags & VDEVICE_FLAG_REMAP)
453 return(0);
454
455 if (dev->host_addr || (dev->flags & VDEVICE_FLAG_SPARSE))
456 return(dev->phys_len >> 10);
457
458 return(0);
459 }
460
461 /* dummy console handler */
/* Dummy console handler: reports the transmitter as always ready at
 * offset 0x40c and echoes bytes written at offset 0x41c to stdout. */
static void *dummy_console_handler(cpu_gen_t *cpu,struct vdevice *dev,
                                   m_uint32_t offset,u_int op_size,
                                   u_int op_type,m_uint64_t *data)
{
   if (offset == 0x40c) {
      if (op_type == MTS_READ)
         *data = 0x04; /* tx ready */
   } else if (offset == 0x41c) {
      if (op_type == MTS_WRITE) {
         printf("%c",(u_char)(*data & 0xff));
         fflush(stdout);
      }
   }

   return NULL;
}
482
483 /* Create a dummy console */
/* Create a dummy console device and bind it to the VM.
 * Returns 0 on success, -1 when the device cannot be allocated. */
int dev_create_dummy_console(vm_instance_t *vm)
{
   struct vdevice *console;

   console = dev_create("dummy_console");
   if (console == NULL)
      return(-1);

   console->phys_addr = 0x1e840000; /* 0x1f000000; */
   console->phys_len = 4096;
   console->handler = dummy_console_handler;

   vm_bind_device(vm,console);
   return(0);
}
498