1 /*
2  *  virtual page mapping and translated block handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <string.h>
30 #include <errno.h>
31 #include <unistd.h>
32 #include <inttypes.h>
33 
34 #include "cpu.h"
35 #include "exec-all.h"
36 #include "qemu-common.h"
37 #include "tcg.h"
38 #ifndef X49GP
39 #include "hw/hw.h"
40 #endif
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
46 
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
51 
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
55 
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
58 
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation.  */
61 #undef DEBUG_TB_CHECK
62 #endif
63 
64 #define SMC_BITMAP_USE_THRESHOLD 10
65 
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
81 #endif
82 
83 static TranslationBlock *tbs;
84 int code_gen_max_blocks;
85 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 static int nb_tbs;
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89 
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92  have limited branch ranges (possibly also PPC), so place it in a
93  section close to the code segment. */
94 #define code_gen_section                                \
95     __attribute__((__section__(".gen_code")))           \
96     __attribute__((aligned (32)))
97 #elif defined(_WIN32)
98 /* Maximum alignment for Win32 is 16. */
99 #define code_gen_section                                \
100     __attribute__((aligned (16)))
101 #else
102 #define code_gen_section                                \
103     __attribute__((aligned (32)))
104 #endif
105 
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
112 
113 #if !defined(CONFIG_USER_ONLY)
114 int phys_ram_fd;
115 uint8_t *phys_ram_dirty;
116 static int in_migration;
117 
118 typedef struct RAMBlock {
119     uint8_t *host;
120     ram_addr_t offset;
121     ram_addr_t length;
122     struct RAMBlock *next;
123 } RAMBlock;
124 
125 static RAMBlock *ram_blocks;
126 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
127    then we can no longer assume contiguous ram offsets, and external uses
128    of this variable will break.  */
129 ram_addr_t last_ram_offset;
130 #endif
131 
132 CPUState *first_cpu;
133 /* current CPU in the current thread. It is only valid inside
134    cpu_exec() */
135 CPUState *cpu_single_env;
136 /* 0 = Do not count executed instructions.
137    1 = Precise instruction counting.
138    2 = Adaptive rate instruction counting.  */
139 int use_icount = 0;
140 /* Current instruction counter.  While executing translated code this may
141    include some instructions that have not yet been executed.  */
142 int64_t qemu_icount;
143 
144 typedef struct PageDesc {
145     /* list of TBs intersecting this ram page */
146     TranslationBlock *first_tb;
147     /* in order to optimize self modifying code, we count the number
148        of lookups we do to a given page to use a bitmap */
149     unsigned int code_write_count;
150     uint8_t *code_bitmap;
151 #if defined(CONFIG_USER_ONLY)
152     unsigned long flags;
153 #endif
154 } PageDesc;
155 
156 typedef struct PhysPageDesc {
157     /* offset in host memory of the page + io_index in the low bits */
158     ram_addr_t phys_offset;
159     ram_addr_t region_offset;
160 } PhysPageDesc;
161 
162 #define L2_BITS 10
163 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
164 /* XXX: this is a temporary hack for alpha target.
165  *      In the future, this is to be replaced by a multi-level table
166  *      to actually be able to handle the complete 64 bits address space.
167  */
168 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
169 #else
170 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
171 #endif
172 
173 #define L1_SIZE (1 << L1_BITS)
174 #define L2_SIZE (1 << L2_BITS)
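/* Illustrative sketch (not additional code): a virtual page index is split
   across the two levels defined above, so the lookup in page_find_alloc()
   below is roughly
       l1_map[index >> L2_BITS][index & (L2_SIZE - 1)]
   where each populated L1 slot points to an array of L2_SIZE PageDesc
   entries. */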
175 
176 unsigned long qemu_real_host_page_size;
177 unsigned long qemu_host_page_bits;
178 unsigned long qemu_host_page_size;
179 unsigned long qemu_host_page_mask;
180 
181 /* XXX: for system emulation, it could just be an array */
182 static PageDesc *l1_map[L1_SIZE];
183 static PhysPageDesc **l1_phys_map;
184 
185 #if !defined(CONFIG_USER_ONLY)
186 static void io_mem_init(void);
187 
188 /* io memory support */
189 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
190 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
191 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
192 static char io_mem_used[IO_MEM_NB_ENTRIES];
193 static int io_mem_watch;
194 #endif
195 
196 /* log support */
197 #ifdef WIN32
198 static const char *logfilename = "qemu.log";
199 #else
200 static const char *logfilename = "/tmp/qemu.log";
201 #endif
202 FILE *logfile;
203 int loglevel;
204 static int log_append = 0;
205 
206 /* statistics */
207 static int tlb_flush_count;
208 static int tb_flush_count;
209 static int tb_phys_invalidate_count;
210 
211 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
212 typedef struct subpage_t {
213     target_phys_addr_t base;
214     CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
215     CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
216     void *opaque[TARGET_PAGE_SIZE][2][4];
217     ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
218 } subpage_t;
219 
220 #ifdef _WIN32
221 static void map_exec(void *addr, long size)
222 {
223     DWORD old_protect;
224     VirtualProtect(addr, size,
225                    PAGE_EXECUTE_READWRITE, &old_protect);
226 
227 }
228 #else
229 static void map_exec(void *addr, long size)
230 {
231     unsigned long start, end, page_size;
232 
233     page_size = getpagesize();
234     start = (unsigned long)addr;
235     start &= ~(page_size - 1);
236 
237     end = (unsigned long)addr + size;
238     end += page_size - 1;
239     end &= ~(page_size - 1);
240 
241     mprotect((void *)start, end - start,
242              PROT_READ | PROT_WRITE | PROT_EXEC);
243 }
244 #endif
245 
246 static void page_init(void)
247 {
248     /* NOTE: we can always suppose that qemu_host_page_size >=
249        TARGET_PAGE_SIZE */
250 #ifdef _WIN32
251     {
252         SYSTEM_INFO system_info;
253 
254         GetSystemInfo(&system_info);
255         qemu_real_host_page_size = system_info.dwPageSize;
256     }
257 #else
258     qemu_real_host_page_size = getpagesize();
259 #endif
260     if (qemu_host_page_size == 0)
261         qemu_host_page_size = qemu_real_host_page_size;
262     if (qemu_host_page_size < TARGET_PAGE_SIZE)
263         qemu_host_page_size = TARGET_PAGE_SIZE;
264     qemu_host_page_bits = 0;
265     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
266         qemu_host_page_bits++;
267     qemu_host_page_mask = ~(qemu_host_page_size - 1);
268     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
269     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
270 
271 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
272     {
273         long long startaddr, endaddr;
274         FILE *f;
275         int n;
276 
277         mmap_lock();
278         last_brk = (unsigned long)sbrk(0);
279         f = fopen("/proc/self/maps", "r");
280         if (f) {
281             do {
282                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
283                 if (n == 2) {
284                     startaddr = MIN(startaddr,
285                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
286                     endaddr = MIN(endaddr,
287                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
288                     page_set_flags(startaddr & TARGET_PAGE_MASK,
289                                    TARGET_PAGE_ALIGN(endaddr),
290                                    PAGE_RESERVED);
291                 }
292             } while (!feof(f));
293             fclose(f);
294         }
295         mmap_unlock();
296     }
297 #endif
298 }
299 
300 static inline PageDesc **page_l1_map(target_ulong index)
301 {
302 #if TARGET_LONG_BITS > 32
303     /* Host memory outside guest VM.  For 32-bit targets we have already
304        excluded high addresses.  */
305     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
306         return NULL;
307 #endif
308     return &l1_map[index >> L2_BITS];
309 }
310 
311 static inline PageDesc *page_find_alloc(target_ulong index)
312 {
313     PageDesc **lp, *p;
314     lp = page_l1_map(index);
315     if (!lp)
316         return NULL;
317 
318     p = *lp;
319     if (!p) {
320         /* allocate if not found */
321 #if defined(CONFIG_USER_ONLY)
322         size_t len = sizeof(PageDesc) * L2_SIZE;
323         /* Don't use qemu_malloc because it may recurse.  */
324         p = mmap(NULL, len, PROT_READ | PROT_WRITE,
325                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
326         *lp = p;
327         if (h2g_valid(p)) {
328             unsigned long addr = h2g(p);
329             page_set_flags(addr & TARGET_PAGE_MASK,
330                            TARGET_PAGE_ALIGN(addr + len),
331                            PAGE_RESERVED);
332         }
333 #else
334         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
335         *lp = p;
336 #endif
337     }
338     return p + (index & (L2_SIZE - 1));
339 }
340 
341 static inline PageDesc *page_find(target_ulong index)
342 {
343     PageDesc **lp, *p;
344     lp = page_l1_map(index);
345     if (!lp)
346         return NULL;
347 
348     p = *lp;
349     if (!p) {
350         return NULL;
351     }
352     return p + (index & (L2_SIZE - 1));
353 }
354 
355 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
356 {
357     void **lp, **p;
358     PhysPageDesc *pd;
359 
360     p = (void **)l1_phys_map;
361 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
362 
363 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
364 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
365 #endif
366     lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
367     p = *lp;
368     if (!p) {
369         /* allocate if not found */
370         if (!alloc)
371             return NULL;
372         p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
373         memset(p, 0, sizeof(void *) * L1_SIZE);
374         *lp = p;
375     }
376 #endif
377     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
378     pd = *lp;
379     if (!pd) {
380         int i;
381         /* allocate if not found */
382         if (!alloc)
383             return NULL;
384         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
385         *lp = pd;
386         for (i = 0; i < L2_SIZE; i++) {
387           pd[i].phys_offset = IO_MEM_UNASSIGNED;
388           pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
389         }
390     }
391     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
392 }
393 
394 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
395 {
396     return phys_page_find_alloc(index, 0);
397 }
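/* Sketch of the physical-page lookup above: when TARGET_PHYS_ADDR_SPACE_BITS
   is larger than 32, an extra top-level table is used, so
   phys_page_find_alloc() effectively walks
       l1_phys_map[(index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)]
                  [(index >> L2_BITS) & (L1_SIZE - 1)]
                  [index & (L2_SIZE - 1)]
   and skips the first step on 32-bit physical address spaces. */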
398 
399 #if !defined(CONFIG_USER_ONLY)
400 static void tlb_protect_code(ram_addr_t ram_addr);
401 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
402                                     target_ulong vaddr);
403 #define mmap_lock() do { } while(0)
404 #define mmap_unlock() do { } while(0)
405 #endif
406 
407 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
408 
409 #if defined(CONFIG_USER_ONLY)
410 /* Currently it is not recommended to allocate big chunks of data in
411    user mode. This will change when a dedicated libc is used. */
412 #define USE_STATIC_CODE_GEN_BUFFER
413 #endif
414 
415 #ifdef USE_STATIC_CODE_GEN_BUFFER
416 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
417 #endif
418 
419 static void code_gen_alloc(unsigned long tb_size)
420 {
421 #ifdef USE_STATIC_CODE_GEN_BUFFER
422     code_gen_buffer = static_code_gen_buffer;
423     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
424     map_exec(code_gen_buffer, code_gen_buffer_size);
425 #else
426     code_gen_buffer_size = tb_size;
427     if (code_gen_buffer_size == 0) {
428 #if defined(CONFIG_USER_ONLY)
429         /* in user mode, phys_ram_size is not meaningful */
430         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
431 #else
432         /* XXX: needs adjustments */
433         code_gen_buffer_size = (unsigned long)(ram_size / 4);
434 #endif
435     }
436     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
437         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
438     /* The code gen buffer location may have constraints depending on
439        the host cpu and OS */
440 #if defined(__linux__)
441     {
442         int flags;
443         void *start = NULL;
444 
445         flags = MAP_PRIVATE | MAP_ANONYMOUS;
446 #if defined(__x86_64__)
447         flags |= MAP_32BIT;
448         /* Cannot map more than that */
449         if (code_gen_buffer_size > (800 * 1024 * 1024))
450             code_gen_buffer_size = (800 * 1024 * 1024);
451 #elif defined(__sparc_v9__)
452         // Map the buffer below 2G, so we can use direct calls and branches
453         flags |= MAP_FIXED;
454         start = (void *) 0x60000000UL;
455         if (code_gen_buffer_size > (512 * 1024 * 1024))
456             code_gen_buffer_size = (512 * 1024 * 1024);
457 #elif defined(__arm__)
458         /* Map the buffer below 32M, so we can use direct calls and branches */
459         flags |= MAP_FIXED;
460         start = (void *) 0x01000000UL;
461         if (code_gen_buffer_size > 16 * 1024 * 1024)
462             code_gen_buffer_size = 16 * 1024 * 1024;
463 #endif
464         code_gen_buffer = mmap(start, code_gen_buffer_size,
465                                PROT_WRITE | PROT_READ | PROT_EXEC,
466                                flags, -1, 0);
467         if (code_gen_buffer == MAP_FAILED) {
468             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
469             exit(1);
470         }
471     }
472 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
473     {
474         int flags;
475         void *addr = NULL;
476         flags = MAP_PRIVATE | MAP_ANONYMOUS;
477 #if defined(__x86_64__)
478         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
479          * 0x40000000 is free */
480         flags |= MAP_FIXED;
481         addr = (void *)0x40000000;
482         /* Cannot map more than that */
483         if (code_gen_buffer_size > (800 * 1024 * 1024))
484             code_gen_buffer_size = (800 * 1024 * 1024);
485 #endif
486         code_gen_buffer = mmap(addr, code_gen_buffer_size,
487                                PROT_WRITE | PROT_READ | PROT_EXEC,
488                                flags, -1, 0);
489         if (code_gen_buffer == MAP_FAILED) {
490             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
491             exit(1);
492         }
493     }
494 #else
495     code_gen_buffer = qemu_malloc(code_gen_buffer_size);
496     map_exec(code_gen_buffer, code_gen_buffer_size);
497 #endif
498 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
499     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
500     code_gen_buffer_max_size = code_gen_buffer_size -
501         code_gen_max_block_size();
502     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
503     tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
504 }
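/* Note on the sizing above: code_gen_buffer_max_size leaves room for one
   worst-case translation block (code_gen_max_block_size()), so tb_alloc()
   can refuse new blocks before the buffer actually overflows, while
   code_gen_max_blocks caps the tbs[] array using the average block size. */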
505 
506 /* Must be called before using the QEMU cpus. 'tb_size' is the size
507    (in bytes) allocated to the translation buffer. Zero means default
508    size. */
509 void cpu_exec_init_all(unsigned long tb_size)
510 {
511     cpu_gen_init();
512     code_gen_alloc(tb_size);
513     code_gen_ptr = code_gen_buffer;
514     page_init();
515 #if !defined(CONFIG_USER_ONLY)
516     io_mem_init();
517 #endif
518 }
519 
520 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
521 
522 static void cpu_common_pre_save(void *opaque)
523 {
524     CPUState *env = opaque;
525 
526     cpu_synchronize_state(env);
527 }
528 
529 static int cpu_common_pre_load(void *opaque)
530 {
531     CPUState *env = opaque;
532 
533     cpu_synchronize_state(env);
534     return 0;
535 }
536 
537 static int cpu_common_post_load(void *opaque, int version_id)
538 {
539     CPUState *env = opaque;
540 
541     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
542        version_id is increased. */
543     env->interrupt_request &= ~0x01;
544     tlb_flush(env, 1);
545 
546     return 0;
547 }
548 
549 static const VMStateDescription vmstate_cpu_common = {
550     .name = "cpu_common",
551     .version_id = 1,
552     .minimum_version_id = 1,
553     .minimum_version_id_old = 1,
554     .pre_save = cpu_common_pre_save,
555     .pre_load = cpu_common_pre_load,
556     .post_load = cpu_common_post_load,
557     .fields      = (VMStateField []) {
558         VMSTATE_UINT32(halted, CPUState),
559         VMSTATE_UINT32(interrupt_request, CPUState),
560         VMSTATE_END_OF_LIST()
561     }
562 };
563 #endif
564 
565 CPUState *qemu_get_cpu(int cpu)
566 {
567     CPUState *env = first_cpu;
568 
569     while (env) {
570         if (env->cpu_index == cpu)
571             break;
572         env = env->next_cpu;
573     }
574 
575     return env;
576 }
577 
578 void cpu_exec_init(CPUState *env)
579 {
580     CPUState **penv;
581     int cpu_index;
582 
583 #if defined(CONFIG_USER_ONLY)
584     cpu_list_lock();
585 #endif
586     env->next_cpu = NULL;
587     penv = &first_cpu;
588     cpu_index = 0;
589     while (*penv != NULL) {
590         penv = &(*penv)->next_cpu;
591         cpu_index++;
592     }
593     env->cpu_index = cpu_index;
594     env->numa_node = 0;
595     QTAILQ_INIT(&env->breakpoints);
596     QTAILQ_INIT(&env->watchpoints);
597     *penv = env;
598 #if defined(CONFIG_USER_ONLY)
599     cpu_list_unlock();
600 #endif
601 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
602     vmstate_register(cpu_index, &vmstate_cpu_common, env);
603     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
604                     cpu_save, cpu_load, env);
605 #endif
606 }
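/* Typical bring-up order (illustrative, target-dependent): cpu_exec_init_all()
   is called once to set up the translation buffer and page tables, then each
   target's CPU init path calls cpu_exec_init() to append its CPUState to the
   first_cpu list and register it for savevm. */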
607 
608 static inline void invalidate_page_bitmap(PageDesc *p)
609 {
610     if (p->code_bitmap) {
611         qemu_free(p->code_bitmap);
612         p->code_bitmap = NULL;
613     }
614     p->code_write_count = 0;
615 }
616 
617 /* set to NULL all the 'first_tb' fields in all PageDescs */
618 static void page_flush_tb(void)
619 {
620     int i, j;
621     PageDesc *p;
622 
623     for(i = 0; i < L1_SIZE; i++) {
624         p = l1_map[i];
625         if (p) {
626             for(j = 0; j < L2_SIZE; j++) {
627                 p->first_tb = NULL;
628                 invalidate_page_bitmap(p);
629                 p++;
630             }
631         }
632     }
633 }
634 
635 /* flush all the translation blocks */
636 /* XXX: tb_flush is currently not thread safe */
637 void tb_flush(CPUState *env1)
638 {
639     CPUState *env;
640 #if defined(DEBUG_FLUSH)
641     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
642            (unsigned long)(code_gen_ptr - code_gen_buffer),
643            nb_tbs, nb_tbs > 0 ?
644            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
645 #endif
646     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
647         cpu_abort(env1, "Internal error: code buffer overflow\n");
648 
649     nb_tbs = 0;
650 
651     for(env = first_cpu; env != NULL; env = env->next_cpu) {
652         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
653     }
654 
655     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
656     page_flush_tb();
657 
658     code_gen_ptr = code_gen_buffer;
659     /* XXX: flush processor icache at this point if cache flush is
660        expensive */
661     tb_flush_count++;
662 }
663 
664 #ifdef DEBUG_TB_CHECK
665 
666 static void tb_invalidate_check(target_ulong address)
667 {
668     TranslationBlock *tb;
669     int i;
670     address &= TARGET_PAGE_MASK;
671     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
672         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
673             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
674                   address >= tb->pc + tb->size)) {
675                 printf("ERROR invalidate: address=" TARGET_FMT_lx
676                        " PC=%08lx size=%04x\n",
677                        address, (long)tb->pc, tb->size);
678             }
679         }
680     }
681 }
682 
683 /* verify that all the pages have correct rights for code */
684 static void tb_page_check(void)
685 {
686     TranslationBlock *tb;
687     int i, flags1, flags2;
688 
689     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
690         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
691             flags1 = page_get_flags(tb->pc);
692             flags2 = page_get_flags(tb->pc + tb->size - 1);
693             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
694                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
695                        (long)tb->pc, tb->size, flags1, flags2);
696             }
697         }
698     }
699 }
700 
701 #endif
702 
703 /* invalidate one TB */
704 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
705                              int next_offset)
706 {
707     TranslationBlock *tb1;
708     for(;;) {
709         tb1 = *ptb;
710         if (tb1 == tb) {
711             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
712             break;
713         }
714         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
715     }
716 }
717 
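/* The TB lists manipulated below keep a small tag in the two low bits of each
   pointer: for page_next[] the tag selects which of the TB's (up to two) pages
   the link belongs to, and in the jmp_first/jmp_next[] circular list the value
   2 marks the list head.  (long)tb & 3 extracts the tag, (long)tb & ~3 the
   real pointer. */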
718 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
719 {
720     TranslationBlock *tb1;
721     unsigned int n1;
722 
723     for(;;) {
724         tb1 = *ptb;
725         n1 = (long)tb1 & 3;
726         tb1 = (TranslationBlock *)((long)tb1 & ~3);
727         if (tb1 == tb) {
728             *ptb = tb1->page_next[n1];
729             break;
730         }
731         ptb = &tb1->page_next[n1];
732     }
733 }
734 
735 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
736 {
737     TranslationBlock *tb1, **ptb;
738     unsigned int n1;
739 
740     ptb = &tb->jmp_next[n];
741     tb1 = *ptb;
742     if (tb1) {
743         /* find tb(n) in circular list */
744         for(;;) {
745             tb1 = *ptb;
746             n1 = (long)tb1 & 3;
747             tb1 = (TranslationBlock *)((long)tb1 & ~3);
748             if (n1 == n && tb1 == tb)
749                 break;
750             if (n1 == 2) {
751                 ptb = &tb1->jmp_first;
752             } else {
753                 ptb = &tb1->jmp_next[n1];
754             }
755         }
756         /* now we can suppress tb(n) from the list */
757         *ptb = tb->jmp_next[n];
758 
759         tb->jmp_next[n] = NULL;
760     }
761 }
762 
763 /* reset the jump entry 'n' of a TB so that it is not chained to
764    another TB */
765 static inline void tb_reset_jump(TranslationBlock *tb, int n)
766 {
767     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
768 }
769 
770 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
771 {
772     CPUState *env;
773     PageDesc *p;
774     unsigned int h, n1;
775     target_phys_addr_t phys_pc;
776     TranslationBlock *tb1, *tb2;
777 
778     /* remove the TB from the hash list */
779     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
780     h = tb_phys_hash_func(phys_pc);
781     tb_remove(&tb_phys_hash[h], tb,
782               offsetof(TranslationBlock, phys_hash_next));
783 
784     /* remove the TB from the page list */
785     if (tb->page_addr[0] != page_addr) {
786         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
787         tb_page_remove(&p->first_tb, tb);
788         invalidate_page_bitmap(p);
789     }
790     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
791         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
792         tb_page_remove(&p->first_tb, tb);
793         invalidate_page_bitmap(p);
794     }
795 
796     tb_invalidated_flag = 1;
797 
798     /* remove the TB from the hash list */
799     h = tb_jmp_cache_hash_func(tb->pc);
800     for(env = first_cpu; env != NULL; env = env->next_cpu) {
801         if (env->tb_jmp_cache[h] == tb)
802             env->tb_jmp_cache[h] = NULL;
803     }
804 
805     /* suppress this TB from the two jump lists */
806     tb_jmp_remove(tb, 0);
807     tb_jmp_remove(tb, 1);
808 
809     /* suppress any remaining jumps to this TB */
810     tb1 = tb->jmp_first;
811     for(;;) {
812         n1 = (long)tb1 & 3;
813         if (n1 == 2)
814             break;
815         tb1 = (TranslationBlock *)((long)tb1 & ~3);
816         tb2 = tb1->jmp_next[n1];
817         tb_reset_jump(tb1, n1);
818         tb1->jmp_next[n1] = NULL;
819         tb1 = tb2;
820     }
821     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
822 
823     tb_phys_invalidate_count++;
824 }
825 
826 static inline void set_bits(uint8_t *tab, int start, int len)
827 {
828     int end, mask, end1;
829 
830     end = start + len;
831     tab += start >> 3;
832     mask = 0xff << (start & 7);
833     if ((start & ~7) == (end & ~7)) {
834         if (start < end) {
835             mask &= ~(0xff << (end & 7));
836             *tab |= mask;
837         }
838     } else {
839         *tab++ |= mask;
840         start = (start + 8) & ~7;
841         end1 = end & ~7;
842         while (start < end1) {
843             *tab++ = 0xff;
844             start += 8;
845         }
846         if (start < end) {
847             mask = ~(0xff << (end & 7));
848             *tab |= mask;
849         }
850     }
851 }
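/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03 (the bitmap is stored LSB-first within each byte). */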
852 
853 static void build_page_bitmap(PageDesc *p)
854 {
855     int n, tb_start, tb_end;
856     TranslationBlock *tb;
857 
858     p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
859 
860     tb = p->first_tb;
861     while (tb != NULL) {
862         n = (long)tb & 3;
863         tb = (TranslationBlock *)((long)tb & ~3);
864         /* NOTE: this is subtle as a TB may span two physical pages */
865         if (n == 0) {
866             /* NOTE: tb_end may be after the end of the page, but
867                it is not a problem */
868             tb_start = tb->pc & ~TARGET_PAGE_MASK;
869             tb_end = tb_start + tb->size;
870             if (tb_end > TARGET_PAGE_SIZE)
871                 tb_end = TARGET_PAGE_SIZE;
872         } else {
873             tb_start = 0;
874             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
875         }
876         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
877         tb = tb->page_next[n];
878     }
879 }
880 
881 TranslationBlock *tb_gen_code(CPUState *env,
882                               target_ulong pc, target_ulong cs_base,
883                               int flags, int cflags)
884 {
885     TranslationBlock *tb;
886     uint8_t *tc_ptr;
887     target_ulong phys_pc, phys_page2, virt_page2;
888     int code_gen_size;
889 
890     phys_pc = get_phys_addr_code(env, pc);
891     tb = tb_alloc(pc);
892     if (!tb) {
893         /* flush must be done */
894         tb_flush(env);
895         /* cannot fail at this point */
896         tb = tb_alloc(pc);
897         /* Don't forget to invalidate previous TB info.  */
898         tb_invalidated_flag = 1;
899     }
900     tc_ptr = code_gen_ptr;
901     tb->tc_ptr = tc_ptr;
902     tb->cs_base = cs_base;
903     tb->flags = flags;
904     tb->cflags = cflags;
905     cpu_gen_code(env, tb, &code_gen_size);
906     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
907 
908     /* check next page if needed */
909     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
910     phys_page2 = -1;
911     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
912         phys_page2 = get_phys_addr_code(env, virt_page2);
913     }
914     tb_link_phys(tb, phys_pc, phys_page2);
915     return tb;
916 }
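/* Besides the normal translation path, tb_gen_code() is used below with
   cflags == 1 (an instruction count of one in CF_COUNT_MASK) to build a block
   containing just the instruction that is modifying its own TB; see
   tb_invalidate_phys_page_range(). */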
917 
918 /* invalidate all TBs which intersect with the target physical page
919    starting in range [start;end[. NOTE: start and end must refer to
920    the same physical page. 'is_cpu_write_access' should be true if called
921    from a real cpu write access: the virtual CPU will exit the current
922    TB if code is modified inside this TB. */
923 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
924                                    int is_cpu_write_access)
925 {
926     TranslationBlock *tb, *tb_next, *saved_tb;
927     CPUState *env = cpu_single_env;
928     target_ulong tb_start, tb_end;
929     PageDesc *p;
930     int n;
931 #ifdef TARGET_HAS_PRECISE_SMC
932     int current_tb_not_found = is_cpu_write_access;
933     TranslationBlock *current_tb = NULL;
934     int current_tb_modified = 0;
935     target_ulong current_pc = 0;
936     target_ulong current_cs_base = 0;
937     int current_flags = 0;
938 #endif /* TARGET_HAS_PRECISE_SMC */
939 
940     p = page_find(start >> TARGET_PAGE_BITS);
941     if (!p)
942         return;
943     if (!p->code_bitmap &&
944         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
945         is_cpu_write_access) {
946         /* build code bitmap */
947         build_page_bitmap(p);
948     }
949 
950     /* we remove all the TBs in the range [start, end[ */
951     /* XXX: see if in some cases it could be faster to invalidate all the code */
952     tb = p->first_tb;
953     while (tb != NULL) {
954         n = (long)tb & 3;
955         tb = (TranslationBlock *)((long)tb & ~3);
956         tb_next = tb->page_next[n];
957         /* NOTE: this is subtle as a TB may span two physical pages */
958         if (n == 0) {
959             /* NOTE: tb_end may be after the end of the page, but
960                it is not a problem */
961             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
962             tb_end = tb_start + tb->size;
963         } else {
964             tb_start = tb->page_addr[1];
965             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
966         }
967         if (!(tb_end <= start || tb_start >= end)) {
968 #ifdef TARGET_HAS_PRECISE_SMC
969             if (current_tb_not_found) {
970                 current_tb_not_found = 0;
971                 current_tb = NULL;
972                 if (env->mem_io_pc) {
973                     /* now we have a real cpu fault */
974                     current_tb = tb_find_pc(env->mem_io_pc);
975                 }
976             }
977             if (current_tb == tb &&
978                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
979                 /* If we are modifying the current TB, we must stop
980                 its execution. We could be more precise by checking
981                 that the modification is after the current PC, but it
982                 would require a specialized function to partially
983                 restore the CPU state */
984 
985                 current_tb_modified = 1;
986                 cpu_restore_state(current_tb, env,
987                                   env->mem_io_pc, NULL);
988                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
989                                      &current_flags);
990             }
991 #endif /* TARGET_HAS_PRECISE_SMC */
992             /* we need to do that to handle the case where a signal
993                occurs while doing tb_phys_invalidate() */
994             saved_tb = NULL;
995             if (env) {
996                 saved_tb = env->current_tb;
997                 env->current_tb = NULL;
998             }
999             tb_phys_invalidate(tb, -1);
1000             if (env) {
1001                 env->current_tb = saved_tb;
1002                 if (env->interrupt_request && env->current_tb)
1003                     cpu_interrupt(env, env->interrupt_request);
1004             }
1005         }
1006         tb = tb_next;
1007     }
1008 #if !defined(CONFIG_USER_ONLY)
1009     /* if no code remaining, no need to continue to use slow writes */
1010     if (!p->first_tb) {
1011         invalidate_page_bitmap(p);
1012         if (is_cpu_write_access) {
1013             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1014         }
1015     }
1016 #endif
1017 #ifdef TARGET_HAS_PRECISE_SMC
1018     if (current_tb_modified) {
1019         /* we generate a block containing just the instruction
1020            modifying the memory. It will ensure that it cannot modify
1021            itself */
1022         env->current_tb = NULL;
1023         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1024         cpu_resume_from_signal(env, NULL);
1025     }
1026 #endif
1027 }
1028 
1029 /* len must be <= 8 and start must be a multiple of len */
1030 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1031 {
1032     PageDesc *p;
1033     int offset, b;
1034 #if 0
1035     if (1) {
1036         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1037                   cpu_single_env->mem_io_vaddr, len,
1038                   cpu_single_env->eip,
1039                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1040     }
1041 #endif
1042     p = page_find(start >> TARGET_PAGE_BITS);
1043     if (!p)
1044         return;
1045     if (p->code_bitmap) {
1046         offset = start & ~TARGET_PAGE_MASK;
1047         b = p->code_bitmap[offset >> 3] >> (offset & 7);
1048         if (b & ((1 << len) - 1))
1049             goto do_invalidate;
1050     } else {
1051     do_invalidate:
1052         tb_invalidate_phys_page_range(start, start + len, 1);
1053     }
1054 }
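/* Example of the bitmap test above: a 4-byte write at page offset 0x10 checks
   bits 0..3 of code_bitmap[2], which cover page bytes 0x10..0x13; only if one
   of them was translated as code is the slow invalidation path taken. */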
1055 
1056 #if !defined(CONFIG_SOFTMMU)
1057 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1058                                     unsigned long pc, void *puc)
1059 {
1060     TranslationBlock *tb;
1061     PageDesc *p;
1062     int n;
1063 #ifdef TARGET_HAS_PRECISE_SMC
1064     TranslationBlock *current_tb = NULL;
1065     CPUState *env = cpu_single_env;
1066     int current_tb_modified = 0;
1067     target_ulong current_pc = 0;
1068     target_ulong current_cs_base = 0;
1069     int current_flags = 0;
1070 #endif
1071 
1072     addr &= TARGET_PAGE_MASK;
1073     p = page_find(addr >> TARGET_PAGE_BITS);
1074     if (!p)
1075         return;
1076     tb = p->first_tb;
1077 #ifdef TARGET_HAS_PRECISE_SMC
1078     if (tb && pc != 0) {
1079         current_tb = tb_find_pc(pc);
1080     }
1081 #endif
1082     while (tb != NULL) {
1083         n = (long)tb & 3;
1084         tb = (TranslationBlock *)((long)tb & ~3);
1085 #ifdef TARGET_HAS_PRECISE_SMC
1086         if (current_tb == tb &&
1087             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1088                 /* If we are modifying the current TB, we must stop
1089                    its execution. We could be more precise by checking
1090                    that the modification is after the current PC, but it
1091                    would require a specialized function to partially
1092                    restore the CPU state */
1093 
1094             current_tb_modified = 1;
1095             cpu_restore_state(current_tb, env, pc, puc);
1096             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1097                                  &current_flags);
1098         }
1099 #endif /* TARGET_HAS_PRECISE_SMC */
1100         tb_phys_invalidate(tb, addr);
1101         tb = tb->page_next[n];
1102     }
1103     p->first_tb = NULL;
1104 #ifdef TARGET_HAS_PRECISE_SMC
1105     if (current_tb_modified) {
1106         /* we generate a block containing just the instruction
1107            modifying the memory. It will ensure that it cannot modify
1108            itself */
1109         env->current_tb = NULL;
1110         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1111         cpu_resume_from_signal(env, puc);
1112     }
1113 #endif
1114 }
1115 #endif
1116 
1117 /* add the tb in the target page and protect it if necessary */
1118 static inline void tb_alloc_page(TranslationBlock *tb,
1119                                  unsigned int n, target_ulong page_addr)
1120 {
1121     PageDesc *p;
1122     TranslationBlock *last_first_tb;
1123 
1124     tb->page_addr[n] = page_addr;
1125     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1126     tb->page_next[n] = p->first_tb;
1127     last_first_tb = p->first_tb;
1128     p->first_tb = (TranslationBlock *)((long)tb | n);
1129     invalidate_page_bitmap(p);
1130 
1131 #if defined(TARGET_HAS_SMC) || 1
1132 
1133 #if defined(CONFIG_USER_ONLY)
1134     if (p->flags & PAGE_WRITE) {
1135         target_ulong addr;
1136         PageDesc *p2;
1137         int prot;
1138 
1139         /* force the host page as non writable (writes will have a
1140            page fault + mprotect overhead) */
1141         page_addr &= qemu_host_page_mask;
1142         prot = 0;
1143         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1144             addr += TARGET_PAGE_SIZE) {
1145 
1146             p2 = page_find (addr >> TARGET_PAGE_BITS);
1147             if (!p2)
1148                 continue;
1149             prot |= p2->flags;
1150             p2->flags &= ~PAGE_WRITE;
1151             page_get_flags(addr);
1152           }
1153         mprotect(g2h(page_addr), qemu_host_page_size,
1154                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1155 #ifdef DEBUG_TB_INVALIDATE
1156         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1157                page_addr);
1158 #endif
1159     }
1160 #else
1161     /* if some code is already present, then the pages are already
1162        protected. So we handle the case where only the first TB is
1163        allocated in a physical page */
1164     if (!last_first_tb) {
1165         tlb_protect_code(page_addr);
1166     }
1167 #endif
1168 
1169 #endif /* TARGET_HAS_SMC */
1170 }
1171 
1172 /* Allocate a new translation block. Flush the translation buffer if
1173    too many translation blocks or too much generated code. */
1174 TranslationBlock *tb_alloc(target_ulong pc)
1175 {
1176     TranslationBlock *tb;
1177 
1178     if (nb_tbs >= code_gen_max_blocks ||
1179         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1180         return NULL;
1181     tb = &tbs[nb_tbs++];
1182     tb->pc = pc;
1183     tb->cflags = 0;
1184     return tb;
1185 }
1186 
1187 void tb_free(TranslationBlock *tb)
1188 {
1189     /* In practice this is mostly used for single-use temporary TBs.
1190        Ignore the hard cases and just back up if this TB happens to
1191        be the last one generated.  */
1192     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1193         code_gen_ptr = tb->tc_ptr;
1194         nb_tbs--;
1195     }
1196 }
1197 
1198 /* add a new TB and link it to the physical page tables. phys_page2 is
1199    (-1) to indicate that only one page contains the TB. */
1200 void tb_link_phys(TranslationBlock *tb,
1201                   target_ulong phys_pc, target_ulong phys_page2)
1202 {
1203     unsigned int h;
1204     TranslationBlock **ptb;
1205 
1206     /* Grab the mmap lock to stop another thread invalidating this TB
1207        before we are done.  */
1208     mmap_lock();
1209     /* add in the physical hash table */
1210     h = tb_phys_hash_func(phys_pc);
1211     ptb = &tb_phys_hash[h];
1212     tb->phys_hash_next = *ptb;
1213     *ptb = tb;
1214 
1215     /* add in the page list */
1216     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1217     if (phys_page2 != -1)
1218         tb_alloc_page(tb, 1, phys_page2);
1219     else
1220         tb->page_addr[1] = -1;
1221 
1222     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1223     tb->jmp_next[0] = NULL;
1224     tb->jmp_next[1] = NULL;
1225 
1226     /* init original jump addresses */
1227     if (tb->tb_next_offset[0] != 0xffff)
1228         tb_reset_jump(tb, 0);
1229     if (tb->tb_next_offset[1] != 0xffff)
1230         tb_reset_jump(tb, 1);
1231 
1232 #ifdef DEBUG_TB_CHECK
1233     tb_page_check();
1234 #endif
1235     mmap_unlock();
1236 }
1237 
1238 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1239    tb[1].tc_ptr. Return NULL if not found */
1240 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1241 {
1242     int m_min, m_max, m;
1243     unsigned long v;
1244     TranslationBlock *tb;
1245 
1246     if (nb_tbs <= 0)
1247         return NULL;
1248     if (tc_ptr < (unsigned long)code_gen_buffer ||
1249         tc_ptr >= (unsigned long)code_gen_ptr)
1250         return NULL;
1251     /* binary search (cf Knuth) */
1252     m_min = 0;
1253     m_max = nb_tbs - 1;
1254     while (m_min <= m_max) {
1255         m = (m_min + m_max) >> 1;
1256         tb = &tbs[m];
1257         v = (unsigned long)tb->tc_ptr;
1258         if (v == tc_ptr)
1259             return tb;
1260         else if (tc_ptr < v) {
1261             m_max = m - 1;
1262         } else {
1263             m_min = m + 1;
1264         }
1265     }
1266     return &tbs[m_max];
1267 }
1268 
1269 static void tb_reset_jump_recursive(TranslationBlock *tb);
1270 
1271 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1272 {
1273     TranslationBlock *tb1, *tb_next, **ptb;
1274     unsigned int n1;
1275 
1276     tb1 = tb->jmp_next[n];
1277     if (tb1 != NULL) {
1278         /* find head of list */
1279         for(;;) {
1280             n1 = (long)tb1 & 3;
1281             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1282             if (n1 == 2)
1283                 break;
1284             tb1 = tb1->jmp_next[n1];
1285         }
1286         /* we are now sure that tb jumps to tb1 */
1287         tb_next = tb1;
1288 
1289         /* remove tb from the jmp_first list */
1290         ptb = &tb_next->jmp_first;
1291         for(;;) {
1292             tb1 = *ptb;
1293             n1 = (long)tb1 & 3;
1294             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1295             if (n1 == n && tb1 == tb)
1296                 break;
1297             ptb = &tb1->jmp_next[n1];
1298         }
1299         *ptb = tb->jmp_next[n];
1300         tb->jmp_next[n] = NULL;
1301 
1302         /* suppress the jump to next tb in generated code */
1303         tb_reset_jump(tb, n);
1304 
1305         /* suppress jumps in the tb on which we could have jumped */
1306         tb_reset_jump_recursive(tb_next);
1307     }
1308 }
1309 
1310 static void tb_reset_jump_recursive(TranslationBlock *tb)
1311 {
1312     tb_reset_jump_recursive2(tb, 0);
1313     tb_reset_jump_recursive2(tb, 1);
1314 }
1315 
1316 #if defined(TARGET_HAS_ICE)
1317 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1318 {
1319     target_phys_addr_t addr;
1320     target_ulong pd;
1321     ram_addr_t ram_addr;
1322     PhysPageDesc *p;
1323 
1324     addr = cpu_get_phys_page_debug(env, pc);
1325     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1326     if (!p) {
1327         pd = IO_MEM_UNASSIGNED;
1328     } else {
1329         pd = p->phys_offset;
1330     }
1331     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1332     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1333 }
1334 #endif
1335 
1336 /* Add a watchpoint.  */
1337 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1338                           int flags, CPUWatchpoint **watchpoint)
1339 {
1340     target_ulong len_mask = ~(len - 1);
1341     CPUWatchpoint *wp;
1342 
1343     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1344     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1345         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1346                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1347         return -EINVAL;
1348     }
1349     wp = qemu_malloc(sizeof(*wp));
1350 
1351     wp->vaddr = addr;
1352     wp->len_mask = len_mask;
1353     wp->flags = flags;
1354 
1355     /* keep all GDB-injected watchpoints in front */
1356     if (flags & BP_GDB)
1357         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1358     else
1359         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1360 
1361     tlb_flush_page(env, addr);
1362 
1363     if (watchpoint)
1364         *watchpoint = wp;
1365     return 0;
1366 }
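/* Illustrative use (flag names as defined in cpu-defs.h):
       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
   len must be a power of two (1, 2, 4 or 8) and addr must be len-aligned. */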
1367 
1368 /* Remove a specific watchpoint.  */
1369 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1370                           int flags)
1371 {
1372     target_ulong len_mask = ~(len - 1);
1373     CPUWatchpoint *wp;
1374 
1375     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1376         if (addr == wp->vaddr && len_mask == wp->len_mask
1377                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1378             cpu_watchpoint_remove_by_ref(env, wp);
1379             return 0;
1380         }
1381     }
1382     return -ENOENT;
1383 }
1384 
1385 /* Remove a specific watchpoint by reference.  */
1386 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1387 {
1388     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1389 
1390     tlb_flush_page(env, watchpoint->vaddr);
1391 
1392     qemu_free(watchpoint);
1393 }
1394 
1395 /* Remove all matching watchpoints.  */
1396 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1397 {
1398     CPUWatchpoint *wp, *next;
1399 
1400     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1401         if (wp->flags & mask)
1402             cpu_watchpoint_remove_by_ref(env, wp);
1403     }
1404 }
1405 
1406 /* Add a breakpoint.  */
1407 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1408                           CPUBreakpoint **breakpoint)
1409 {
1410 #if defined(TARGET_HAS_ICE)
1411     CPUBreakpoint *bp;
1412 
1413     bp = qemu_malloc(sizeof(*bp));
1414 
1415     bp->pc = pc;
1416     bp->flags = flags;
1417 
1418     /* keep all GDB-injected breakpoints in front */
1419     if (flags & BP_GDB)
1420         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1421     else
1422         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1423 
1424     breakpoint_invalidate(env, pc);
1425 
1426     if (breakpoint)
1427         *breakpoint = bp;
1428     return 0;
1429 #else
1430     return -ENOSYS;
1431 #endif
1432 }
1433 
1434 /* Remove a specific breakpoint.  */
1435 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1436 {
1437 #if defined(TARGET_HAS_ICE)
1438     CPUBreakpoint *bp;
1439 
1440     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1441         if (bp->pc == pc && bp->flags == flags) {
1442             cpu_breakpoint_remove_by_ref(env, bp);
1443             return 0;
1444         }
1445     }
1446     return -ENOENT;
1447 #else
1448     return -ENOSYS;
1449 #endif
1450 }
1451 
1452 /* Remove a specific breakpoint by reference.  */
1453 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1454 {
1455 #if defined(TARGET_HAS_ICE)
1456     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1457 
1458     breakpoint_invalidate(env, breakpoint->pc);
1459 
1460     qemu_free(breakpoint);
1461 #endif
1462 }
1463 
1464 /* Remove all matching breakpoints. */
1465 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1466 {
1467 #if defined(TARGET_HAS_ICE)
1468     CPUBreakpoint *bp, *next;
1469 
1470     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1471         if (bp->flags & mask)
1472             cpu_breakpoint_remove_by_ref(env, bp);
1473     }
1474 #endif
1475 }
1476 
1477 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1478    CPU loop after each instruction */
1479 void cpu_single_step(CPUState *env, int enabled)
1480 {
1481 #if defined(TARGET_HAS_ICE)
1482     if (env->singlestep_enabled != enabled) {
1483         env->singlestep_enabled = enabled;
1484         if (kvm_enabled())
1485             kvm_update_guest_debug(env, 0);
1486         else {
1487             /* must flush all the translated code to avoid inconsistencies */
1488             /* XXX: only flush what is necessary */
1489             tb_flush(env);
1490         }
1491     }
1492 #endif
1493 }
1494 
1495 /* enable or disable low levels log */
1496 void cpu_set_log(int log_flags)
1497 {
1498     loglevel = log_flags;
1499     if (loglevel && !logfile) {
1500         logfile = fopen(logfilename, log_append ? "a" : "w");
1501         if (!logfile) {
1502             perror(logfilename);
1503             _exit(1);
1504         }
1505 #if !defined(CONFIG_SOFTMMU)
1506         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1507         {
1508             static char logfile_buf[4096];
1509             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1510         }
1511 #elif !defined(_WIN32)
1512         /* Win32 doesn't support line-buffering and requires size >= 2 */
1513         setvbuf(logfile, NULL, _IOLBF, 0);
1514 #endif
1515         log_append = 1;
1516     }
1517     if (!loglevel && logfile) {
1518         fclose(logfile);
1519         logfile = NULL;
1520     }
1521 }
1522 
1523 void cpu_set_log_filename(const char *filename)
1524 {
1525     logfilename = strdup(filename);
1526     if (logfile) {
1527         fclose(logfile);
1528         logfile = NULL;
1529     }
1530     cpu_set_log(loglevel);
1531 }
1532 
1533 static void cpu_unlink_tb(CPUState *env)
1534 {
1535     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1536        problem and hope the cpu will stop of its own accord.  For userspace
1537        emulation this often isn't actually as bad as it sounds.  Often
1538        signals are used primarily to interrupt blocking syscalls.  */
1539     TranslationBlock *tb;
1540     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1541 
1542     tb = env->current_tb;
1543     /* if the cpu is currently executing code, we must unlink it and
1544        all the potentially executing TB */
1545     if (tb) {
1546         spin_lock(&interrupt_lock);
1547         env->current_tb = NULL;
1548         tb_reset_jump_recursive(tb);
1549         spin_unlock(&interrupt_lock);
1550     }
1551 }
1552 
1553 /* mask must never be zero, except for A20 change call */
1554 void cpu_interrupt(CPUState *env, int mask)
1555 {
1556     int old_mask;
1557 
1558     old_mask = env->interrupt_request;
1559     env->interrupt_request |= mask;
1560 
1561 #ifndef CONFIG_USER_ONLY
1562     /*
1563      * If called from iothread context, wake the target cpu in
1564      * case it's halted.
1565      */
1566     if (!qemu_cpu_self(env)) {
1567         qemu_cpu_kick(env);
1568         return;
1569     }
1570 #endif
1571 
1572     if (use_icount) {
1573         env->icount_decr.u16.high = 0xffff;
1574 #ifndef CONFIG_USER_ONLY
1575         if (!can_do_io(env)
1576             && (mask & ~old_mask) != 0) {
1577             cpu_abort(env, "Raised interrupt while not in I/O function");
1578         }
1579 #endif
1580     } else {
1581         cpu_unlink_tb(env);
1582     }
1583 }
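/* The icount branch above relies on the generated code checking icount_decr:
   forcing the high 16 bits to 0xffff makes the 32-bit counter negative, so the
   next check in translated code drops back to the main loop (assumption based
   on the icount mechanism, not on code in this file). */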
1584 
1585 void cpu_reset_interrupt(CPUState *env, int mask)
1586 {
1587     env->interrupt_request &= ~mask;
1588 }
1589 
1590 void cpu_exit(CPUState *env)
1591 {
1592     env->exit_request = 1;
1593     cpu_unlink_tb(env);
1594 }
1595 
1596 const CPULogItem cpu_log_items[] = {
1597     { CPU_LOG_TB_OUT_ASM, "out_asm",
1598       "show generated host assembly code for each compiled TB" },
1599     { CPU_LOG_TB_IN_ASM, "in_asm",
1600       "show target assembly code for each compiled TB" },
1601     { CPU_LOG_TB_OP, "op",
1602       "show micro ops for each compiled TB" },
1603     { CPU_LOG_TB_OP_OPT, "op_opt",
1604       "show micro ops "
1605 #ifdef TARGET_I386
1606       "before eflags optimization and "
1607 #endif
1608       "after liveness analysis" },
1609     { CPU_LOG_INT, "int",
1610       "show interrupts/exceptions in short format" },
1611     { CPU_LOG_EXEC, "exec",
1612       "show trace before each executed TB (lots of logs)" },
1613     { CPU_LOG_TB_CPU, "cpu",
1614       "show CPU state before block translation" },
1615 #ifdef TARGET_I386
1616     { CPU_LOG_PCALL, "pcall",
1617       "show protected mode far calls/returns/exceptions" },
1618     { CPU_LOG_RESET, "cpu_reset",
1619       "show CPU state before CPU resets" },
1620 #endif
1621 #ifdef DEBUG_IOPORT
1622     { CPU_LOG_IOPORT, "ioport",
1623       "show all i/o ports accesses" },
1624 #endif
1625     { 0, NULL, NULL },
1626 };
1627 
1628 static int cmp1(const char *s1, int n, const char *s2)
1629 {
1630     if (strlen(s2) != n)
1631         return 0;
1632     return memcmp(s1, s2, n) == 0;
1633 }
1634 
1635 /* Takes a comma-separated list of log masks. Returns 0 on error. */
1636 int cpu_str_to_log_mask(const char *str)
1637 {
1638     const CPULogItem *item;
1639     int mask;
1640     const char *p, *p1;
1641 
1642     p = str;
1643     mask = 0;
1644     for(;;) {
1645         p1 = strchr(p, ',');
1646         if (!p1)
1647             p1 = p + strlen(p);
1648         if (cmp1(p, p1 - p, "all")) {
1649             for(item = cpu_log_items; item->mask != 0; item++) {
1650                 mask |= item->mask;
1651             }
1652         } else {
1653             for(item = cpu_log_items; item->mask != 0; item++) {
1654                 if (cmp1(p, p1 - p, item->name))
1655                     goto found;
1656             }
1657             return 0;
1658         }
1659     found:
1660         mask |= item->mask;
1661         if (*p1 != ',')
1662             break;
1663         p = p1 + 1;
1664     }
1665     return mask;
1666 }
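/* Usage sketch (illustrative): the -d option parser passes its argument
   straight through, e.g.

       int mask = cpu_str_to_log_mask("in_asm,exec");
       // mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC) on success, 0 on error

   "all" expands to every entry of cpu_log_items[]. */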
1667 
1668 void cpu_abort(CPUState *env, const char *fmt, ...)
1669 {
1670     va_list ap;
1671     va_list ap2;
1672 
1673     va_start(ap, fmt);
1674     va_copy(ap2, ap);
1675     fprintf(stderr, "qemu: fatal: ");
1676     vfprintf(stderr, fmt, ap);
1677     fprintf(stderr, "\n");
1678 #ifdef TARGET_I386
1679     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1680 #else
1681     cpu_dump_state(env, stderr, fprintf, 0);
1682 #endif
1683     if (qemu_log_enabled()) {
1684         qemu_log("qemu: fatal: ");
1685         qemu_log_vprintf(fmt, ap2);
1686         qemu_log("\n");
1687 #ifdef TARGET_I386
1688         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1689 #else
1690         log_cpu_state(env, 0);
1691 #endif
1692         qemu_log_flush();
1693         qemu_log_close();
1694     }
1695     va_end(ap2);
1696     va_end(ap);
1697     abort();
1698 }
1699 
1700 CPUState *cpu_copy(CPUState *env)
1701 {
1702     CPUState *new_env = cpu_init(env->cpu_model_str);
1703     CPUState *next_cpu = new_env->next_cpu;
1704     int cpu_index = new_env->cpu_index;
1705 #if defined(TARGET_HAS_ICE)
1706     CPUBreakpoint *bp;
1707     CPUWatchpoint *wp;
1708 #endif
1709 
1710     memcpy(new_env, env, sizeof(CPUState));
1711 
1712     /* Preserve chaining and index. */
1713     new_env->next_cpu = next_cpu;
1714     new_env->cpu_index = cpu_index;
1715 
1716     /* Clone all break/watchpoints.
1717        Note: Once we support ptrace with hw-debug register access, make sure
1718        BP_CPU break/watchpoints are handled correctly on clone. */
1719     QTAILQ_INIT(&env->breakpoints);
1720     QTAILQ_INIT(&env->watchpoints);
1721 #if defined(TARGET_HAS_ICE)
1722     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1723         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1724     }
1725     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1726         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1727                               wp->flags, NULL);
1728     }
1729 #endif
1730 
1731     return new_env;
1732 }
1733 
1734 #if !defined(CONFIG_USER_ONLY)
1735 
1736 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1737 {
1738     unsigned int i;
1739 
1740     /* Discard jump cache entries for any tb which might potentially
1741        overlap the flushed page.  */
1742     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1743     memset(&env->tb_jmp_cache[i], 0,
1744            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1745 
1746     i = tb_jmp_cache_hash_page(addr);
1747     memset(&env->tb_jmp_cache[i], 0,
1748            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1749 }
1750 
1751 static CPUTLBEntry s_cputlb_empty_entry = {
1752     .addr_read  = -1,
1753     .addr_write = -1,
1754     .addr_code  = -1,
1755     .addend     = -1,
1756 };
1757 
1758 /* NOTE: if flush_global is true, also flush global entries (not
1759    implemented yet) */
1760 void tlb_flush(CPUState *env, int flush_global)
1761 {
1762     int i;
1763 
1764 #if defined(DEBUG_TLB)
1765     printf("tlb_flush:\n");
1766 #endif
1767     /* must reset current TB so that interrupts cannot modify the
1768        links while we are modifying them */
1769     env->current_tb = NULL;
1770 
1771     for(i = 0; i < CPU_TLB_SIZE; i++) {
1772         int mmu_idx;
1773         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1774             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1775         }
1776     }
1777 
1778     memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
1779 
1780     tlb_flush_count++;
1781 }
1782 
1783 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1784 {
1785     if (addr == (tlb_entry->addr_read &
1786                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1787         addr == (tlb_entry->addr_write &
1788                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1789         addr == (tlb_entry->addr_code &
1790                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1791         *tlb_entry = s_cputlb_empty_entry;
1792     }
1793 }
1794 
1795 void tlb_flush_page(CPUState *env, target_ulong addr)
1796 {
1797     int i;
1798     int mmu_idx;
1799 
1800 #if defined(DEBUG_TLB)
1801     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1802 #endif
1803     /* must reset current TB so that interrupts cannot modify the
1804        links while we are modifying them */
1805     env->current_tb = NULL;
1806 
1807     addr &= TARGET_PAGE_MASK;
1808     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1809     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1810         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1811 
1812     tlb_flush_jmp_cache(env, addr);
1813 }
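/* Illustrative example of the index computation above, assuming
   TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256 (typical values, not
   guaranteed for every target):

       addr = 0x0804a123
       addr &= TARGET_PAGE_MASK;             // 0x0804a000
       i = (addr >> 12) & 255;               // 0x4a

   so only TLB set 0x4a is flushed, in every MMU mode. */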
1814 
1815 /* update the TLBs so that writes to code in the virtual page 'addr'
1816    can be detected */
1817 static void tlb_protect_code(ram_addr_t ram_addr)
1818 {
1819     cpu_physical_memory_reset_dirty(ram_addr,
1820                                     ram_addr + TARGET_PAGE_SIZE,
1821                                     CODE_DIRTY_FLAG);
1822 }
1823 
1824 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1825    tested for self modifying code */
1826 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1827                                     target_ulong vaddr)
1828 {
1829     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1830 }
1831 
1832 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1833                                          unsigned long start, unsigned long length)
1834 {
1835     unsigned long addr;
1836     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1837         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1838         if ((addr - start) < length) {
1839             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1840         }
1841     }
1842 }
1843 
1844 /* Note: start and end must be within the same ram block.  */
1845 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1846                                      int dirty_flags)
1847 {
1848     CPUState *env;
1849     unsigned long length, start1;
1850     int i, mask, len;
1851     uint8_t *p;
1852 
1853     start &= TARGET_PAGE_MASK;
1854     end = TARGET_PAGE_ALIGN(end);
1855 
1856     length = end - start;
1857     if (length == 0)
1858         return;
1859     len = length >> TARGET_PAGE_BITS;
1860     mask = ~dirty_flags;
1861     p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1862     for(i = 0; i < len; i++)
1863         p[i] &= mask;
1864 
1865     /* we modify the TLB cache so that the dirty bit will be set again
1866        when accessing the range */
1867     start1 = (unsigned long)qemu_get_ram_ptr(start);
1868     /* Check that we don't span multiple blocks - this breaks the
1869        address comparisons below.  */
1870     if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1871             != (end - 1) - start) {
1872         abort();
1873     }
1874 
1875     for(env = first_cpu; env != NULL; env = env->next_cpu) {
1876         int mmu_idx;
1877         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1878             for(i = 0; i < CPU_TLB_SIZE; i++)
1879                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1880                                       start1, length);
1881         }
1882     }
1883 }
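/* Illustrative note: each byte of phys_ram_dirty holds one flag bit per
   client.  A display adapter that wants to redraw only touched pages could
   do something like the following (redraw_page() is a hypothetical helper;
   cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to come
   from cpu-all.h in this tree layout):

       if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
           redraw_page(page_addr);
           cpu_physical_memory_reset_dirty(page_addr,
                                           page_addr + TARGET_PAGE_SIZE,
                                           VGA_DIRTY_FLAG);
       }
*/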
1884 
1885 int cpu_physical_memory_set_dirty_tracking(int enable)
1886 {
1887     in_migration = enable;
1888     if (kvm_enabled()) {
1889         return kvm_set_migration_log(enable);
1890     }
1891     return 0;
1892 }
1893 
1894 int cpu_physical_memory_get_dirty_tracking(void)
1895 {
1896     return in_migration;
1897 }
1898 
1899 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1900                                    target_phys_addr_t end_addr)
1901 {
1902     int ret = 0;
1903 
1904     if (kvm_enabled())
1905         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1906     return ret;
1907 }
1908 
1909 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1910 {
1911     ram_addr_t ram_addr;
1912     void *p;
1913 
1914     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1915         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1916             + tlb_entry->addend);
1917         ram_addr = qemu_ram_addr_from_host(p);
1918         if (!cpu_physical_memory_is_dirty(ram_addr)) {
1919             tlb_entry->addr_write |= TLB_NOTDIRTY;
1920         }
1921     }
1922 }
1923 
1924 /* update the TLB according to the current state of the dirty bits */
1925 void cpu_tlb_update_dirty(CPUState *env)
1926 {
1927     int i;
1928     int mmu_idx;
1929     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1930         for(i = 0; i < CPU_TLB_SIZE; i++)
1931             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1932     }
1933 }
1934 
1935 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1936 {
1937     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1938         tlb_entry->addr_write = vaddr;
1939 }
1940 
1941 /* update the TLB corresponding to virtual page vaddr
1942    so that it is no longer dirty */
1943 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1944 {
1945     int i;
1946     int mmu_idx;
1947 
1948     vaddr &= TARGET_PAGE_MASK;
1949     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1950     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1951         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1952 }
1953 
1954 /* add a new TLB entry. At most one entry for a given virtual address
1955    is permitted. Return 0 if OK or 2 if the page could not be mapped
1956    (can only happen in non SOFTMMU mode for I/O pages or pages
1957    conflicting with the host address space). */
1958 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1959                       target_phys_addr_t paddr, int prot,
1960                       int mmu_idx, int is_softmmu)
1961 {
1962     PhysPageDesc *p;
1963     unsigned long pd;
1964     unsigned int index;
1965     target_ulong address;
1966     target_ulong code_address;
1967     target_phys_addr_t addend;
1968     int ret;
1969     CPUTLBEntry *te;
1970     CPUWatchpoint *wp;
1971     target_phys_addr_t iotlb;
1972 
1973     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1974     if (!p) {
1975         pd = IO_MEM_UNASSIGNED;
1976     } else {
1977         pd = p->phys_offset;
1978     }
1979 #if defined(DEBUG_TLB)
1980     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1981            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1982 #endif
1983 
1984     ret = 0;
1985     address = vaddr;
1986     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1987         /* IO memory case (romd handled later) */
1988         address |= TLB_MMIO;
1989     }
1990     addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1991     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1992         /* Normal RAM.  */
1993         iotlb = pd & TARGET_PAGE_MASK;
1994         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1995             iotlb |= IO_MEM_NOTDIRTY;
1996         else
1997             iotlb |= IO_MEM_ROM;
1998     } else {
1999         /* IO handlers are currently passed a physical address.
2000            It would be nice to pass an offset from the base address
2001            of that region.  This would avoid having to special case RAM,
2002            and avoid full address decoding in every device.
2003            We can't use the high bits of pd for this because
2004            IO_MEM_ROMD uses these as a ram address.  */
2005         iotlb = (pd & ~TARGET_PAGE_MASK);
2006         if (p) {
2007             iotlb += p->region_offset;
2008         } else {
2009             iotlb += paddr;
2010         }
2011     }
2012 
2013     code_address = address;
2014     /* Make accesses to pages with watchpoints go via the
2015        watchpoint trap routines.  */
2016     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2017         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2018             iotlb = io_mem_watch + paddr;
2019             /* TODO: The memory case can be optimized by not trapping
2020                reads of pages with a write breakpoint.  */
2021             address |= TLB_MMIO;
2022         }
2023     }
2024 
2025     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2026     env->iotlb[mmu_idx][index] = iotlb - vaddr;
2027     te = &env->tlb_table[mmu_idx][index];
2028     te->addend = addend - vaddr;
2029     if (prot & PAGE_READ) {
2030         te->addr_read = address;
2031     } else {
2032         te->addr_read = -1;
2033     }
2034 
2035     if (prot & PAGE_EXEC) {
2036         te->addr_code = code_address;
2037     } else {
2038         te->addr_code = -1;
2039     }
2040     if (prot & PAGE_WRITE) {
2041         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2042             (pd & IO_MEM_ROMD)) {
2043             /* Write access calls the I/O callback.  */
2044             te->addr_write = address | TLB_MMIO;
2045         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2046                    !cpu_physical_memory_is_dirty(pd)) {
2047             te->addr_write = address | TLB_NOTDIRTY;
2048         } else {
2049             te->addr_write = address;
2050         }
2051     } else {
2052         te->addr_write = -1;
2053     }
2054     return ret;
2055 }
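/* Note for clarity (illustrative): after this call the softmmu fast path
   can turn a guest virtual address into a host pointer with a single add,
   because the entry is filled so that

       te->addend   == host_ram_ptr_for_page - vaddr
       host_address == guest_vaddr + te->addend

   whenever the matching addr_read/addr_write/addr_code field permits the
   access; otherwise the slow path (I/O, notdirty, watchpoint) is taken. */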
2056 
2057 #else
2058 
2059 void tlb_flush(CPUState *env, int flush_global)
2060 {
2061 }
2062 
2063 void tlb_flush_page(CPUState *env, target_ulong addr)
2064 {
2065 }
2066 
2067 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2068                       target_phys_addr_t paddr, int prot,
2069                       int mmu_idx, int is_softmmu)
2070 {
2071     return 0;
2072 }
2073 
2074 /*
2075  * Walks guest process memory "regions" one by one
2076  * and calls callback function 'fn' for each region.
2077  */
2078 int walk_memory_regions(void *priv,
2079     int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2080 {
2081     unsigned long start, end;
2082     PageDesc *p = NULL;
2083     int i, j, prot, prot1;
2084     int rc = 0;
2085 
2086     start = end = -1;
2087     prot = 0;
2088 
2089     for (i = 0; i <= L1_SIZE; i++) {
2090         p = (i < L1_SIZE) ? l1_map[i] : NULL;
2091         for (j = 0; j < L2_SIZE; j++) {
2092             prot1 = (p == NULL) ? 0 : p[j].flags;
2093             /*
2094              * "region" is one continuous chunk of memory
2095              * that has the same protection flags set.
2096              */
2097             if (prot1 != prot) {
2098                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2099                 if (start != -1) {
2100                     rc = (*fn)(priv, start, end, prot);
2101                     /* callback can stop iteration by returning != 0 */
2102                     if (rc != 0)
2103                         return (rc);
2104                 }
2105                 if (prot1 != 0)
2106                     start = end;
2107                 else
2108                     start = -1;
2109                 prot = prot1;
2110             }
2111             if (p == NULL)
2112                 break;
2113         }
2114     }
2115     return (rc);
2116 }
2117 
2118 static int dump_region(void *priv, unsigned long start,
2119     unsigned long end, unsigned long prot)
2120 {
2121     FILE *f = (FILE *)priv;
2122 
2123     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2124         start, end, end - start,
2125         ((prot & PAGE_READ) ? 'r' : '-'),
2126         ((prot & PAGE_WRITE) ? 'w' : '-'),
2127         ((prot & PAGE_EXEC) ? 'x' : '-'));
2128 
2129     return (0);
2130 }
2131 
2132 /* dump memory mappings */
2133 void page_dump(FILE *f)
2134 {
2135     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2136             "start", "end", "size", "prot");
2137     walk_memory_regions(f, dump_region);
2138 }
2139 
2140 int page_get_flags(target_ulong address)
2141 {
2142     PageDesc *p;
2143 
2144     p = page_find(address >> TARGET_PAGE_BITS);
2145     if (!p)
2146         return 0;
2147     return p->flags;
2148 }
2149 
2150 /* modify the flags of a page and invalidate the code if
2151    necessary. The flag PAGE_WRITE_ORG is set automatically
2152    depending on PAGE_WRITE */
2153 void page_set_flags(target_ulong start, target_ulong end, int flags)
2154 {
2155     PageDesc *p;
2156     target_ulong addr;
2157 
2158     /* mmap_lock should already be held.  */
2159     start = start & TARGET_PAGE_MASK;
2160     end = TARGET_PAGE_ALIGN(end);
2161     if (flags & PAGE_WRITE)
2162         flags |= PAGE_WRITE_ORG;
2163     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2164         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2165         /* We may be called for host regions that are outside guest
2166            address space.  */
2167         if (!p)
2168             return;
2169         /* if the write protection is set, then we invalidate the code
2170            inside */
2171         if (!(p->flags & PAGE_WRITE) &&
2172             (flags & PAGE_WRITE) &&
2173             p->first_tb) {
2174             tb_invalidate_phys_page(addr, 0, NULL);
2175         }
2176         p->flags = flags;
2177     }
2178 }
2179 
2180 int page_check_range(target_ulong start, target_ulong len, int flags)
2181 {
2182     PageDesc *p;
2183     target_ulong end;
2184     target_ulong addr;
2185 
2186     if (start + len < start)
2187         /* we've wrapped around */
2188         return -1;
2189 
2190     end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2191     start = start & TARGET_PAGE_MASK;
2192 
2193     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2194         p = page_find(addr >> TARGET_PAGE_BITS);
2195         if (!p)
2196             return -1;
2197         if (!(p->flags & PAGE_VALID))
2198             return -1;
2199 
2200         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2201             return -1;
2202         if (flags & PAGE_WRITE) {
2203             if (!(p->flags & PAGE_WRITE_ORG))
2204                 return -1;
2205             /* unprotect the page if it was put read-only because it
2206                contains translated code */
2207             if (!(p->flags & PAGE_WRITE)) {
2208                 if (!page_unprotect(addr, 0, NULL))
2209                     return -1;
2210             }
2211             return 0;
2212         }
2213     }
2214     return 0;
2215 }
2216 
2217 /* called from signal handler: invalidate the code and unprotect the
2218    page. Return TRUE if the fault was successfully handled. */
2219 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2220 {
2221     unsigned int page_index, prot, pindex;
2222     PageDesc *p, *p1;
2223     target_ulong host_start, host_end, addr;
2224 
2225     /* Technically this isn't safe inside a signal handler.  However we
2226        know this only ever happens in a synchronous SEGV handler, so in
2227        practice it seems to be ok.  */
2228     mmap_lock();
2229 
2230     host_start = address & qemu_host_page_mask;
2231     page_index = host_start >> TARGET_PAGE_BITS;
2232     p1 = page_find(page_index);
2233     if (!p1) {
2234         mmap_unlock();
2235         return 0;
2236     }
2237     host_end = host_start + qemu_host_page_size;
2238     p = p1;
2239     prot = 0;
2240     for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2241         prot |= p->flags;
2242         p++;
2243     }
2244     /* if the page was really writable, then we change its
2245        protection back to writable */
2246     if (prot & PAGE_WRITE_ORG) {
2247         pindex = (address - host_start) >> TARGET_PAGE_BITS;
2248         if (!(p1[pindex].flags & PAGE_WRITE)) {
2249             mprotect((void *)g2h(host_start), qemu_host_page_size,
2250                      (prot & PAGE_BITS) | PAGE_WRITE);
2251             p1[pindex].flags |= PAGE_WRITE;
2252             /* and since the content will be modified, we must invalidate
2253                the corresponding translated code. */
2254             tb_invalidate_phys_page(address, pc, puc);
2255 #ifdef DEBUG_TB_CHECK
2256             tb_invalidate_check(address);
2257 #endif
2258             mmap_unlock();
2259             return 1;
2260         }
2261     }
2262     mmap_unlock();
2263     return 0;
2264 }
2265 
2266 static inline void tlb_set_dirty(CPUState *env,
2267                                  unsigned long addr, target_ulong vaddr)
2268 {
2269 }
2270 #endif /* defined(CONFIG_USER_ONLY) */
2271 
2272 #if !defined(CONFIG_USER_ONLY)
2273 
2274 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2275                              ram_addr_t memory, ram_addr_t region_offset);
2276 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2277                            ram_addr_t orig_memory, ram_addr_t region_offset);
2278 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2279                       need_subpage)                                     \
2280     do {                                                                \
2281         if (addr > start_addr)                                          \
2282             start_addr2 = 0;                                            \
2283         else {                                                          \
2284             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2285             if (start_addr2 > 0)                                        \
2286                 need_subpage = 1;                                       \
2287         }                                                               \
2288                                                                         \
2289         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2290             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2291         else {                                                          \
2292             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2293             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2294                 need_subpage = 1;                                       \
2295         }                                                               \
2296     } while (0)
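/* Worked example for CHECK_SUBPAGE (illustrative, assuming 4 KiB target
   pages): registering a region at start_addr = 0x1000200 with
   orig_size = 0x100 and looking at the page addr = 0x1000000 yields
   start_addr2 = 0x200 and end_addr2 = 0x2ff, so need_subpage is set and
   only that 256-byte window of the page is routed to the new handler. */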
2297 
2298 /* register physical memory.
2299    For RAM, 'size' must be a multiple of the target page size.
2300    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2301    io memory page.  The address used when calling the IO function is
2302    the offset from the start of the region, plus region_offset.  Both
2303    start_addr and region_offset are rounded down to a page boundary
2304    before calculating this offset.  This should not be a problem unless
2305    the low bits of start_addr and region_offset differ.  */
2306 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2307                                          ram_addr_t size,
2308                                          ram_addr_t phys_offset,
2309                                          ram_addr_t region_offset)
2310 {
2311     target_phys_addr_t addr, end_addr;
2312     PhysPageDesc *p;
2313     CPUState *env;
2314     ram_addr_t orig_size = size;
2315     void *subpage;
2316 
2317     if (kvm_enabled())
2318         kvm_set_phys_mem(start_addr, size, phys_offset);
2319 
2320     if (phys_offset == IO_MEM_UNASSIGNED) {
2321         region_offset = start_addr;
2322     }
2323     region_offset &= TARGET_PAGE_MASK;
2324     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2325     end_addr = start_addr + (target_phys_addr_t)size;
2326     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2327         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2328         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2329             ram_addr_t orig_memory = p->phys_offset;
2330             target_phys_addr_t start_addr2, end_addr2;
2331             int need_subpage = 0;
2332 
2333             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2334                           need_subpage);
2335             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2336                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2337                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2338                                            &p->phys_offset, orig_memory,
2339                                            p->region_offset);
2340                 } else {
2341                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2342                                             >> IO_MEM_SHIFT];
2343                 }
2344                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2345                                  region_offset);
2346                 p->region_offset = 0;
2347             } else {
2348                 p->phys_offset = phys_offset;
2349                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2350                     (phys_offset & IO_MEM_ROMD))
2351                     phys_offset += TARGET_PAGE_SIZE;
2352             }
2353         } else {
2354             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2355             p->phys_offset = phys_offset;
2356             p->region_offset = region_offset;
2357             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2358                 (phys_offset & IO_MEM_ROMD)) {
2359                 phys_offset += TARGET_PAGE_SIZE;
2360             } else {
2361                 target_phys_addr_t start_addr2, end_addr2;
2362                 int need_subpage = 0;
2363 
2364                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2365                               end_addr2, need_subpage);
2366 
2367                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2368                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2369                                            &p->phys_offset, IO_MEM_UNASSIGNED,
2370                                            addr & TARGET_PAGE_MASK);
2371                     subpage_register(subpage, start_addr2, end_addr2,
2372                                      phys_offset, region_offset);
2373                     p->region_offset = 0;
2374                 }
2375             }
2376         }
2377         region_offset += TARGET_PAGE_SIZE;
2378     }
2379 
2380     /* since each CPU stores ram addresses in its TLB cache, we must
2381        reset the modified entries */
2382     /* XXX: slow ! */
2383     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2384         tlb_flush(env, 1);
2385     }
2386 }
2387 
2388 /* XXX: temporary until new memory mapping API */
2389 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2390 {
2391     PhysPageDesc *p;
2392 
2393     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2394     if (!p)
2395         return IO_MEM_UNASSIGNED;
2396     return p->phys_offset;
2397 }
2398 
2399 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2400 {
2401     if (kvm_enabled())
2402         kvm_coalesce_mmio_region(addr, size);
2403 }
2404 
2405 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2406 {
2407     if (kvm_enabled())
2408         kvm_uncoalesce_mmio_region(addr, size);
2409 }
2410 
2411 #ifdef X49GP
2412 ram_addr_t x49gp_ram_alloc(ram_addr_t size, uint8_t *base);
2413 ram_addr_t x49gp_ram_alloc(ram_addr_t size, uint8_t *base)
2414 {
2415     RAMBlock *new_block;
2416 
2417     size = TARGET_PAGE_ALIGN(size);
2418     new_block = qemu_malloc(sizeof(*new_block));
2419     new_block->host = base;
2420     new_block->offset = last_ram_offset;
2421     new_block->length = size;
2422 
2423     new_block->next = ram_blocks;
2424     ram_blocks = new_block;
2425 
2426     last_ram_offset += size;
2427 
2428     return new_block->offset;
2429 }
2430 #else
2431 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2432 {
2433     RAMBlock *new_block;
2434 
2435     size = TARGET_PAGE_ALIGN(size);
2436     new_block = qemu_malloc(sizeof(*new_block));
2437 
2438 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2439     /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2440     new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2441                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2442 #else
2443     new_block->host = qemu_vmalloc(size);
2444 #endif
2445 #ifdef MADV_MERGEABLE
2446     madvise(new_block->host, size, MADV_MERGEABLE);
2447 #endif
2448     new_block->offset = last_ram_offset;
2449     new_block->length = size;
2450 
2451     new_block->next = ram_blocks;
2452     ram_blocks = new_block;
2453 
2454     phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2455         (last_ram_offset + size) >> TARGET_PAGE_BITS);
2456     memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2457            0xff, size >> TARGET_PAGE_BITS);
2458 
2459     last_ram_offset += size;
2460 
2461     if (kvm_enabled())
2462         kvm_setup_guest_memory(new_block->host, size);
2463 
2464     return new_block->offset;
2465 }
2466 #endif
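/* Usage sketch (illustrative, hypothetical board code): guest RAM is
   allocated with qemu_ram_alloc() (or x49gp_ram_alloc() in the X49GP
   build) and then mapped at a guest physical address, e.g.

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_off | IO_MEM_RAM);

   cpu_register_physical_memory() is assumed here to be the region_offset
   == 0 wrapper around cpu_register_physical_memory_offset() from the
   headers. */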
2467 
2468 void qemu_ram_free(ram_addr_t addr)
2469 {
2470     /* TODO: implement this.  */
2471 }
2472 
2473 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2474    With the exception of the softmmu code in this file, this should
2475    only be used for local memory (e.g. video ram) that the device owns,
2476    and knows it isn't going to access beyond the end of the block.
2477 
2478    It should not be used for general purpose DMA.
2479    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2480  */
2481 void *qemu_get_ram_ptr(ram_addr_t addr)
2482 {
2483     RAMBlock *prev;
2484     RAMBlock **prevp;
2485     RAMBlock *block;
2486 
2487     prev = NULL;
2488     prevp = &ram_blocks;
2489     block = ram_blocks;
2490     while (block && (block->offset > addr
2491                      || block->offset + block->length <= addr)) {
2492         if (prev)
2493             prevp = &prev->next;
2494         prev = block;
2495         block = block->next;
2496     }
2497     if (!block) {
2498         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2499         abort();
2500     }
2501     /* Move this entry to the start of the list.  */
2502     if (prev) {
2503         prev->next = block->next;
2504         block->next = *prevp;
2505         *prevp = block;
2506     }
2507     return block->host + (addr - block->offset);
2508 }
2509 
2510 /* Some of the softmmu routines need to translate from a host pointer
2511    (typically a TLB entry) back to a ram offset.  */
2512 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2513 {
2514     RAMBlock *prev;
2515     RAMBlock **prevp;
2516     RAMBlock *block;
2517     uint8_t *host = ptr;
2518 
2519     prev = NULL;
2520     prevp = &ram_blocks;
2521     block = ram_blocks;
2522     while (block && (block->host > host
2523                      || block->host + block->length <= host)) {
2524         if (prev)
2525             prevp = &prev->next;
2526         prev = block;
2527         block = block->next;
2528     }
2529     if (!block) {
2530         fprintf(stderr, "Bad ram pointer %p\n", ptr);
2531         abort();
2532     }
2533     return block->offset + (host - block->host);
2534 }
2535 
2536 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2537 {
2538 #ifdef DEBUG_UNASSIGNED
2539     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2540 #endif
2541 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2542     do_unassigned_access(addr, 0, 0, 0, 1);
2543 #endif
2544     return 0;
2545 }
2546 
2547 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2548 {
2549 #ifdef DEBUG_UNASSIGNED
2550     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2551 #endif
2552 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2553     do_unassigned_access(addr, 0, 0, 0, 2);
2554 #endif
2555     return 0;
2556 }
2557 
2558 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2559 {
2560 #ifdef DEBUG_UNASSIGNED
2561     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2562 #endif
2563 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2564     do_unassigned_access(addr, 0, 0, 0, 4);
2565 #endif
2566     return 0;
2567 }
2568 
2569 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2570 {
2571 #ifdef DEBUG_UNASSIGNED
2572     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2573 #endif
2574 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2575     do_unassigned_access(addr, 1, 0, 0, 1);
2576 #endif
2577 }
2578 
2579 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2580 {
2581 #ifdef DEBUG_UNASSIGNED
2582     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2583 #endif
2584 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2585     do_unassigned_access(addr, 1, 0, 0, 2);
2586 #endif
2587 }
2588 
2589 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2590 {
2591 #ifdef DEBUG_UNASSIGNED
2592     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2593 #endif
2594 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2595     do_unassigned_access(addr, 1, 0, 0, 4);
2596 #endif
2597 }
2598 
2599 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2600     unassigned_mem_readb,
2601     unassigned_mem_readw,
2602     unassigned_mem_readl,
2603 };
2604 
2605 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2606     unassigned_mem_writeb,
2607     unassigned_mem_writew,
2608     unassigned_mem_writel,
2609 };
2610 
2611 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2612                                 uint32_t val)
2613 {
2614     int dirty_flags;
2615     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2616     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2617 #if !defined(CONFIG_USER_ONLY)
2618         tb_invalidate_phys_page_fast(ram_addr, 1);
2619         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2620 #endif
2621     }
2622     stb_p(qemu_get_ram_ptr(ram_addr), val);
2623     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2624     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2625     /* we remove the notdirty callback only if the code has been
2626        flushed */
2627     if (dirty_flags == 0xff)
2628         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2629 }
2630 
2631 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2632                                 uint32_t val)
2633 {
2634     int dirty_flags;
2635     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2636     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2637 #if !defined(CONFIG_USER_ONLY)
2638         tb_invalidate_phys_page_fast(ram_addr, 2);
2639         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2640 #endif
2641     }
2642     stw_p(qemu_get_ram_ptr(ram_addr), val);
2643     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2644     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2645     /* we remove the notdirty callback only if the code has been
2646        flushed */
2647     if (dirty_flags == 0xff)
2648         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2649 }
2650 
2651 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2652                                 uint32_t val)
2653 {
2654     int dirty_flags;
2655     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2656     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2657 #if !defined(CONFIG_USER_ONLY)
2658         tb_invalidate_phys_page_fast(ram_addr, 4);
2659         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2660 #endif
2661     }
2662     stl_p(qemu_get_ram_ptr(ram_addr), val);
2663     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2664     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2665     /* we remove the notdirty callback only if the code has been
2666        flushed */
2667     if (dirty_flags == 0xff)
2668         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2669 }
2670 
2671 static CPUReadMemoryFunc * const error_mem_read[3] = {
2672     NULL, /* never used */
2673     NULL, /* never used */
2674     NULL, /* never used */
2675 };
2676 
2677 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2678     notdirty_mem_writeb,
2679     notdirty_mem_writew,
2680     notdirty_mem_writel,
2681 };
2682 
2683 /* Generate a debug exception if a watchpoint has been hit.  */
2684 static void check_watchpoint(int offset, int len_mask, int flags)
2685 {
2686     CPUState *env = cpu_single_env;
2687     target_ulong pc, cs_base;
2688     TranslationBlock *tb;
2689     target_ulong vaddr;
2690     CPUWatchpoint *wp;
2691     int cpu_flags;
2692 
2693     if (env->watchpoint_hit) {
2694         /* We re-entered the check after replacing the TB. Now raise
2695          * the debug interrupt so that it will trigger after the
2696          * current instruction. */
2697         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2698         return;
2699     }
2700     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2701     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2702         if ((vaddr == (wp->vaddr & len_mask) ||
2703              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2704             wp->flags |= BP_WATCHPOINT_HIT;
2705             if (!env->watchpoint_hit) {
2706                 env->watchpoint_hit = wp;
2707                 tb = tb_find_pc(env->mem_io_pc);
2708                 if (!tb) {
2709                     cpu_abort(env, "check_watchpoint: could not find TB for "
2710                               "pc=%p", (void *)env->mem_io_pc);
2711                 }
2712                 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2713                 tb_phys_invalidate(tb, -1);
2714                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2715                     env->exception_index = EXCP_DEBUG;
2716                 } else {
2717                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2718                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2719                 }
2720                 cpu_resume_from_signal(env, NULL);
2721             }
2722         } else {
2723             wp->flags &= ~BP_WATCHPOINT_HIT;
2724         }
2725     }
2726 }
2727 
2728 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2729    so these check for a hit then pass through to the normal out-of-line
2730    phys routines.  */
2731 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2732 {
2733     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2734     return ldub_phys(addr);
2735 }
2736 
2737 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2738 {
2739     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2740     return lduw_phys(addr);
2741 }
2742 
2743 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2744 {
2745     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2746     return ldl_phys(addr);
2747 }
2748 
2749 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2750                              uint32_t val)
2751 {
2752     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2753     stb_phys(addr, val);
2754 }
2755 
2756 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2757                              uint32_t val)
2758 {
2759     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2760     stw_phys(addr, val);
2761 }
2762 
2763 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2764                              uint32_t val)
2765 {
2766     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2767     stl_phys(addr, val);
2768 }
2769 
2770 static CPUReadMemoryFunc * const watch_mem_read[3] = {
2771     watch_mem_readb,
2772     watch_mem_readw,
2773     watch_mem_readl,
2774 };
2775 
2776 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2777     watch_mem_writeb,
2778     watch_mem_writew,
2779     watch_mem_writel,
2780 };
2781 
2782 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2783                                  unsigned int len)
2784 {
2785     uint32_t ret;
2786     unsigned int idx;
2787 
2788     idx = SUBPAGE_IDX(addr);
2789 #if defined(DEBUG_SUBPAGE)
2790     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2791            mmio, len, addr, idx);
2792 #endif
2793     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2794                                        addr + mmio->region_offset[idx][0][len]);
2795 
2796     return ret;
2797 }
2798 
2799 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2800                               uint32_t value, unsigned int len)
2801 {
2802     unsigned int idx;
2803 
2804     idx = SUBPAGE_IDX(addr);
2805 #if defined(DEBUG_SUBPAGE)
2806     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2807            mmio, len, addr, idx, value);
2808 #endif
2809     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2810                                   addr + mmio->region_offset[idx][1][len],
2811                                   value);
2812 }
2813 
2814 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2815 {
2816 #if defined(DEBUG_SUBPAGE)
2817     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2818 #endif
2819 
2820     return subpage_readlen(opaque, addr, 0);
2821 }
2822 
2823 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2824                             uint32_t value)
2825 {
2826 #if defined(DEBUG_SUBPAGE)
2827     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2828 #endif
2829     subpage_writelen(opaque, addr, value, 0);
2830 }
2831 
2832 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2833 {
2834 #if defined(DEBUG_SUBPAGE)
2835     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2836 #endif
2837 
2838     return subpage_readlen(opaque, addr, 1);
2839 }
2840 
2841 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2842                             uint32_t value)
2843 {
2844 #if defined(DEBUG_SUBPAGE)
2845     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2846 #endif
2847     subpage_writelen(opaque, addr, value, 1);
2848 }
2849 
2850 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2851 {
2852 #if defined(DEBUG_SUBPAGE)
2853     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2854 #endif
2855 
2856     return subpage_readlen(opaque, addr, 2);
2857 }
2858 
2859 static void subpage_writel (void *opaque,
2860                          target_phys_addr_t addr, uint32_t value)
2861 {
2862 #if defined(DEBUG_SUBPAGE)
2863     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2864 #endif
2865     subpage_writelen(opaque, addr, value, 2);
2866 }
2867 
2868 static CPUReadMemoryFunc * const subpage_read[] = {
2869     &subpage_readb,
2870     &subpage_readw,
2871     &subpage_readl,
2872 };
2873 
2874 static CPUWriteMemoryFunc * const subpage_write[] = {
2875     &subpage_writeb,
2876     &subpage_writew,
2877     &subpage_writel,
2878 };
2879 
2880 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2881                              ram_addr_t memory, ram_addr_t region_offset)
2882 {
2883     int idx, eidx;
2884     unsigned int i;
2885 
2886     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2887         return -1;
2888     idx = SUBPAGE_IDX(start);
2889     eidx = SUBPAGE_IDX(end);
2890 #if defined(DEBUG_SUBPAGE)
2891     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2892            mmio, start, end, idx, eidx, memory);
2893 #endif
2894     memory >>= IO_MEM_SHIFT;
2895     for (; idx <= eidx; idx++) {
2896         for (i = 0; i < 4; i++) {
2897             if (io_mem_read[memory][i]) {
2898                 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2899                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2900                 mmio->region_offset[idx][0][i] = region_offset;
2901             }
2902             if (io_mem_write[memory][i]) {
2903                 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2904                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2905                 mmio->region_offset[idx][1][i] = region_offset;
2906             }
2907         }
2908     }
2909 
2910     return 0;
2911 }
2912 
2913 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2914                            ram_addr_t orig_memory, ram_addr_t region_offset)
2915 {
2916     subpage_t *mmio;
2917     int subpage_memory;
2918 
2919     mmio = qemu_mallocz(sizeof(subpage_t));
2920 
2921     mmio->base = base;
2922     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2923 #if defined(DEBUG_SUBPAGE)
2924     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2925            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2926 #endif
2927     *phys = subpage_memory | IO_MEM_SUBPAGE;
2928     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2929                          region_offset);
2930 
2931     return mmio;
2932 }
2933 
2934 static int get_free_io_mem_idx(void)
2935 {
2936     int i;
2937 
2938     for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
2939         if (!io_mem_used[i]) {
2940             io_mem_used[i] = 1;
2941             return i;
2942         }
2943     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
2944     return -1;
2945 }
2946 
2947 /* mem_read and mem_write are arrays of functions containing the
2948    function to access byte (index 0), word (index 1) and dword (index
2949    2). Functions can be omitted with a NULL function pointer.
2950    If io_index is non-zero, the corresponding io zone is
2951    modified. If it is zero, a new io zone is allocated. The return
2952    value can be used with cpu_register_physical_memory(); -1 is
2953    returned on error. */
2954 static int cpu_register_io_memory_fixed(int io_index,
2955                                         CPUReadMemoryFunc * const *mem_read,
2956                                         CPUWriteMemoryFunc * const *mem_write,
2957                                         void *opaque)
2958 {
2959     int i, subwidth = 0;
2960 
2961     if (io_index <= 0) {
2962         io_index = get_free_io_mem_idx();
2963         if (io_index == -1)
2964             return io_index;
2965     } else {
2966         io_index >>= IO_MEM_SHIFT;
2967         if (io_index >= IO_MEM_NB_ENTRIES)
2968             return -1;
2969     }
2970 
2971     for(i = 0;i < 3; i++) {
2972         if (!mem_read[i] || !mem_write[i])
2973             subwidth = IO_MEM_SUBWIDTH;
2974         io_mem_read[io_index][i] = mem_read[i];
2975         io_mem_write[io_index][i] = mem_write[i];
2976     }
2977     io_mem_opaque[io_index] = opaque;
2978     return (io_index << IO_MEM_SHIFT) | subwidth;
2979 }
2980 
2981 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2982                            CPUWriteMemoryFunc * const *mem_write,
2983                            void *opaque)
2984 {
2985     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2986 }
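/* Usage sketch (illustrative): a device registers its handler tables and
   then maps the returned token.  The mydev_* handlers, s and base below
   are hypothetical names:

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int iomem = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, iomem);

   The returned value already has IO_MEM_SUBWIDTH or'd in when some access
   widths are left NULL. */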
2987 
2988 void cpu_unregister_io_memory(int io_table_address)
2989 {
2990     int i;
2991     int io_index = io_table_address >> IO_MEM_SHIFT;
2992 
2993     for (i = 0; i < 3; i++) {
2994         io_mem_read[io_index][i] = unassigned_mem_read[i];
2995         io_mem_write[io_index][i] = unassigned_mem_write[i];
2996     }
2997     io_mem_opaque[io_index] = NULL;
2998     io_mem_used[io_index] = 0;
2999 }
3000 
3001 static void io_mem_init(void)
3002 {
3003     int i;
3004 
3005     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3006     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3007     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3008     for (i = 0; i < 5; i++)
3009         io_mem_used[i] = 1;
3010 
3011     io_mem_watch = cpu_register_io_memory(watch_mem_read,
3012                                           watch_mem_write, NULL);
3013 }
3014 
3015 #endif /* !defined(CONFIG_USER_ONLY) */
3016 
3017 /* physical memory access (slow version, mainly for debug) */
3018 #if defined(CONFIG_USER_ONLY)
3019 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3020                             int len, int is_write)
3021 {
3022     int l, flags;
3023     target_ulong page;
3024     void * p;
3025 
3026     while (len > 0) {
3027         page = addr & TARGET_PAGE_MASK;
3028         l = (page + TARGET_PAGE_SIZE) - addr;
3029         if (l > len)
3030             l = len;
3031         flags = page_get_flags(page);
3032         if (!(flags & PAGE_VALID))
3033             return;
3034         if (is_write) {
3035             if (!(flags & PAGE_WRITE))
3036                 return;
3037             /* XXX: this code should not depend on lock_user */
3038             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3039                 /* FIXME - should this return an error rather than just fail? */
3040                 return;
3041             memcpy(p, buf, l);
3042             unlock_user(p, addr, l);
3043         } else {
3044             if (!(flags & PAGE_READ))
3045                 return;
3046             /* XXX: this code should not depend on lock_user */
3047             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3048                 /* FIXME - should this return an error rather than just fail? */
3049                 return;
3050             memcpy(buf, p, l);
3051             unlock_user(p, addr, 0);
3052         }
3053         len -= l;
3054         buf += l;
3055         addr += l;
3056     }
3057 }
3058 
3059 #else
3060 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3061                             int len, int is_write)
3062 {
3063     int l, io_index;
3064     uint8_t *ptr;
3065     uint32_t val;
3066     target_phys_addr_t page;
3067     unsigned long pd;
3068     PhysPageDesc *p;
3069 
3070     while (len > 0) {
3071         page = addr & TARGET_PAGE_MASK;
3072         l = (page + TARGET_PAGE_SIZE) - addr;
3073         if (l > len)
3074             l = len;
3075         p = phys_page_find(page >> TARGET_PAGE_BITS);
3076         if (!p) {
3077             pd = IO_MEM_UNASSIGNED;
3078         } else {
3079             pd = p->phys_offset;
3080         }
3081 
3082         if (is_write) {
3083             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3084                 target_phys_addr_t addr1 = addr;
3085                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3086                 if (p)
3087                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3088                 /* XXX: could force cpu_single_env to NULL to avoid
3089                    potential bugs */
3090                 if (l >= 4 && ((addr1 & 3) == 0)) {
3091                     /* 32 bit write access */
3092                     val = ldl_p(buf);
3093                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3094                     l = 4;
3095                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3096                     /* 16 bit write access */
3097                     val = lduw_p(buf);
3098                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3099                     l = 2;
3100                 } else {
3101                     /* 8 bit write access */
3102                     val = ldub_p(buf);
3103                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3104                     l = 1;
3105                 }
3106             } else {
3107                 unsigned long addr1;
3108                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3109                 /* RAM case */
3110                 ptr = qemu_get_ram_ptr(addr1);
3111                 memcpy(ptr, buf, l);
3112                 if (!cpu_physical_memory_is_dirty(addr1)) {
3113                     /* invalidate code */
3114                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3115                     /* set dirty bit */
3116                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3117                         (0xff & ~CODE_DIRTY_FLAG);
3118                 }
3119             }
3120         } else {
3121             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3122                 !(pd & IO_MEM_ROMD)) {
3123                 target_phys_addr_t addr1 = addr;
3124                 /* I/O case */
3125                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3126                 if (p)
3127                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3128                 if (l >= 4 && ((addr1 & 3) == 0)) {
3129                     /* 32 bit read access */
3130                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3131                     stl_p(buf, val);
3132                     l = 4;
3133                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3134                     /* 16 bit read access */
3135                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3136                     stw_p(buf, val);
3137                     l = 2;
3138                 } else {
3139                     /* 8 bit read access */
3140                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3141                     stb_p(buf, val);
3142                     l = 1;
3143                 }
3144             } else {
3145                 /* RAM case */
3146                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3147                     (addr & ~TARGET_PAGE_MASK);
3148                 memcpy(buf, ptr, l);
3149             }
3150         }
3151         len -= l;
3152         buf += l;
3153         addr += l;
3154     }
3155 }
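
#if 0
/*
 * Illustrative sketch only: copying a block of guest physical memory into
 * a host buffer and writing a 32 bit word back.  The addresses below are
 * hypothetical.
 */
static void example_phys_access(void)
{
    uint8_t buf[64];
    uint32_t word = 0x12345678;

    /* read 64 bytes starting at guest physical address 0x10000 */
    cpu_physical_memory_rw(0x10000, buf, sizeof(buf), 0);

    /* write one 32 bit word; on an I/O page this is dispatched to the
       registered io_mem_write callback for the 32 bit width */
    cpu_physical_memory_rw(0x10040, (uint8_t *)&word, 4, 1);
}
#endif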
3156 
3157 /* used for ROM loading: can write to both RAM and ROM */
3158 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3159                                    const uint8_t *buf, int len)
3160 {
3161     int l;
3162     uint8_t *ptr;
3163     target_phys_addr_t page;
3164     unsigned long pd;
3165     PhysPageDesc *p;
3166 
3167     while (len > 0) {
3168         page = addr & TARGET_PAGE_MASK;
3169         l = (page + TARGET_PAGE_SIZE) - addr;
3170         if (l > len)
3171             l = len;
3172         p = phys_page_find(page >> TARGET_PAGE_BITS);
3173         if (!p) {
3174             pd = IO_MEM_UNASSIGNED;
3175         } else {
3176             pd = p->phys_offset;
3177         }
3178 
3179         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3180             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3181             !(pd & IO_MEM_ROMD)) {
3182             /* do nothing */
3183         } else {
3184             unsigned long addr1;
3185             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3186             /* ROM/RAM case */
3187             ptr = qemu_get_ram_ptr(addr1);
3188             memcpy(ptr, buf, l);
3189         }
3190         len -= l;
3191         buf += l;
3192         addr += l;
3193     }
3194 }
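
#if 0
/*
 * Illustrative sketch only: loading a firmware image into a region that was
 * registered as ROM.  A normal cpu_physical_memory_rw() write is discarded
 * for ROM pages; this helper writes the backing RAM directly.  rom_image,
 * rom_size and the base address are hypothetical.
 */
static void example_load_rom(const uint8_t *rom_image, int rom_size)
{
    cpu_physical_memory_write_rom(0x00000000, rom_image, rom_size);
}
#endif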
3195 
3196 typedef struct {
3197     void *buffer;
3198     target_phys_addr_t addr;
3199     target_phys_addr_t len;
3200 } BounceBuffer;
3201 
3202 static BounceBuffer bounce;
3203 
3204 typedef struct MapClient {
3205     void *opaque;
3206     void (*callback)(void *opaque);
3207     QLIST_ENTRY(MapClient) link;
3208 } MapClient;
3209 
3210 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3211     = QLIST_HEAD_INITIALIZER(map_client_list);
3212 
3213 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3214 {
3215     MapClient *client = qemu_malloc(sizeof(*client));
3216 
3217     client->opaque = opaque;
3218     client->callback = callback;
3219     QLIST_INSERT_HEAD(&map_client_list, client, link);
3220     return client;
3221 }
3222 
3223 void cpu_unregister_map_client(void *_client)
3224 {
3225     MapClient *client = (MapClient *)_client;
3226 
3227     QLIST_REMOVE(client, link);
3228     qemu_free(client);
3229 }
3230 
3231 static void cpu_notify_map_clients(void)
3232 {
3233     MapClient *client;
3234 
3235     while (!QLIST_EMPTY(&map_client_list)) {
3236         client = QLIST_FIRST(&map_client_list);
3237         client->callback(client->opaque);
3238         cpu_unregister_map_client(client);
3239     }
3240 }
3241 
3242 /* Map a physical memory region into a host virtual address.
3243  * May map a subset of the requested range, given by and returned in *plen.
3244  * May return NULL if resources needed to perform the mapping are exhausted.
3245  * Use only for reads OR writes - not for read-modify-write operations.
3246  * Use cpu_register_map_client() to know when retrying the map operation is
3247  * likely to succeed.
3248  */
3249 void *cpu_physical_memory_map(target_phys_addr_t addr,
3250                               target_phys_addr_t *plen,
3251                               int is_write)
3252 {
3253     target_phys_addr_t len = *plen;
3254     target_phys_addr_t done = 0;
3255     int l;
3256     uint8_t *ret = NULL;
3257     uint8_t *ptr;
3258     target_phys_addr_t page;
3259     unsigned long pd;
3260     PhysPageDesc *p;
3261     unsigned long addr1;
3262 
3263     while (len > 0) {
3264         page = addr & TARGET_PAGE_MASK;
3265         l = (page + TARGET_PAGE_SIZE) - addr;
3266         if (l > len)
3267             l = len;
3268         p = phys_page_find(page >> TARGET_PAGE_BITS);
3269         if (!p) {
3270             pd = IO_MEM_UNASSIGNED;
3271         } else {
3272             pd = p->phys_offset;
3273         }
3274 
3275         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3276             if (done || bounce.buffer) {
3277                 break;
3278             }
3279             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3280             bounce.addr = addr;
3281             bounce.len = l;
3282             if (!is_write) {
3283                 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3284             }
3285             ptr = bounce.buffer;
3286         } else {
3287             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3288             ptr = qemu_get_ram_ptr(addr1);
3289         }
3290         if (!done) {
3291             ret = ptr;
3292         } else if (ret + done != ptr) {
3293             break;
3294         }
3295 
3296         len -= l;
3297         addr += l;
3298         done += l;
3299     }
3300     *plen = done;
3301     return ret;
3302 }
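
#if 0
/*
 * Illustrative sketch only: mapping guest memory for zero-copy access and
 * retrying later when the single bounce buffer is busy.  dma_retry() and
 * the dev pointer are hypothetical.
 */
static void dma_retry(void *opaque);    /* hypothetical: re-issues the transfer */

static void example_map(void *dev, target_phys_addr_t addr,
                        target_phys_addr_t len, int is_write)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, is_write);

    if (!host) {
        /* Mapping resources are exhausted (the bounce buffer is in use):
           register a callback that fires when an unmap frees them.  The
           returned handle can later be passed to cpu_unregister_map_client(). */
        cpu_register_map_client(dev, dma_retry);
        return;
    }
    /* ... access host[0 .. plen-1] here; plen may be shorter than len ... */
}
#endif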
3303 
3304 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3305  * Will also mark the memory as dirty if is_write == 1.  access_len gives
3306  * the amount of memory that was actually read or written by the caller.
3307  */
3308 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3309                                int is_write, target_phys_addr_t access_len)
3310 {
3311     if (buffer != bounce.buffer) {
3312         if (is_write) {
3313             ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3314             while (access_len) {
3315                 unsigned l;
3316                 l = TARGET_PAGE_SIZE;
3317                 if (l > access_len)
3318                     l = access_len;
3319                 if (!cpu_physical_memory_is_dirty(addr1)) {
3320                     /* invalidate code */
3321                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3322                     /* set dirty bit */
3323                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3324                         (0xff & ~CODE_DIRTY_FLAG);
3325                 }
3326                 addr1 += l;
3327                 access_len -= l;
3328             }
3329         }
3330         return;
3331     }
3332     if (is_write) {
3333         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3334     }
3335     qemu_free(bounce.buffer);
3336     bounce.buffer = NULL;
3337     cpu_notify_map_clients();
3338 }
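
#if 0
/*
 * Illustrative sketch only: completing a DMA write where fewer bytes were
 * transferred than were mapped.  Passing the real transfer size as
 * access_len ensures that only the touched pages are marked dirty and have
 * their translated code invalidated.  The parameter names are hypothetical.
 */
static void example_dma_complete(void *host, target_phys_addr_t mapped_len,
                                 target_phys_addr_t transferred)
{
    /* is_write == 1: the device wrote 'transferred' bytes into guest RAM */
    cpu_physical_memory_unmap(host, mapped_len, 1, transferred);
}
#endif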
3339 
3340 /* warning: addr must be aligned */
3341 uint32_t ldl_phys(target_phys_addr_t addr)
3342 {
3343     int io_index;
3344     uint8_t *ptr;
3345     uint32_t val;
3346     unsigned long pd;
3347     PhysPageDesc *p;
3348 
3349     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3350     if (!p) {
3351         pd = IO_MEM_UNASSIGNED;
3352     } else {
3353         pd = p->phys_offset;
3354     }
3355 
3356     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3357         !(pd & IO_MEM_ROMD)) {
3358         /* I/O case */
3359         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3360         if (p)
3361             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3362         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3363     } else {
3364         /* RAM case */
3365         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3366             (addr & ~TARGET_PAGE_MASK);
3367         val = ldl_p(ptr);
3368     }
3369     return val;
3370 }
3371 
3372 /* warning: addr must be aligned */
3373 uint64_t ldq_phys(target_phys_addr_t addr)
3374 {
3375     int io_index;
3376     uint8_t *ptr;
3377     uint64_t val;
3378     unsigned long pd;
3379     PhysPageDesc *p;
3380 
3381     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3382     if (!p) {
3383         pd = IO_MEM_UNASSIGNED;
3384     } else {
3385         pd = p->phys_offset;
3386     }
3387 
3388     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3389         !(pd & IO_MEM_ROMD)) {
3390         /* I/O case */
3391         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3392         if (p)
3393             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3394 #ifdef TARGET_WORDS_BIGENDIAN
3395         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3396         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3397 #else
3398         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3399         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3400 #endif
3401     } else {
3402         /* RAM case */
3403         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3404             (addr & ~TARGET_PAGE_MASK);
3405         val = ldq_p(ptr);
3406     }
3407     return val;
3408 }
3409 
3410 /* XXX: optimize */
3411 uint32_t ldub_phys(target_phys_addr_t addr)
3412 {
3413     uint8_t val;
3414     cpu_physical_memory_read(addr, &val, 1);
3415     return val;
3416 }
3417 
3418 /* XXX: optimize */
3419 uint32_t lduw_phys(target_phys_addr_t addr)
3420 {
3421     uint16_t val;
3422     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3423     return tswap16(val);
3424 }
3425 
3426 /* warning: addr must be aligned. The RAM page is not marked as dirty
3427    and the code inside it is not invalidated. This is useful when the
3428    dirty bits are used to track modified PTEs. */
3429 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3430 {
3431     int io_index;
3432     uint8_t *ptr;
3433     unsigned long pd;
3434     PhysPageDesc *p;
3435 
3436     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3437     if (!p) {
3438         pd = IO_MEM_UNASSIGNED;
3439     } else {
3440         pd = p->phys_offset;
3441     }
3442 
3443     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3444         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3445         if (p)
3446             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3447         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3448     } else {
3449         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3450         ptr = qemu_get_ram_ptr(addr1);
3451         stl_p(ptr, val);
3452 
3453         if (unlikely(in_migration)) {
3454             if (!cpu_physical_memory_is_dirty(addr1)) {
3455                 /* invalidate code */
3456                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3457                 /* set dirty bit */
3458                 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3459                     (0xff & ~CODE_DIRTY_FLAG);
3460             }
3461         }
3462     }
3463 }
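
#if 0
/*
 * Illustrative sketch only: how a target MMU helper might set the
 * accessed/dirty bits of a guest page table entry.  Using the *_notdirty
 * store leaves the dirty bitmap untouched, so dirty tracking can still be
 * used to detect PTEs modified by the guest itself.  pte_addr and the bit
 * masks are hypothetical.
 */
static void example_update_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x20 | 0x40;                 /* hypothetical accessed/dirty bits */
    stl_phys_notdirty(pte_addr, pte);
}
#endif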
3464 
3465 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3466 {
3467     int io_index;
3468     uint8_t *ptr;
3469     unsigned long pd;
3470     PhysPageDesc *p;
3471 
3472     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3473     if (!p) {
3474         pd = IO_MEM_UNASSIGNED;
3475     } else {
3476         pd = p->phys_offset;
3477     }
3478 
3479     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3480         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3481         if (p)
3482             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3483 #ifdef TARGET_WORDS_BIGENDIAN
3484         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3485         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3486 #else
3487         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3488         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3489 #endif
3490     } else {
3491         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3492             (addr & ~TARGET_PAGE_MASK);
3493         stq_p(ptr, val);
3494     }
3495 }
3496 
3497 /* warning: addr must be aligned */
3498 void stl_phys(target_phys_addr_t addr, uint32_t val)
3499 {
3500     int io_index;
3501     uint8_t *ptr;
3502     unsigned long pd;
3503     PhysPageDesc *p;
3504 
3505     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3506     if (!p) {
3507         pd = IO_MEM_UNASSIGNED;
3508     } else {
3509         pd = p->phys_offset;
3510     }
3511 
3512     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3513         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3514         if (p)
3515             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3516         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3517     } else {
3518         unsigned long addr1;
3519         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3520         /* RAM case */
3521         ptr = qemu_get_ram_ptr(addr1);
3522         stl_p(ptr, val);
3523         if (!cpu_physical_memory_is_dirty(addr1)) {
3524             /* invalidate code */
3525             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3526             /* set dirty bit */
3527             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3528                 (0xff & ~CODE_DIRTY_FLAG);
3529         }
3530     }
3531 }
3532 
3533 /* XXX: optimize */
3534 void stb_phys(target_phys_addr_t addr, uint32_t val)
3535 {
3536     uint8_t v = val;
3537     cpu_physical_memory_write(addr, &v, 1);
3538 }
3539 
3540 /* XXX: optimize */
3541 void stw_phys(target_phys_addr_t addr, uint32_t val)
3542 {
3543     uint16_t v = tswap16(val);
3544     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3545 }
3546 
3547 /* XXX: optimize */
3548 void stq_phys(target_phys_addr_t addr, uint64_t val)
3549 {
3550     val = tswap64(val);
3551     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3552 }
3553 
3554 #endif
3555 
3556 /* virtual memory access for debug (includes writing to ROM) */
3557 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3558                         uint8_t *buf, int len, int is_write)
3559 {
3560     int l;
3561     target_phys_addr_t phys_addr;
3562     target_ulong page;
3563 
3564     while (len > 0) {
3565         page = addr & TARGET_PAGE_MASK;
3566         phys_addr = cpu_get_phys_page_debug(env, page);
3567         /* if no physical page mapped, return an error */
3568         if (phys_addr == -1)
3569             return -1;
3570         l = (page + TARGET_PAGE_SIZE) - addr;
3571         if (l > len)
3572             l = len;
3573         phys_addr += (addr & ~TARGET_PAGE_MASK);
3574 #if !defined(CONFIG_USER_ONLY)
3575         if (is_write)
3576             cpu_physical_memory_write_rom(phys_addr, buf, l);
3577         else
3578 #endif
3579             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3580         len -= l;
3581         buf += l;
3582         addr += l;
3583     }
3584     return 0;
3585 }
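
#if 0
/*
 * Illustrative sketch only: how a debugger stub might read guest virtual
 * memory.  The call returns 0 on success and -1 if a page in the range is
 * unmapped; debug writes go through the ROM-capable path above, so
 * breakpoints can be patched into ROM as well.
 */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif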
3586 
3587 /* In deterministic execution mode, instructions that perform device I/O
3588    must be at the end of the TB. */
3589 void cpu_io_recompile(CPUState *env, void *retaddr)
3590 {
3591     TranslationBlock *tb;
3592     uint32_t n, cflags;
3593     target_ulong pc, cs_base;
3594     uint64_t flags;
3595 
3596     tb = tb_find_pc((unsigned long)retaddr);
3597     if (!tb) {
3598         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3599                   retaddr);
3600     }
3601     n = env->icount_decr.u16.low + tb->icount;
3602     cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3603     /* Calculate how many instructions had been executed before the fault
3604        occurred.  */
3605     n = n - env->icount_decr.u16.low;
3606     /* Generate a new TB ending on the I/O insn.  */
3607     n++;
3608     /* On MIPS and SH, delay slot instructions can only be restarted if
3609        they were already the first instruction in the TB.  If this is not
3610        the first instruction in a TB then re-execute the preceding
3611        branch.  */
3612 #if defined(TARGET_MIPS)
3613     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3614         env->active_tc.PC -= 4;
3615         env->icount_decr.u16.low++;
3616         env->hflags &= ~MIPS_HFLAG_BMASK;
3617     }
3618 #elif defined(TARGET_SH4)
3619     if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
3620             && n > 1) {
3621         env->pc -= 2;
3622         env->icount_decr.u16.low++;
3623         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3624     }
3625 #endif
3626     /* This should never happen.  */
3627     if (n > CF_COUNT_MASK)
3628         cpu_abort(env, "TB too big during recompile");
3629 
3630     cflags = n | CF_LAST_IO;
3631     pc = tb->pc;
3632     cs_base = tb->cs_base;
3633     flags = tb->flags;
3634     tb_phys_invalidate(tb, -1);
3635     /* FIXME: In theory this could raise an exception.  In practice
3636        we have already translated the block once so it's probably ok.  */
3637     tb_gen_code(env, pc, cs_base, flags, cflags);
3638     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3639        the first in the TB) then we end up generating a whole new TB and
3640        repeating the fault, which is horribly inefficient.
3641        Better would be to execute just this insn uncached, or generate a
3642        second new TB.  */
3643     cpu_resume_from_signal(env, NULL);
3644 }
3645 
3646 void dump_exec_info(FILE *f,
3647                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3648 {
3649     int i, target_code_size, max_target_code_size;
3650     int direct_jmp_count, direct_jmp2_count, cross_page;
3651     TranslationBlock *tb;
3652 
3653     target_code_size = 0;
3654     max_target_code_size = 0;
3655     cross_page = 0;
3656     direct_jmp_count = 0;
3657     direct_jmp2_count = 0;
3658     for(i = 0; i < nb_tbs; i++) {
3659         tb = &tbs[i];
3660         target_code_size += tb->size;
3661         if (tb->size > max_target_code_size)
3662             max_target_code_size = tb->size;
3663         if (tb->page_addr[1] != -1)
3664             cross_page++;
3665         if (tb->tb_next_offset[0] != 0xffff) {
3666             direct_jmp_count++;
3667             if (tb->tb_next_offset[1] != 0xffff) {
3668                 direct_jmp2_count++;
3669             }
3670         }
3671     }
3672     /* XXX: avoid using doubles ? */
3673     cpu_fprintf(f, "Translation buffer state:\n");
3674     cpu_fprintf(f, "gen code size       %ld/%ld\n",
3675                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3676     cpu_fprintf(f, "TB count            %d/%d\n",
3677                 nb_tbs, code_gen_max_blocks);
3678     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3679                 nb_tbs ? target_code_size / nb_tbs : 0,
3680                 max_target_code_size);
3681     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3682                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3683                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3684     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3685             cross_page,
3686             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3687     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3688                 direct_jmp_count,
3689                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3690                 direct_jmp2_count,
3691                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3692     cpu_fprintf(f, "\nStatistics:\n");
3693     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3694     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3695     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3696     tcg_dump_info(f, cpu_fprintf);
3697 }
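
#if 0
/*
 * Illustrative sketch only: dumping the translation statistics to stderr.
 * fprintf() matches the cpu_fprintf callback signature.
 */
static void example_dump_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif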
3698 
3699 #if !defined(CONFIG_USER_ONLY)
3700 
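/* Each inclusion of softmmu_template.h below instantiates the slow-path
   memory access helpers for one access size (SHIFT 0..3 = 8/16/32/64 bit
   accesses).  With SOFTMMU_CODE_ACCESS defined and the _cmmu suffix these
   become the code-fetch variants used by the translator rather than the
   data-access helpers. */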
3701 #define MMUSUFFIX _cmmu
3702 #define GETPC() NULL
3703 #define env cpu_single_env
3704 #define SOFTMMU_CODE_ACCESS
3705 
3706 #define SHIFT 0
3707 #include "softmmu_template.h"
3708 
3709 #define SHIFT 1
3710 #include "softmmu_template.h"
3711 
3712 #define SHIFT 2
3713 #include "softmmu_template.h"
3714 
3715 #define SHIFT 3
3716 #include "softmmu_template.h"
3717 
3718 #undef env
3719 
3720 #endif
3721