xref: /qemu/accel/tcg/tcg-accel-ops.c (revision b2a3cbb8)
/*
 * QEMU TCG vCPU common functionality
 *
 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"

/* common functionality among all TCG variants */

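/*
 * Compute the base translation flags for this vCPU: the cluster index
 * keeps translated blocks from being shared across heterogeneous CPU
 * clusters, CF_PARALLEL selects code generation that is safe when other
 * vCPUs run concurrently, and CF_USE_ICOUNT enables instruction counting
 * when icount mode is active.
 */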
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
    cpu->tcg_cflags = cflags;
}

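/* Signal that this vCPU's thread has finished and the CPU can be cleaned up. */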
void tcg_cpus_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}

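/*
 * Execute guest code on this vCPU until it exits back to the caller.
 * cpu_exec_start()/cpu_exec_end() bracket the run so that other threads'
 * exclusive sections can synchronize with it; with CONFIG_PROFILER the
 * time spent in cpu_exec() is accumulated in the TCG profiler.
 */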
int tcg_cpus_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}

/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
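        /*
         * We are already on the vCPU thread, so there is nobody to kick;
         * make icount_decr.u16.high non-zero so the generated code exits
         * the current TB and the interrupt is noticed at the next TB
         * boundary.
         */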
        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
    }
}

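/* Guest debug (gdbstub breakpoints and watchpoints) is always available under TCG. */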
static bool tcg_supports_guest_debug(void)
{
    return true;
}

/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
    static const int xlat[] = {
        [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

    if (cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
}

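/*
 * Install a GDB breakpoint or watchpoint of the given type on every vCPU;
 * stop at the first failure and return its error code.
 */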
static int tcg_insert_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

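/*
 * Remove a GDB breakpoint or watchpoint of the given type from every vCPU;
 * stop at the first failure and return its error code.
 */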
static int tcg_remove_breakpoint(CPUState *cs, int type, hwaddr addr, hwaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_remove(cpu, addr, len,
                                        xlat_gdb_type(cpu, type));
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

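/* Remove every GDB-owned breakpoint and watchpoint from this vCPU. */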
static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}

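/*
 * Wire up the vCPU thread model: MTTCG runs one host thread per vCPU,
 * otherwise a single thread round-robins across all vCPUs (rr).  icount
 * is only available in the single-threaded (rr) case, where it supplies
 * the interrupt handler and the virtual clock / elapsed-tick hooks.
 */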
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
    } else {
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;

        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
            ops->get_virtual_clock = icount_get;
            ops->get_elapsed_ticks = icount_get;
        } else {
            ops->handle_interrupt = tcg_handle_interrupt;
        }
    }

    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}

static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}

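/* QOM boilerplate: register the "tcg" AccelOps type with the type system. */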
static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));

static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);