1 /*
2 * QEMU Plugin API
3 *
4 * This provides the API that is available to the plugins to interact
5 * with QEMU. We have to be careful not to expose internal details of
6 * how QEMU works so we abstract out things like translation and
7 * instructions to anonymous data types:
8 *
9 * qemu_plugin_tb
10 * qemu_plugin_insn
11 * qemu_plugin_register
12 *
13 * Which can then be passed back into the API to do additional things.
14 * As such all the public functions in here are exported in
15 * qemu-plugin.h.
16 *
17 * The general life-cycle of a plugin is:
18 *
19 * - plugin is loaded, public qemu_plugin_install called
20 * - the install func registers callbacks for events
21 * - usually an atexit_cb is registered to dump info at the end
22 * - when a registered event occurs the plugin is called
23 * - some events pass additional info
24 * - during translation the plugin can decide to instrument any
25 * instruction
26 * - when QEMU exits all the registered atexit callbacks are called
27 *
28 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
29 * Copyright (C) 2019, Linaro
30 *
31 * License: GNU GPL, version 2 or later.
32 * See the COPYING file in the top-level directory.
33 *
34 * SPDX-License-Identifier: GPL-2.0-or-later
35 *
36 */
37
38 #include "qemu/osdep.h"
39 #include "qemu/main-loop.h"
40 #include "qemu/plugin.h"
41 #include "qemu/log.h"
42 #include "qemu/timer.h"
43 #include "tcg/tcg.h"
44 #include "exec/exec-all.h"
45 #include "exec/gdbstub.h"
46 #include "exec/translator.h"
47 #include "disas/disas.h"
48 #include "plugin.h"
49 #ifndef CONFIG_USER_ONLY
50 #include "qapi/error.h"
51 #include "migration/blocker.h"
52 #include "exec/ram_addr.h"
53 #include "qemu/plugin-memory.h"
54 #include "hw/boards.h"
55 #else
56 #include "qemu.h"
57 #ifdef CONFIG_LINUX
58 #include "loader.h"
59 #endif
60 #endif
61
62 /* Uninstall and Reset handlers */
63
/*
 * Uninstall the plugin identified by @id; @cb is forwarded to
 * plugin_reset_uninstall. The final argument selects reset (true)
 * versus uninstall (false) behaviour.
 */
void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, false);
}
68
/*
 * Reset the plugin identified by @id; same mechanism as uninstall but
 * with the reset flag set (see plugin_reset_uninstall).
 */
void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb)
{
    plugin_reset_uninstall(id, cb, true);
}
73
74 /*
75 * Plugin Register Functions
76 *
77 * This allows the plugin to register callbacks for various events
78 * during the translation.
79 */
80
/* Register @cb to be called when a vCPU is initialised. */
void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_INIT, cb);
}
86
/* Register @cb to be called when a vCPU exits. */
void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_EXIT, cb);
}
92
/*
 * True when the TB currently being translated was generated only for
 * memory instrumentation (CF_MEMI_ONLY set in its cflags); such blocks
 * take no exec callbacks.
 */
static bool tb_is_mem_only(void)
{
    return tb_cflags(tcg_ctx->gen_tb) & CF_MEMI_ONLY;
}
97
/*
 * Register @cb (with @udata) to run each time this TB executes.
 * Silently ignored for memory-instrumentation-only translations.
 */
void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
                                          qemu_plugin_vcpu_udata_cb_t cb,
                                          enum qemu_plugin_cb_flags flags,
                                          void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
    }
}
107
/*
 * Register a conditional TB-exec callback: @cb fires only when the
 * per-vCPU value at @entry compares to @imm under @cond.  The two
 * static conditions are resolved at registration time: NEVER plants
 * nothing, ALWAYS degenerates to the unconditional variant.
 */
void qemu_plugin_register_vcpu_tb_exec_cond_cb(struct qemu_plugin_tb *tb,
                                               qemu_plugin_vcpu_udata_cb_t cb,
                                               enum qemu_plugin_cb_flags flags,
                                               enum qemu_plugin_cond cond,
                                               qemu_plugin_u64 entry,
                                               uint64_t imm,
                                               void *udata)
{
    switch (cond) {
    case QEMU_PLUGIN_COND_NEVER:
        /* Statically never fires: nothing to plant. */
        return;
    case QEMU_PLUGIN_COND_ALWAYS:
        /* Degenerates to an unconditional callback. */
        qemu_plugin_register_vcpu_tb_exec_cb(tb, cb, flags, udata);
        return;
    default:
        break;
    }
    /* Memory-instrumentation-only blocks take no exec callbacks. */
    if (tb_is_mem_only()) {
        return;
    }
    plugin_register_dyn_cond_cb__udata(&tb->cbs, cb, flags,
                                       cond, entry, imm, udata);
}
126
/*
 * Plant an inline operation @op against the per-vCPU value @entry with
 * immediate @imm, executed when this TB runs.  The second argument (rw)
 * is 0 here: no memory access is associated with a TB-exec op (compare
 * the mem variant below, which forwards rw).  Skipped for
 * memory-instrumentation-only translations.
 */
void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
    struct qemu_plugin_tb *tb,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
    }
}
137
/*
 * Register @cb (with @udata) to run each time this instruction
 * executes.  Silently ignored for memory-instrumentation-only
 * translations.
 */
void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
                                            qemu_plugin_vcpu_udata_cb_t cb,
                                            enum qemu_plugin_cb_flags flags,
                                            void *udata)
{
    if (!tb_is_mem_only()) {
        plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
    }
}
147
/*
 * Register a conditional insn-exec callback: @cb fires only when the
 * per-vCPU value at @entry compares to @imm under @cond.  As with the
 * TB variant, NEVER and ALWAYS are resolved at registration time.
 */
void qemu_plugin_register_vcpu_insn_exec_cond_cb(
    struct qemu_plugin_insn *insn,
    qemu_plugin_vcpu_udata_cb_t cb,
    enum qemu_plugin_cb_flags flags,
    enum qemu_plugin_cond cond,
    qemu_plugin_u64 entry,
    uint64_t imm,
    void *udata)
{
    switch (cond) {
    case QEMU_PLUGIN_COND_NEVER:
        /* Statically never fires: nothing to plant. */
        return;
    case QEMU_PLUGIN_COND_ALWAYS:
        /* Degenerates to an unconditional callback. */
        qemu_plugin_register_vcpu_insn_exec_cb(insn, cb, flags, udata);
        return;
    default:
        break;
    }
    /* Memory-instrumentation-only blocks take no exec callbacks. */
    if (tb_is_mem_only()) {
        return;
    }
    plugin_register_dyn_cond_cb__udata(&insn->insn_cbs, cb, flags,
                                       cond, entry, imm, udata);
}
167
/*
 * Plant an inline operation @op against the per-vCPU value @entry with
 * immediate @imm, executed when this instruction runs.  rw is 0 for
 * the same reason as the TB variant.  Skipped for
 * memory-instrumentation-only translations.
 */
void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    if (!tb_is_mem_only()) {
        plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
    }
}
178
179
/*
 * We always plant memory instrumentation because it does not finalise
 * until after the operation has completed.
 */
/*
 * Register @cb (with @udata) for the memory accesses of this
 * instruction; @rw selects reads, writes or both.  Unlike the exec
 * callbacks, this is planted even for mem-only translations.
 */
void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                      qemu_plugin_vcpu_mem_cb_t cb,
                                      enum qemu_plugin_cb_flags flags,
                                      enum qemu_plugin_mem_rw rw,
                                      void *udata)
{
    plugin_register_vcpu_mem_cb(&insn->mem_cbs, cb, flags, rw, udata);
}
192
/*
 * Plant an inline operation @op against the per-vCPU value @entry with
 * immediate @imm, executed on the memory accesses (@rw) of this
 * instruction.
 */
void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
    struct qemu_plugin_insn *insn,
    enum qemu_plugin_mem_rw rw,
    enum qemu_plugin_op op,
    qemu_plugin_u64 entry,
    uint64_t imm)
{
    plugin_register_inline_op_on_entry(&insn->mem_cbs, rw, op, entry, imm);
}
202
/* Register @cb to be called whenever a translation block is translated. */
void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
                                           qemu_plugin_vcpu_tb_trans_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_TB_TRANS, cb);
}
208
/* Register @cb to be called on syscall entry. */
void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id,
                                          qemu_plugin_vcpu_syscall_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL, cb);
}
214
/* Register @cb to be called on syscall return. */
void
qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_syscall_ret_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, cb);
}
221
222 /*
223 * Plugin Queries
224 *
225 * These are queries that the plugin can make to gauge information
226 * from our opaque data types. We do not want to leak internal details
227 * here just information useful to the plugin.
228 */
229
230 /*
231 * Translation block information:
232 *
233 * A plugin can query the virtual address of the start of the block
234 * and the number of instructions in it. It can also get access to
235 * each translated instruction.
236 */
237
/* Number of instructions in this translation block. */
size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb)
{
    return tb->n;
}
242
/*
 * Guest virtual address of the start of the block, taken from the
 * current disassembly context rather than @tb itself.
 */
uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    return db->pc_first;
}
248
249 struct qemu_plugin_insn *
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb * tb,size_t idx)250 qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
251 {
252 struct qemu_plugin_insn *insn;
253 if (unlikely(idx >= tb->n)) {
254 return NULL;
255 }
256 insn = g_ptr_array_index(tb->insns, idx);
257 return insn;
258 }
259
260 /*
261 * Instruction information
262 *
263 * These queries allow the plugin to retrieve information about each
264 * instruction being translated.
265 */
266
/*
 * Copy up to @len bytes of the instruction's encoding into @dest.
 * The copy is clamped to the instruction length; returns the number
 * of bytes copied, or 0 if the translator read fails.
 */
size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
                             void *dest, size_t len)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;

    len = MIN(len, insn->len);
    return translator_st(db, dest, insn->vaddr, len) ? len : 0;
}
275
/* Size of the instruction's encoding in bytes. */
size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn)
{
    return insn->len;
}
280
/* Guest virtual address of the instruction. */
uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
{
    return insn->vaddr;
}
285
qemu_plugin_insn_haddr(const struct qemu_plugin_insn * insn)286 void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
287 {
288 const DisasContextBase *db = tcg_ctx->plugin_db;
289 vaddr page0_last = db->pc_first | ~TARGET_PAGE_MASK;
290
291 if (db->fake_insn) {
292 return NULL;
293 }
294
295 /*
296 * ??? The return value is not intended for use of host memory,
297 * but as a proxy for address space and physical address.
298 * Thus we are only interested in the first byte and do not
299 * care about spanning pages.
300 */
301 if (insn->vaddr <= page0_last) {
302 if (db->host_addr[0] == NULL) {
303 return NULL;
304 }
305 return db->host_addr[0] + insn->vaddr - db->pc_first;
306 } else {
307 if (db->host_addr[1] == NULL) {
308 return NULL;
309 }
310 return db->host_addr[1] + insn->vaddr - (page0_last + 1);
311 }
312 }
313
/*
 * Disassemble the instruction; returns a newly allocated string
 * (see plugin_disas for ownership details).
 */
char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
{
    return plugin_disas(tcg_ctx->cpu, tcg_ctx->plugin_db,
                        insn->vaddr, insn->len);
}
319
/*
 * Symbol associated with the instruction's vaddr via lookup_symbol(),
 * or NULL when the lookup yields an empty string.
 */
const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
{
    const char *sym = lookup_symbol(insn->vaddr);
    return sym[0] != 0 ? sym : NULL;
}
325
326 /*
327 * The memory queries allow the plugin to query information about a
328 * memory access.
329 */
330
/* log2 of the access size in bytes (MO_SIZE field of the MemOp). */
unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIZE;
}
336
/* True if the loaded value is sign-extended (MO_SIGN set). */
bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return op & MO_SIGN;
}
342
/* True if the access is big-endian (MO_BSWAP field equals MO_BE). */
bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info)
{
    MemOp op = get_memop(info);
    return (op & MO_BSWAP) == MO_BE;
}
348
/* True if the access is a store (write bit set in the rw info). */
bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
{
    return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
}
353
qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info)354 qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info)
355 {
356 uint64_t low = current_cpu->neg.plugin_mem_value_low;
357 qemu_plugin_mem_value value;
358
359 switch (qemu_plugin_mem_size_shift(info)) {
360 case 0:
361 value.type = QEMU_PLUGIN_MEM_VALUE_U8;
362 value.data.u8 = (uint8_t)low;
363 break;
364 case 1:
365 value.type = QEMU_PLUGIN_MEM_VALUE_U16;
366 value.data.u16 = (uint16_t)low;
367 break;
368 case 2:
369 value.type = QEMU_PLUGIN_MEM_VALUE_U32;
370 value.data.u32 = (uint32_t)low;
371 break;
372 case 3:
373 value.type = QEMU_PLUGIN_MEM_VALUE_U64;
374 value.data.u64 = low;
375 break;
376 case 4:
377 value.type = QEMU_PLUGIN_MEM_VALUE_U128;
378 value.data.u128.low = low;
379 value.data.u128.high = current_cpu->neg.plugin_mem_value_high;
380 break;
381 default:
382 g_assert_not_reached();
383 }
384 return value;
385 }
386
387 /*
388 * Virtual Memory queries
389 */
390
#ifdef CONFIG_SOFTMMU
/* Per-thread scratch result returned by qemu_plugin_get_hwaddr(). */
static __thread struct qemu_plugin_hwaddr hwaddr_info;
#endif
394
/*
 * Resolve the hardware address information for the current memory
 * access described by @info at @vaddr.  Returns a pointer to
 * thread-local storage (valid until the next call on this thread),
 * or NULL on lookup failure or in user mode.
 */
struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
                                                  uint64_t vaddr)
{
#ifdef CONFIG_SOFTMMU
    CPUState *cpu = current_cpu;
    unsigned int mmu_idx = get_mmuidx(info);
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
    hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;

    assert(mmu_idx < NB_MMU_MODES);

    /* The TLB entry should still be hot from the access itself. */
    if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
                           hwaddr_info.is_store, &hwaddr_info)) {
        error_report("invalid use of qemu_plugin_get_hwaddr");
        return NULL;
    }

    return &hwaddr_info;
#else
    return NULL;
#endif
}
417
/* True if the access hit device (IO) memory; always false in user mode. */
bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    return haddr->is_io;
#else
    return false;
#endif
}
426
/*
 * Physical address of the access, or 0 when @haddr is NULL or in
 * user mode.
 */
uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
    if (haddr) {
        return haddr->phys_addr;
    }
#endif
    return 0;
}
436
/*
 * Name of the device backing the access: the memory region's name for
 * IO accesses (a synthesised "anonNNNNNNNN" name if the region is
 * unnamed), "RAM" otherwise, and "Invalid" in user mode.  Returned
 * strings are interned and need not be freed.
 */
const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
{
#ifdef CONFIG_SOFTMMU
    MemoryRegion *mr;

    if (!h || !h->is_io) {
        return g_intern_static_string("RAM");
    }
    mr = h->mr;
    if (mr->name) {
        return g_intern_string(mr->name);
    }
    /* Unnamed region: derive a stable label from its address. */
    {
        unsigned maddr = (uintptr_t)mr;
        g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
        return g_intern_string(temp);
    }
#else
    return g_intern_static_string("Invalid");
#endif
}
456
/* Number of vCPUs currently known to the plugin core. */
int qemu_plugin_num_vcpus(void)
{
    return plugin_num_vcpus();
}
461
462 /*
463 * Plugin output
464 */
/* Emit @string to the QEMU log under the plugin log mask. */
void qemu_plugin_outs(const char *string)
{
    qemu_log_mask(CPU_LOG_PLUGIN, "%s", string);
}
469
/*
 * Parse @value as a boolean option named @name into @ret.
 * Returns false on NULL arguments or unparseable input.
 */
bool qemu_plugin_bool_parse(const char *name, const char *value, bool *ret)
{
    return name && value && qapi_bool_parse(name, value, ret, NULL);
}
474
475 /*
476 * Binary path, start and end locations
477 */
/*
 * Path of the binary being executed (user mode only; NULL in system
 * mode).  The returned string is g_strdup'd, so ownership passes to
 * the caller.
 */
const char *qemu_plugin_path_to_binary(void)
{
    char *path = NULL;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    path = g_strdup(ts->bprm->filename);
#endif
    return path;
}
487
/* Start of the text segment (user mode only; 0 in system mode). */
uint64_t qemu_plugin_start_code(void)
{
    uint64_t start = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    start = ts->info->start_code;
#endif
    return start;
}
497
/* End of the text segment (user mode only; 0 in system mode). */
uint64_t qemu_plugin_end_code(void)
{
    uint64_t end = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    end = ts->info->end_code;
#endif
    return end;
}
507
/* Program entry point (user mode only; 0 in system mode). */
uint64_t qemu_plugin_entry_code(void)
{
    uint64_t entry = 0;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = get_task_state(current_cpu);
    entry = ts->info->entry;
#endif
    return entry;
}
517
518 /*
519 * Create register handles.
520 *
 * We need to create a handle for each register so the plugin
 * infrastructure can call gdbstub to read a register. They are
 * currently just a pointer encapsulation of the gdb_reg, but in
 * the future they may hold internal plugin state, so it's important
 * that plugin authors are not tempted to treat them as numbers.
526 *
527 * We also construct a result array with those handles and some
528 * ancillary data the plugin might find useful.
529 */
530
/*
 * Build the plugin-facing register descriptor array from the gdbstub
 * register list.  Unnamed registers are not exposed.  Returns a new
 * GArray of qemu_plugin_reg_descriptor owned by the caller.
 */
static GArray *create_register_handles(GArray *gdbstub_regs)
{
    GArray *result = g_array_new(true, true,
                                 sizeof(qemu_plugin_reg_descriptor));
    int i;

    for (i = 0; i < gdbstub_regs->len; i++) {
        GDBRegDesc *reg = &g_array_index(gdbstub_regs, GDBRegDesc, i);

        /* skip "un-named" regs */
        if (reg->name) {
            /* Offset by one so a handle of NULL/0 is never valid. */
            qemu_plugin_reg_descriptor desc = {
                .handle = GINT_TO_POINTER(reg->gdb_reg + 1),
                .name = g_intern_string(reg->name),
                .feature = g_intern_string(reg->feature_name),
            };
            g_array_append_val(result, desc);
        }
    }

    return result;
}
554
/*
 * Return the register descriptors for the current vCPU.  Must be
 * called with a valid current_cpu (i.e. from a vCPU context).
 */
GArray *qemu_plugin_get_registers(void)
{
    g_assert(current_cpu);

    g_autoptr(GArray) regs = gdb_get_register_list(current_cpu);
    return create_register_handles(regs);
}
562
qemu_plugin_read_memory_vaddr(vaddr addr,GByteArray * data,size_t len)563 bool qemu_plugin_read_memory_vaddr(vaddr addr, GByteArray *data, size_t len)
564 {
565 g_assert(current_cpu);
566
567 if (len == 0) {
568 return false;
569 }
570
571 g_byte_array_set_size(data, len);
572
573 int result = cpu_memory_rw_debug(current_cpu, addr, data->data,
574 data->len, false);
575
576 if (result < 0) {
577 return false;
578 }
579
580 return true;
581 }
582
/*
 * Read the register behind @reg into @buf via gdbstub.  The handle
 * encodes gdb_reg + 1 (see create_register_handles), hence the - 1.
 * Must be called from a vCPU context.
 */
int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
    g_assert(current_cpu);

    return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg) - 1);
}
589
/* Allocate a per-vCPU scoreboard whose entries are @element_size bytes. */
struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size)
{
    return plugin_scoreboard_new(element_size);
}
594
/* Release a scoreboard allocated with qemu_plugin_scoreboard_new. */
void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    plugin_scoreboard_free(score);
}
599
/*
 * Pointer to @vcpu_index's entry in @score.  Asserts the index is in
 * range of the known vCPUs.
 */
void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
                                  unsigned int vcpu_index)
{
    g_assert(vcpu_index < qemu_plugin_num_vcpus());
    /* we can't use g_array_index since entry size is not statically known */
    char *base_ptr = score->data->data;
    return base_ptr + vcpu_index * g_array_get_element_size(score->data);
}
608
/*
 * Address of the uint64_t at @entry.offset within @vcpu_index's
 * scoreboard entry.
 */
static uint64_t *plugin_u64_address(qemu_plugin_u64 entry,
                                    unsigned int vcpu_index)
{
    char *ptr = qemu_plugin_scoreboard_find(entry.score, vcpu_index);
    return (uint64_t *)(ptr + entry.offset);
}
615
/* Add @added to @entry's value for @vcpu_index. */
void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t added)
{
    *plugin_u64_address(entry, vcpu_index) += added;
}
621
/* Read @entry's value for @vcpu_index. */
uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry,
                             unsigned int vcpu_index)
{
    return *plugin_u64_address(entry, vcpu_index);
}
627
/* Set @entry's value for @vcpu_index to @val. */
void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
                         uint64_t val)
{
    *plugin_u64_address(entry, vcpu_index) = val;
}
633
qemu_plugin_u64_sum(qemu_plugin_u64 entry)634 uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry)
635 {
636 uint64_t total = 0;
637 for (int i = 0, n = qemu_plugin_num_vcpus(); i < n; ++i) {
638 total += qemu_plugin_u64_get(entry, i);
639 }
640 return total;
641 }
642
643 /*
644 * Time control
645 */
/* Set once a plugin has been granted control of virtual time. */
static bool has_control;
#ifdef CONFIG_SOFTMMU
/* Blocks migration while a plugin controls the clock (system mode). */
static Error *migration_blocker;
#endif
650
qemu_plugin_request_time_control(void)651 const void *qemu_plugin_request_time_control(void)
652 {
653 if (!has_control) {
654 has_control = true;
655 #ifdef CONFIG_SOFTMMU
656 error_setg(&migration_blocker,
657 "TCG plugin time control does not support migration");
658 migrate_add_blocker(&migration_blocker, NULL);
659 #endif
660 return &has_control;
661 }
662 return NULL;
663 }
664
665 #ifdef CONFIG_SOFTMMU
/* Async work item: advance the virtual clock to the time in @data. */
static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data)
{
    int64_t new_time = data.host_ulong;
    qemu_clock_advance_virtual_time(new_time);
}
671 #endif
672
/*
 * Advance virtual time to @new_time ns.  Only honoured when @handle is
 * the token returned by qemu_plugin_request_time_control(), and only
 * in system mode.
 */
void qemu_plugin_update_ns(const void *handle, int64_t new_time)
{
#ifdef CONFIG_SOFTMMU
    if (handle == &has_control) {
        /* Need to execute out of cpu_exec, so bql can be locked. */
        async_run_on_cpu(current_cpu,
                         advance_virtual_time__async,
                         RUN_ON_CPU_HOST_ULONG(new_time));
    }
#endif
}
684